Columns: repo (string, 5-58 chars) | path (string, 9-168 chars) | func_name (string, 9-130 chars) | original_string (string, 66-10.5k chars) | language (1 class) | code (string, 66-10.5k chars) | code_tokens (sequence) | docstring (string, 8-16k chars) | docstring_tokens (sequence) | sha (string, 40 chars) | url (string, 94-266 chars) | partition (1 class)

greyblake/telebot | lib/telebot/client.rb | Telebot.Client.send_message | ruby

def send_message(chat_id:, text:, disable_web_page_preview: false, reply_to_message_id: nil, reply_markup: nil, parse_mode: nil)
result = call(:sendMessage,
chat_id: chat_id,
text: text,
disable_web_page_preview: disable_web_page_preview,
reply_to_message_id: reply_to_message_id,
reply_markup: reply_markup,
parse_mode: parse_mode
)
Message.new(result)
end

Send text message.
@param chat_id [Integer] Unique identifier for the message recipient - User or GroupChat id
@param text [String] Text of the message to be sent
@param disable_web_page_preview [Boolean] Disables link previews for links in this message
@param reply_to_message_id [Integer] If the message is a reply, ID of the original message
@param reply_markup [ReplyKeyboardMarkup, ReplyKeyboardHide, ForceReply] Additional interface options
@param parse_mode [String] "Markdown" or "HTML", Optional
@return [Telebot::Message]

16c3f73ce47c2dc2480c23b3ef2cc8ee1f9cae4a | https://github.com/greyblake/telebot/blob/16c3f73ce47c2dc2480c23b3ef2cc8ee1f9cae4a/lib/telebot/client.rb#L65-L75 | train
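
A minimal usage sketch for the call documented above, assuming the client is built from a bot token (the token and chat id below are placeholders, and the message_id attribute is assumed to follow the Telegram Bot API):

```ruby
require 'telebot'

# Hypothetical bot token and chat id; replace with real values.
client = Telebot::Client.new('123456:ABC-DEF_bot_token')

# Send a Markdown-formatted text message and inspect the returned Telebot::Message.
message = client.send_message(chat_id: 112_233, text: 'Hello, *world*', parse_mode: 'Markdown')
puts message.message_id
```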

greyblake/telebot | lib/telebot/client.rb | Telebot.Client.forward_message | ruby

def forward_message(chat_id:, from_chat_id:, message_id:)
result = call(:forwardMessage, chat_id: chat_id, from_chat_id: from_chat_id, message_id: message_id)
Message.new(result)
end

Use this method to forward messages of any kind.
@param chat_id [Integer] Unique identifier for the message recipient - User or GroupChat id
@param from_chat_id [Integer] Unique identifier for the chat where the original message was sent - User or GroupChat id
@param message_id [Integer] Unique message identifier
@return [Telebot::Message]

16c3f73ce47c2dc2480c23b3ef2cc8ee1f9cae4a | https://github.com/greyblake/telebot/blob/16c3f73ce47c2dc2480c23b3ef2cc8ee1f9cae4a/lib/telebot/client.rb#L84-L87 | train

greyblake/telebot | lib/telebot/client.rb | Telebot.Client.send_photo | ruby

def send_photo(chat_id:, photo:, caption: nil, reply_to_message_id: nil, reply_markup: nil)
result = call(:sendPhoto, chat_id: chat_id, photo: photo, caption: caption, reply_to_message_id: reply_to_message_id, reply_markup: reply_markup)
Message.new(result)
end

Send a picture.
@param chat_id [Integer] Unique identifier for the message recipient - User or GroupChat id
@param photo [InputFile, String] Photo to send. You can either pass a
file_id as String to resend a photo that is already on the Telegram servers,
or upload a new photo using multipart/form-data.
@param caption [String] Photo caption (may also be used when resending photos by file_id)
@param reply_to_message_id [Integer] If the message is a reply, ID of the original message
@param reply_markup [ReplyKeyboardMarkup, ReplyKeyboardHide, ForceReply] Additional interface options
@return [Telebot::Message]

16c3f73ce47c2dc2480c23b3ef2cc8ee1f9cae4a | https://github.com/greyblake/telebot/blob/16c3f73ce47c2dc2480c23b3ef2cc8ee1f9cae4a/lib/telebot/client.rb#L100-L103 | train
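
A short sketch of resending a photo by file_id, which the @param photo documentation above allows; the file_id value is a placeholder and the client is the one from the earlier send_message sketch. Uploading a new file would pass a Telebot::InputFile instead (its constructor is not shown in this excerpt):

```ruby
# Resend a photo that already lives on Telegram's servers by its file_id (placeholder value).
known_file_id = 'AgADBAADr6cxG_placeholder'
client.send_photo(chat_id: 112_233, photo: known_file_id, caption: 'Same picture again')
```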

greyblake/telebot | lib/telebot/client.rb | Telebot.Client.send_document | ruby

def send_document(chat_id:, document:, reply_to_message_id: nil, reply_markup: nil)
result = call(:sendDocument, chat_id: chat_id, document: document, reply_to_message_id: reply_to_message_id, reply_markup: reply_markup)
Message.new(result)
end

Send general file.
@param chat_id [Integer]
@param document [Telebot::InputFile, String] document to send (file or file_id)
@param reply_to_message_id [Integer] If the message is a reply, ID of the original message
@param reply_markup [ReplyKeyboardMarkup, ReplyKeyboardHide, ForceReply] Additional interface options
@return [Telebot::Message]

16c3f73ce47c2dc2480c23b3ef2cc8ee1f9cae4a | https://github.com/greyblake/telebot/blob/16c3f73ce47c2dc2480c23b3ef2cc8ee1f9cae4a/lib/telebot/client.rb#L128-L131 | train

greyblake/telebot | lib/telebot/client.rb | Telebot.Client.send_sticker | ruby

def send_sticker(chat_id:, sticker:, reply_to_message_id: nil, reply_markup: nil)
result = call(:sendSticker, chat_id: chat_id, sticker: sticker, reply_to_message_id: reply_to_message_id, reply_markup: reply_markup)
Message.new(result)
end

Use this method to send .webp stickers.
@param chat_id [Integer]
@param sticker [Telebot::InputFile, String] sticker to send (file or file_id)
@param reply_to_message_id [Integer] If the message is a reply, ID of the original message
@param reply_markup [ReplyKeyboardMarkup, ReplyKeyboardHide, ForceReply] Additional interface options
@return [Telebot::Message]

16c3f73ce47c2dc2480c23b3ef2cc8ee1f9cae4a | https://github.com/greyblake/telebot/blob/16c3f73ce47c2dc2480c23b3ef2cc8ee1f9cae4a/lib/telebot/client.rb#L141-L144 | train

greyblake/telebot | lib/telebot/client.rb | Telebot.Client.send_location | ruby

def send_location(chat_id:, latitude:, longitude:, reply_to_message_id: nil, reply_markup: nil)
result = call(:sendLocation, chat_id: chat_id,
latitude: latitude,
longitude: longitude,
reply_to_message_id: reply_to_message_id,
reply_markup: reply_markup)
Message.new(result)
end

Send a point on the map.
@param chat_id [Integer]
@param latitude [Integer]
@param longitude [Integer]
@param reply_to_message_id [Integer]
@param reply_markup [ReplyKeyboardMarkup, ReplyKeyboardHide, ForceReply]
@return [Telebot::Message]

16c3f73ce47c2dc2480c23b3ef2cc8ee1f9cae4a | https://github.com/greyblake/telebot/blob/16c3f73ce47c2dc2480c23b3ef2cc8ee1f9cae4a/lib/telebot/client.rb#L168-L175 | train

greyblake/telebot | lib/telebot/client.rb | Telebot.Client.get_user_profile_photos | ruby

def get_user_profile_photos(user_id:, offset: nil, limit: nil)
result = call(:getUserProfilePhotos, user_id: user_id, offset: offset, limit: limit)
UserProfilePhotos.new(result)
end

Use this method to get a list of profile pictures for a user.
@param user_id [Integer]
@param offset [Integer]
@param limit [Integer]
@return [Telebot::UserProfilePhotos]

16c3f73ce47c2dc2480c23b3ef2cc8ee1f9cae4a | https://github.com/greyblake/telebot/blob/16c3f73ce47c2dc2480c23b3ef2cc8ee1f9cae4a/lib/telebot/client.rb#L197-L200 | train
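
A sketch of listing a user's profile pictures with the client from the earlier sketches; the user id is a placeholder, and PhotoSize is assumed to expose width and file_id as in the Telegram Bot API (the photos accessor itself is shown in the Telebot.UserProfilePhotos.photos= entry further down):

```ruby
# Fetch up to 10 profile photos for a user (placeholder user id).
profile_photos = client.get_user_profile_photos(user_id: 445_566, limit: 10)

# Each entry is one photo in several sizes; print the file_id of the largest size.
profile_photos.photos.each do |sizes|
  largest = sizes.max_by(&:width)
  puts largest.file_id
end
```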

Cimpress-MCP/LambdaWrap | lib/lambda_wrap/api_manager.rb | LambdaWrap.API.add_lambda | ruby

def add_lambda(*new_lambda)
flattened_lambdas = new_lambda.flatten
flattened_lambdas.each { |lambda| parameter_guard(lambda, LambdaWrap::Lambda, 'LambdaWrap::Lambda') }
lambdas.concat(flattened_lambdas)
end

Constructor for the high level API Manager class.
@param [Hash] options The Options to configure the API.
@option options [String] :access_key_id The AWS Access Key Id to communicate with AWS. Will also check the
environment variables for this value.
@option options [String] :secret_access_key The AWS Secret Access Key to communicate with AWS. Also checks
environment variables for this value.
@option options [String] :region The AWS Region to deploy API to. Also checks environment variables for this
value.
@todo Allow clients to pass in a YAML file for all construction.
Add Lambda Object(s) to the API.
@param [LambdaWrap::Lambda Array<LambdaWrap::Lambda>] new_lambda Splat of LambdaWrap Lambda
objects to add to the API. Overloaded as:
add_lambda(lambda1) OR add_lambda([lambda1, lambda2]) OR add_lambda(lambda1, lambda2)

9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8 | https://github.com/Cimpress-MCP/LambdaWrap/blob/9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8/lib/lambda_wrap/api_manager.rb#L66-L70 | train
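
A sketch of the three documented add_lambda call forms, assuming AWS credentials come from the environment; the function names, role ARN, zip paths and runtime are placeholders, and the LambdaWrap::Lambda options are the ones documented in the LambdaWrap.Lambda.deploy entry further down:

```ruby
require 'lambda_wrap'

api = LambdaWrap::API.new(region: 'eu-west-1')

# Placeholder Lambda definitions (see the Lambda entry below for the full option list).
users_lambda = LambdaWrap::Lambda.new(
  lambda_name: 'users-service', handler: 'index.handler',
  role_arn: 'arn:aws:iam::123456789012:role/lambda-exec',
  path_to_zip_file: 'build/users-service.zip', runtime: 'nodejs8.10'
)
orders_lambda = LambdaWrap::Lambda.new(
  lambda_name: 'orders-service', handler: 'index.handler',
  role_arn: 'arn:aws:iam::123456789012:role/lambda-exec',
  path_to_zip_file: 'build/orders-service.zip', runtime: 'nodejs8.10'
)

# All three overloads described by the docstring above:
api.add_lambda(users_lambda)                  # single object
api.add_lambda([users_lambda, orders_lambda]) # array
api.add_lambda(users_lambda, orders_lambda)   # splat
```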

Cimpress-MCP/LambdaWrap | lib/lambda_wrap/api_manager.rb | LambdaWrap.API.deploy | ruby

def deploy(environment_options)
environment_parameter_guard(environment_options)
if no_op?
puts 'Nothing to deploy.'
return
end
deployment_start_message = 'Deploying '
deployment_start_message += "#{dynamo_tables.length} Dynamo Tables, " unless dynamo_tables.empty?
deployment_start_message += "#{lambdas.length} Lambdas, " unless lambdas.empty?
deployment_start_message += "#{api_gateways.length} API Gateways " unless api_gateways.empty?
deployment_start_message += "to Environment: #{environment_options.name}"
puts deployment_start_message
total_time_start = Time.now
services_time_start = total_time_start
dynamo_tables.each { |table| table.deploy(environment_options, @dynamo_client, @region) }
services_time_end = Time.now
unless dynamo_tables.empty?
puts "Deploying #{dynamo_tables.length} Table(s) took: \
#{Time.at(services_time_end - services_time_start).utc.strftime('%H:%M:%S')}"
end
services_time_start = Time.now
lambdas.each { |lambda| lambda.deploy(environment_options, @lambda_client, @region) }
services_time_end = Time.now
unless lambdas.empty?
puts "Deploying #{lambdas.length} Lambda(s) took: \
#{Time.at(services_time_end - services_time_start).utc.strftime('%H:%M:%S')}"
end
services_time_start = Time.now
api_gateways.each { |apig| apig.deploy(environment_options, @api_gateway_client, @region) }
services_time_end = Time.now
unless api_gateways.empty?
puts "Deploying #{api_gateways.length} API Gateway(s) took: \
#{Time.at(services_time_end - services_time_start).utc.strftime('%H:%M:%S')}"
end
total_time_end = Time.now
puts "Total API Deployment took: \
#{Time.at(total_time_end - total_time_start).utc.strftime('%H:%M:%S')}"
puts "Successfully deployed API to #{environment_options.name}"
true
end

Deploys all services to the specified environment.
@param [LambdaWrap::Environment] environment_options the Environment to deploy

9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8 | https://github.com/Cimpress-MCP/LambdaWrap/blob/9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8/lib/lambda_wrap/api_manager.rb#L97-L147 | train
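
A sketch of running a full deployment with the api object from the previous example. The LambdaWrap::Environment constructor is not part of this excerpt, so the single-argument form below is an assumption:

```ruby
# Assumed constructor form: only the environment name is passed here.
production = LambdaWrap::Environment.new('production')

# Deploys Dynamo tables, Lambdas and API Gateways, printing per-service timings.
api.deploy(production)

# The delete entry that follows removes every resource the API object manages.
api.delete
```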

Cimpress-MCP/LambdaWrap | lib/lambda_wrap/api_manager.rb | LambdaWrap.API.delete | ruby

def delete
if dynamo_tables.empty? && lambdas.empty? && api_gateways.empty?
puts 'Nothing to Deleting.'
return
end
deployment_start_message = 'Deleting '
deployment_start_message += "#{dynamo_tables.length} Dynamo Tables, " unless dynamo_tables.empty?
deployment_start_message += "#{lambdas.length} Lambdas, " unless lambdas.empty?
deployment_start_message += "#{api_gateways.length} API Gateways " unless api_gateways.empty?
puts deployment_start_message
total_time_start = Time.now
services_time_start = total_time_start
dynamo_tables.each { |table| table.delete(@dynamo_client, @region) }
services_time_end = Time.now
unless dynamo_tables.empty?
puts "Deleting #{dynamo_tables.length} Table(s) took: \
#{Time.at(services_time_end - services_time_start).utc.strftime('%H:%M:%S')}"
end
services_time_start = Time.now
lambdas.each { |lambda| lambda.delete(@lambda_client, @region) }
services_time_end = Time.now
unless lambdas.empty?
puts "Deleting #{lambdas.length} Lambda(s) took: \
#{Time.at(services_time_end - services_time_start).utc.strftime('%H:%M:%S')}"
end
services_time_start = Time.now
api_gateways.each { |apig| apig.delete(@api_gateway_client, @region) }
services_time_end = Time.now
unless api_gateways.empty?
puts "Deleting #{api_gateways.length} API Gateway(s) took: \
#{Time.at(services_time_end - services_time_start).utc.strftime('%H:%M:%S')}"
end
total_time_end = Time.now
puts "Total API Deletion took: \
#{Time.at(total_time_end - total_time_start).utc.strftime('%H:%M:%S')}"
puts 'Successful Deletion of API'
true
end

Deletes all services from the cloud.

9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8 | https://github.com/Cimpress-MCP/LambdaWrap/blob/9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8/lib/lambda_wrap/api_manager.rb#L205-L253 | train

Cimpress-MCP/LambdaWrap | lib/lambda_wrap/lambda_manager.rb | LambdaWrap.Lambda.deploy | ruby

def deploy(environment_options, client, region = 'AWS_REGION')
super
puts "Deploying Lambda: #{@lambda_name} to Environment: #{environment_options.name}"
unless File.exist?(@path_to_zip_file)
raise ArgumentError, "Deployment Package Zip File does not exist: #{@path_to_zip_file}!"
end
lambda_details = retrieve_lambda_details
if lambda_details.nil?
function_version = create_lambda
else
update_lambda_config
function_version = update_lambda_code
end
create_alias(function_version, environment_options.name, environment_options.description)
cleanup_unused_versions if @delete_unreferenced_versions
puts "Lambda: #{@lambda_name} successfully deployed!"
true
end

Initializes a Lambda Manager. Frontloaded configuration.
@param [Hash] options The Configuration for the Lambda
@option options [String] :lambda_name The name you want to assign to the function you are uploading. The function
names appear in the console and are returned in the ListFunctions API. Function names are used to specify
functions to other AWS Lambda API operations, such as Invoke. Note that the length constraint applies only to
the ARN. If you specify only the function name, it is limited to 64 characters in length.
@option options [String] :handler The function within your code that Lambda calls to begin execution.
@option options [String] :role_arn The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it
executes your function to access any other Amazon Web Services (AWS) resources.
@option options [String] :path_to_zip_file The absolute path to the Deployment Package zip file
@option options [String] :runtime The runtime environment for the Lambda function you are uploading.
@option options [String] :description ('Deployed with LambdaWrap') A short, user-defined function description.
Lambda does not use this value. Assign a meaningful description as you see fit.
@option options [Integer] :timeout (30) The function execution time at which Lambda should terminate the function.
@option options [Integer] :memory_size (128) The amount of memory, in MB, your Lambda function is given. Lambda
uses this memory size to infer the amount of CPU and memory allocated to your function. The value must be a
multiple of 64MB. Minimum: 128, Maximum: 3008.
@option options [Array<String>] :subnet_ids ([]) If your Lambda function accesses resources in a VPC, you provide
this parameter identifying the list of subnet IDs. These must belong to the same VPC. You must provide at least
one security group and one subnet ID to configure VPC access.
@option options [Array<String>] :security_group_ids ([]) If your Lambda function accesses resources in a VPC, you
provide this parameter identifying the list of security group IDs. These must belong to the same VPC. You must
provide at least one security group and one subnet ID.
@option options [Boolean] :delete_unreferenced_versions (true) Option to delete any Lambda Function Versions upon
deployment that do not have an alias pointing to them.
@option options [String] :dead_letter_queue_arn ('') The ARN of the SQS Queue for failed async invocations.
Deploys the Lambda to the specified Environment. Creates a Lambda Function if one didn't exist.
Updates the Lambda's configuration, Updates the Lambda's Code, publishes a new version, and creates
an alias that points to the newly published version. If the @delete_unreferenced_versions option
is enabled, all Lambda Function versions that don't have an alias pointing to them will be deleted.
@param environment_options [LambdaWrap::Environment] The target Environment to deploy
@param client [Aws::Lambda::Client] Client to use with SDK. Should be passed in by the API class.
@param region [String] AWS Region string. Should be passed in by the API class.

9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8 | https://github.com/Cimpress-MCP/LambdaWrap/blob/9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8/lib/lambda_wrap/lambda_manager.rb#L104-L128 | train
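
A sketch of building a LambdaWrap::Lambda from the options documented above and deploying it directly with the signature shown (environment, SDK client, region); the name, role ARN and zip path are placeholders, and the aws-sdk v2 gem is assumed to provide Aws::Lambda::Client:

```ruby
require 'lambda_wrap'
require 'aws-sdk' # v2 SDK, assumed to provide Aws::Lambda::Client

notifier = LambdaWrap::Lambda.new(
  lambda_name: 'notification-service',                    # placeholder name
  handler: 'index.handler',
  role_arn: 'arn:aws:iam::123456789012:role/lambda-exec', # placeholder role
  path_to_zip_file: 'build/notification-service.zip',     # placeholder package
  runtime: 'nodejs8.10',
  timeout: 60,
  memory_size: 256,
  delete_unreferenced_versions: true
)

# Reusing the production environment object from the earlier API sketch.
notifier.deploy(production, Aws::Lambda::Client.new(region: 'eu-west-1'), 'eu-west-1')
```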

Cimpress-MCP/LambdaWrap | lib/lambda_wrap/lambda_manager.rb | LambdaWrap.Lambda.delete | ruby

def delete(client, region = 'AWS_REGION')
super
puts "Deleting all versions and aliases for Lambda: #{@lambda_name}"
lambda_details = retrieve_lambda_details
if lambda_details.nil?
puts 'No Lambda to delete.'
else
options = { function_name: @lambda_name }
@client.delete_function(options)
puts "Lambda #{@lambda_name} and all Versions & Aliases have been deleted."
end
true
end

Deletes the Lambda Object with associated versions, code, configuration, and aliases.
@param client [Aws::Lambda::Client] Client to use with SDK. Should be passed in by the API class.
@param region [String] AWS Region string. Should be passed in by the API class.

9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8 | https://github.com/Cimpress-MCP/LambdaWrap/blob/9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8/lib/lambda_wrap/lambda_manager.rb#L147-L159 | train

Cimpress-MCP/LambdaWrap | lib/lambda_wrap/api_gateway_manager.rb | LambdaWrap.ApiGateway.teardown | ruby

def teardown(environment_options, client, region = 'AWS_REGION')
super
api_id = get_id_for_api(@api_name)
if api_id
delete_stage(api_id, environment_options.name)
else
puts "API Gateway Object #{@api_name} not found. No environment to tear down."
end
true
end

Tears down the environment for API Gateway. Deletes the stage.
@param environment_options [LambdaWrap::Environment] The environment to teardown.
@param client [Aws::APIGateway::Client] Client to use with SDK. Should be passed in by the API class.
@param region [String] AWS Region string. Should be passed in by the API class.

9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8 | https://github.com/Cimpress-MCP/LambdaWrap/blob/9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8/lib/lambda_wrap/api_gateway_manager.rb#L77-L86 | train

Cimpress-MCP/LambdaWrap | lib/lambda_wrap/api_gateway_manager.rb | LambdaWrap.ApiGateway.delete | ruby

def delete(client, region = 'AWS_REGION')
super
api_id = get_id_for_api(@api_name)
if api_id
options = {
rest_api_id: api_id
}
@client.delete_rest_api(options)
puts "Deleted API: #{@api_name} ID:#{api_id}"
else
puts "API Gateway Object #{@api_name} not found. Nothing to delete."
end
true
end

Deletes all stages and API Gateway object.
@param client [Aws::APIGateway::Client] Client to use with SDK. Should be passed in by the API class.
@param region [String] AWS Region string. Should be passed in by the API class.

9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8 | https://github.com/Cimpress-MCP/LambdaWrap/blob/9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8/lib/lambda_wrap/api_gateway_manager.rb#L91-L104 | train

Cimpress-MCP/LambdaWrap | lib/lambda_wrap/dynamo_db_manager.rb | LambdaWrap.DynamoTable.deploy | ruby

def deploy(environment_options, client, region = 'AWS_REGION')
super
puts "Deploying Table: #{@table_name} to Environment: #{environment_options.name}"
full_table_name = @table_name + (@append_environment_on_deploy ? "-#{environment_options.name}" : '')
table_details = retrieve_table_details(full_table_name)
if table_details.nil?
create_table(full_table_name)
else
wait_until_table_is_available(full_table_name) if table_details[:table_status] != 'ACTIVE'
update_table(full_table_name, table_details)
end
puts "Dynamo Table #{full_table_name} is now available."
full_table_name
end

Sets up the DynamoTable for the Dynamo DB Manager. Preloading the configuration in the constructor.
@param [Hash] options The configuration for the DynamoDB Table.
@option options [String] :table_name The name of the DynamoDB Table. A "Base Name" can be used here where the
environment name can be appended upon deployment.
@option options [Array<Hash>] :attribute_definitions ([{ attribute_name: 'Id', attribute_type: 'S' }]) An array of
attributes that describe the key schema for the table and indexes. The Hash must have symbols: :attribute_name &
:attribute_type. Please see AWS Documentation for the {http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html
Data Model}.
@option options [Array<Hash>] :key_schema ([{ attribute_name: 'Id', key_type: 'HASH' }]) Specifies the attributes
that make up the primary key for a table or an index. The attributes in key_schema must also be defined in the
AttributeDefinitions array. Please see AWS Documentation for the {http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html
Data Model}.
Each element in the array must be composed of:
* <tt>:attribute_name</tt> - The name of this key attribute.
* <tt>:key_type</tt> - The role that the key attribute will assume:
* <tt>HASH</tt> - partition key
* <tt>RANGE</tt> - sort key
The partition key of an item is also known as its hash attribute. The term "hash attribute" derives from
DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their
partition key values.
The sort key of an item is also known as its range attribute. The term "range attribute" derives from the way
DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key
value.
For a simple primary key (partition key), you must provide exactly one element with a <tt>KeyType</tt> of
<tt>HASH</tt>.
For a composite primary key (partition key and sort key), you must provide exactly two elements, in this order:
The first element must have a <tt>KeyType</tt> of <tt>HASH</tt>, and the second element must have a
<tt>KeyType</tt> of <tt>RANGE</tt>.
For more information, see {http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key
Specifying the Primary Key} in the <em>Amazon DynamoDB Developer Guide</em>.
@option options [Integer] :read_capacity_units (1) The maximum number of strongly consistent reads consumed per
second before DynamoDB returns a <tt>ThrottlingException</tt>. Must be at least 1. For current minimum and
maximum provisioned throughput values, see {http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
Limits} in the <em>Amazon DynamoDB Developer Guide</em>.
@option options [Integer] :write_capacity_units (1) The maximum number of writes consumed per second before
DynamoDB returns a <tt>ThrottlingException</tt>. Must be at least 1. For current minimum and maximum
provisioned throughput values, see {http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
Limits} in the <em>Amazon DynamoDB Developer Guide</em>.
@option options [Array<Hash>] :local_secondary_indexes ([]) One or more local secondary indexes (the maximum is
five) to be created on the table. Each index is scoped to a given partition key value. There is a 10 GB size
limit per partition key value; otherwise, the size of a local secondary index is unconstrained.
Each element in the array must be a Hash with these symbols:
* <tt>:index_name</tt> - The name of the local secondary index. Must be unique only for this table.
* <tt>:key_schema</tt> - Specifies the key schema for the local secondary index. The key schema must begin with
the same partition key as the table.
* <tt>:projection</tt> - Specifies attributes that are copied (projected) from the table into the index. These
are in addition to the primary key attributes and index key attributes, which are automatically projected. Each
attribute specification is composed of:
* <tt>:projection_type</tt> - One of the following:
* <tt>KEYS_ONLY</tt> - Only the index and primary keys are projected into the index.
* <tt>INCLUDE</tt> - Only the specified table attributes are projected into the index. The list of projected
attributes are in <tt>non_key_attributes</tt>.
* <tt>ALL</tt> - All of the table attributes are projected into the index.
* <tt>:non_key_attributes</tt> - A list of one or more non-key attribute names that are projected into the
secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the
secondary indexes, must not exceed 20. If you project the same attribute into two different indexes, this
counts as two distinct attributes when determining the total.
@option options [Array<Hash>] :global_secondary_indexes ([]) One or more global secondary indexes (the maximum is
five) to be created on the table. Each global secondary index (Hash) in the array includes the following:
* <tt>:index_name</tt> - The name of the global secondary index. Must be unique only for this table.
* <tt>:key_schema</tt> - Specifies the key schema for the global secondary index.
* <tt>:projection</tt> - Specifies attributes that are copied (projected) from the table into the index. These
are in addition to the primary key attributes and index key attributes, which are automatically projected. Each
attribute specification is composed of:
* <tt>:projection_type</tt> - One of the following:
* <tt>KEYS_ONLY</tt> - Only the index and primary keys are projected into the index.
* <tt>INCLUDE</tt> - Only the specified table attributes are projected into the index. The list of projected
attributes are in <tt>NonKeyAttributes</tt>.
* <tt>ALL</tt> - All of the table attributes are projected into the index.
* <tt>non_key_attributes</tt> - A list of one or more non-key attribute names that are projected into the
secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the
secondary indexes, must not exceed 20. If you project the same attribute into two different indexes, this
counts as two distinct attributes when determining the total.
* <tt>:provisioned_throughput</tt> - The provisioned throughput settings for the global secondary index,
consisting of read and write capacity units.
@option options [Boolean] :append_environment_on_deploy (false) Option to append the name of the environment to
the table name upon deployment and teardown. DynamoDB Tables cannot shard data in a similar manner as how Lambda
aliases and API Gateway Environments work. This option is supposed to help the user with naming tables instead
of managing the environment names on their own.
Deploys the DynamoDB Table to the target environment. If the @append_environment_on_deploy option is set, the
table_name will be appended with a hyphen and the environment name. This will attempt to Create or Update with
the parameters specified from the constructor. This may take a LONG time for it will wait for any new indexes to
be available.
@param environment_options [LambdaWrap::Environment] Target environment to deploy.
@param client [Aws::DynamoDB::Client] Client to use with SDK. Should be passed in by the API class.
@param region [String] AWS Region string. Should be passed in by the API class.

9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8 | https://github.com/Cimpress-MCP/LambdaWrap/blob/9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8/lib/lambda_wrap/dynamo_db_manager.rb#L158-L176 | train
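
A sketch of a table definition built from the options documented above and deployed with the signature shown; the table name and key attribute are placeholders, and Aws::DynamoDB::Client from the aws-sdk v2 gem is assumed:

```ruby
orders_table = LambdaWrap::DynamoTable.new(
  table_name: 'orders', # base name; the environment name is appended on deploy
  attribute_definitions: [{ attribute_name: 'Id', attribute_type: 'S' }],
  key_schema: [{ attribute_name: 'Id', key_type: 'HASH' }],
  read_capacity_units: 5,
  write_capacity_units: 5,
  append_environment_on_deploy: true
)

# Deploy directly, reusing the production environment from the earlier sketch.
orders_table.deploy(production, Aws::DynamoDB::Client.new(region: 'eu-west-1'), 'eu-west-1')
```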

Cimpress-MCP/LambdaWrap | lib/lambda_wrap/dynamo_db_manager.rb | LambdaWrap.DynamoTable.wait_until_table_is_available | ruby

def wait_until_table_is_available(full_table_name, delay = 5, max_attempts = 5)
puts "Waiting for Table #{full_table_name} to be available."
puts "Waiting with a #{delay} second delay between attempts, for a maximum of #{max_attempts} attempts."
max_time = Time.at(delay * max_attempts).utc.strftime('%H:%M:%S')
puts "Max waiting time will be: #{max_time} (approximate)."
# wait until the table has updated to being fully available
# waiting for ~2min at most; an error will be thrown afterwards
started_waiting_at = Time.now
max_attempts.times do |attempt|
puts "Attempt #{attempt + 1}/#{max_attempts}, \
#{Time.at(Time.now - started_waiting_at).utc.strftime('%H:%M:%S')}/#{max_time}"
details = retrieve_table_details(full_table_name)
if details.table_status != 'ACTIVE'
puts "Table: #{full_table_name} is not yet available. Status: #{details.table_status}. Retrying..."
else
updating_indexes = details.global_secondary_indexes.reject do |global_index|
global_index.index_status == 'ACTIVE'
end
return true if updating_indexes.empty?
puts 'Table is available, but the global indexes are not:'
puts(updating_indexes.map { |global_index| "#{global_index.index_name}, #{global_index.index_status}" })
end
Kernel.sleep(delay.seconds)
end
raise Exception, "Table #{full_table_name} did not become available after #{max_attempts} attempts. " \
'Try again later or inspect the AWS console.'
end

Waits for the table to be available

9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8 | https://github.com/Cimpress-MCP/LambdaWrap/blob/9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8/lib/lambda_wrap/dynamo_db_manager.rb#L228-L258 | train

Cimpress-MCP/LambdaWrap | lib/lambda_wrap/dynamo_db_manager.rb | LambdaWrap.DynamoTable.build_global_index_updates_array | ruby

def build_global_index_updates_array(current_global_indexes)
indexes_to_update = []
return indexes_to_update if current_global_indexes.empty?
current_global_indexes.each do |current_index|
@global_secondary_indexes.each do |target_index|
# Find the same named index
next unless target_index[:index_name] == current_index[:index_name]
# Skip unless a different ProvisionedThroughput is specified
break unless (target_index[:provisioned_throughput][:read_capacity_units] !=
current_index.provisioned_throughput.read_capacity_units) ||
(target_index[:provisioned_throughput][:write_capacity_units] !=
current_index.provisioned_throughput.write_capacity_units)
indexes_to_update << { index_name: target_index[:index_name],
provisioned_throughput: target_index[:provisioned_throughput] }
end
end
puts indexes_to_update
indexes_to_update
end

Looks through the current list of Global Secondary Indexes and builds an array where the Provisioned Throughput
in the intended Indexes is higher than in the current Indexes.

9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8 | https://github.com/Cimpress-MCP/LambdaWrap/blob/9c34b4ea49c86aaf9b5c998b12a61ceb065cedd8/lib/lambda_wrap/dynamo_db_manager.rb#L341-L359 | train

greyblake/telebot | lib/telebot/objects/user_profile_photos.rb | Telebot.UserProfilePhotos.photos= | ruby

def photos=(values)
@photos = values.map do |photo|
photo.map do |photo_size_attrs|
PhotoSize.new(photo_size_attrs)
end
end
end

Assign Array of Array of PhotoSize
@param values [Array<Array<PhotoSize>>]

16c3f73ce47c2dc2480c23b3ef2cc8ee1f9cae4a | https://github.com/greyblake/telebot/blob/16c3f73ce47c2dc2480c23b3ef2cc8ee1f9cae4a/lib/telebot/objects/user_profile_photos.rb#L15-L21 | train

square/border_patrol | lib/border_patrol/polygon.rb | BorderPatrol.Polygon.contains_point? | ruby

def contains_point?(point)
return false unless inside_bounding_box?(point)
c = false
i = -1
j = size - 1
while (i += 1) < size
if (self[i].y <= point.y && point.y < self[j].y) ||
(self[j].y <= point.y && point.y < self[i].y)
if point.x < (self[j].x - self[i].x) * (point.y - self[i].y) / (self[j].y - self[i].y) + self[i].x
c = !c
end
end
j = i
end
c
end
] | Returns true if the given point lies inside this polygon (ray-casting test). | [
"Returns",
"true",
"if",
"the",
"given",
"point",
"lies",
"inside",
"this",
"polygon",
"(",
"ray-casting",
"test",
")",
"."
] | 72c50ea17c89f7fa89fbaed25ad3db3fa1d8eeb1 | https://github.com/square/border_patrol/blob/72c50ea17c89f7fa89fbaed25ad3db3fa1d8eeb1/lib/border_patrol/polygon.rb#L43-L58 | train |
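A usage sketch for the point-in-polygon check above; it assumes only what the method itself relies on, namely that the polygon behaves like an array of objects responding to x and y (the real gem's constructors may differ).

  require 'border_patrol'

  Point = Struct.new(:x, :y)   # any object with x/y works for the check
  square = BorderPatrol::Polygon.new   # construction details of the gem are assumed here
  [Point.new(0, 0), Point.new(4, 0), Point.new(4, 4), Point.new(0, 4)].each { |p| square << p }

  square.contains_point?(Point.new(2, 2))   # => true
  square.contains_point?(Point.new(9, 9))   # => false (fails the bounding-box check)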
haw-itn/openassets-ruby | lib/openassets/api.rb | OpenAssets.Api.list_unspent | def list_unspent(oa_address_list = [])
btc_address_list = oa_address_list.map { |oa_address| oa_address_to_address(oa_address)}
outputs = get_unspent_outputs(btc_address_list)
result = outputs.map{|out| out.to_hash}
result
end | ruby | def list_unspent(oa_address_list = [])
btc_address_list = oa_address_list.map { |oa_address| oa_address_to_address(oa_address)}
outputs = get_unspent_outputs(btc_address_list)
result = outputs.map{|out| out.to_hash}
result
end | [
"def",
"list_unspent",
"(",
"oa_address_list",
"=",
"[",
"]",
")",
"btc_address_list",
"=",
"oa_address_list",
".",
"map",
"{",
"|",
"oa_address",
"|",
"oa_address_to_address",
"(",
"oa_address",
")",
"}",
"outputs",
"=",
"get_unspent_outputs",
"(",
"btc_address_list",
")",
"result",
"=",
"outputs",
".",
"map",
"{",
"|",
"out",
"|",
"out",
".",
"to_hash",
"}",
"result",
"end"
] | get UTXO for colored coins.
@param [Array] oa_address_list Obtain the balance of this open assets address only, or all addresses if unspecified.
@return [Array] Return array of the unspent information Hash. | [
"get",
"UTXO",
"for",
"colored",
"coins",
"."
] | c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf | https://github.com/haw-itn/openassets-ruby/blob/c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf/lib/openassets/api.rb#L48-L53 | train |
haw-itn/openassets-ruby | lib/openassets/api.rb | OpenAssets.Api.get_balance | def get_balance(address = nil)
outputs = get_unspent_outputs(address.nil? ? [] : [oa_address_to_address(address)])
colored_outputs = outputs.map{|o|o.output}
sorted_outputs = colored_outputs.sort_by { |o|o.script.to_string}
groups = sorted_outputs.group_by{|o| o.script.to_string}
result = groups.map{|k, v|
btc_address = script_to_address(v[0].script)
sorted_script_outputs = v.sort_by{|o|o.asset_id unless o.asset_id}
group_assets = sorted_script_outputs.group_by{|o|o.asset_id}.select{|k,v| !k.nil?}
assets = group_assets.map{|asset_id, outputs|
{
'asset_id' => asset_id,
'quantity' => outputs.inject(0) { |sum, o| sum + o.asset_quantity }.to_s,
'amount' => outputs.inject(0) { |sum, o| sum + o.asset_amount }.to_s,
'asset_definition_url' => outputs[0].asset_definition_url,
'proof_of_authenticity' => outputs[0].proof_of_authenticity
}
}
{
'address' => btc_address,
'oa_address' => (btc_address.nil? || btc_address.is_a?(Array)) ? nil : address_to_oa_address(btc_address),
'value' => satoshi_to_coin(v.inject(0) { |sum, o|sum + o.value}),
'assets' => assets,
'account' => v[0].account
}
}
address.nil? ? result : result.select{|r|r['oa_address'] == address}
end | ruby | def get_balance(address = nil)
outputs = get_unspent_outputs(address.nil? ? [] : [oa_address_to_address(address)])
colored_outputs = outputs.map{|o|o.output}
sorted_outputs = colored_outputs.sort_by { |o|o.script.to_string}
groups = sorted_outputs.group_by{|o| o.script.to_string}
result = groups.map{|k, v|
btc_address = script_to_address(v[0].script)
sorted_script_outputs = v.sort_by{|o|o.asset_id unless o.asset_id}
group_assets = sorted_script_outputs.group_by{|o|o.asset_id}.select{|k,v| !k.nil?}
assets = group_assets.map{|asset_id, outputs|
{
'asset_id' => asset_id,
'quantity' => outputs.inject(0) { |sum, o| sum + o.asset_quantity }.to_s,
'amount' => outputs.inject(0) { |sum, o| sum + o.asset_amount }.to_s,
'asset_definition_url' => outputs[0].asset_definition_url,
'proof_of_authenticity' => outputs[0].proof_of_authenticity
}
}
{
'address' => btc_address,
'oa_address' => (btc_address.nil? || btc_address.is_a?(Array)) ? nil : address_to_oa_address(btc_address),
'value' => satoshi_to_coin(v.inject(0) { |sum, o|sum + o.value}),
'assets' => assets,
'account' => v[0].account
}
}
address.nil? ? result : result.select{|r|r['oa_address'] == address}
end | [
"def",
"get_balance",
"(",
"address",
"=",
"nil",
")",
"outputs",
"=",
"get_unspent_outputs",
"(",
"address",
".",
"nil?",
"?",
"[",
"]",
":",
"[",
"oa_address_to_address",
"(",
"address",
")",
"]",
")",
"colored_outputs",
"=",
"outputs",
".",
"map",
"{",
"|",
"o",
"|",
"o",
".",
"output",
"}",
"sorted_outputs",
"=",
"colored_outputs",
".",
"sort_by",
"{",
"|",
"o",
"|",
"o",
".",
"script",
".",
"to_string",
"}",
"groups",
"=",
"sorted_outputs",
".",
"group_by",
"{",
"|",
"o",
"|",
"o",
".",
"script",
".",
"to_string",
"}",
"result",
"=",
"groups",
".",
"map",
"{",
"|",
"k",
",",
"v",
"|",
"btc_address",
"=",
"script_to_address",
"(",
"v",
"[",
"0",
"]",
".",
"script",
")",
"sorted_script_outputs",
"=",
"v",
".",
"sort_by",
"{",
"|",
"o",
"|",
"o",
".",
"asset_id",
"unless",
"o",
".",
"asset_id",
"}",
"group_assets",
"=",
"sorted_script_outputs",
".",
"group_by",
"{",
"|",
"o",
"|",
"o",
".",
"asset_id",
"}",
".",
"select",
"{",
"|",
"k",
",",
"v",
"|",
"!",
"k",
".",
"nil?",
"}",
"assets",
"=",
"group_assets",
".",
"map",
"{",
"|",
"asset_id",
",",
"outputs",
"|",
"{",
"'asset_id'",
"=>",
"asset_id",
",",
"'quantity'",
"=>",
"outputs",
".",
"inject",
"(",
"0",
")",
"{",
"|",
"sum",
",",
"o",
"|",
"sum",
"+",
"o",
".",
"asset_quantity",
"}",
".",
"to_s",
",",
"'amount'",
"=>",
"outputs",
".",
"inject",
"(",
"0",
")",
"{",
"|",
"sum",
",",
"o",
"|",
"sum",
"+",
"o",
".",
"asset_amount",
"}",
".",
"to_s",
",",
"'asset_definition_url'",
"=>",
"outputs",
"[",
"0",
"]",
".",
"asset_definition_url",
",",
"'proof_of_authenticity'",
"=>",
"outputs",
"[",
"0",
"]",
".",
"proof_of_authenticity",
"}",
"}",
"{",
"'address'",
"=>",
"btc_address",
",",
"'oa_address'",
"=>",
"(",
"btc_address",
".",
"nil?",
"||",
"btc_address",
".",
"is_a?",
"(",
"Array",
")",
")",
"?",
"nil",
":",
"address_to_oa_address",
"(",
"btc_address",
")",
",",
"'value'",
"=>",
"satoshi_to_coin",
"(",
"v",
".",
"inject",
"(",
"0",
")",
"{",
"|",
"sum",
",",
"o",
"|",
"sum",
"+",
"o",
".",
"value",
"}",
")",
",",
"'assets'",
"=>",
"assets",
",",
"'account'",
"=>",
"v",
"[",
"0",
"]",
".",
"account",
"}",
"}",
"address",
".",
"nil?",
"?",
"result",
":",
"result",
".",
"select",
"{",
"|",
"r",
"|",
"r",
"[",
"'oa_address'",
"]",
"==",
"address",
"}",
"end"
] | Returns the balance in both bitcoin and colored coin assets for all of the addresses available in your Bitcoin Core wallet.
@param [String] address The open assets address. if unspecified nil. | [
"Returns",
"the",
"balance",
"in",
"both",
"bitcoin",
"and",
"colored",
"coin",
"assets",
"for",
"all",
"of",
"the",
"addresses",
"available",
"in",
"your",
"Bitcoin",
"Core",
"wallet",
"."
] | c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf | https://github.com/haw-itn/openassets-ruby/blob/c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf/lib/openassets/api.rb#L57-L84 | train |
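A hedged usage sketch for get_balance; the constructor configuration is omitted and the open assets address is a placeholder, but the hash keys ('oa_address', 'value', 'assets', 'asset_id', 'quantity', 'amount') follow the structure built in the method above.

  require 'openassets'

  api = OpenAssets::Api.new   # RPC credentials, network and fee settings omitted here
  api.get_balance('akB4NBW9UuCmHuepksob6yfZs6naHtRCPNy').each do |entry|
    puts "#{entry['oa_address']} holds #{entry['value']} BTC"
    entry['assets'].each do |asset|
      puts "  #{asset['asset_id']}: quantity=#{asset['quantity']} amount=#{asset['amount']}"
    end
  end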
haw-itn/openassets-ruby | lib/openassets/api.rb | OpenAssets.Api.issue_asset | def issue_asset(from, amount, metadata = nil, to = nil, fees = nil, mode = 'broadcast', output_qty = 1)
to = from if to.nil?
colored_outputs = get_unspent_outputs([oa_address_to_address(from)])
issue_param = OpenAssets::Transaction::TransferParameters.new(colored_outputs, to, from, amount, output_qty)
tx = create_tx_builder.issue_asset(issue_param, metadata, fees.nil? ? @config[:default_fees]: fees)
tx = process_transaction(tx, mode)
tx
end | ruby | def issue_asset(from, amount, metadata = nil, to = nil, fees = nil, mode = 'broadcast', output_qty = 1)
to = from if to.nil?
colored_outputs = get_unspent_outputs([oa_address_to_address(from)])
issue_param = OpenAssets::Transaction::TransferParameters.new(colored_outputs, to, from, amount, output_qty)
tx = create_tx_builder.issue_asset(issue_param, metadata, fees.nil? ? @config[:default_fees]: fees)
tx = process_transaction(tx, mode)
tx
end | [
"def",
"issue_asset",
"(",
"from",
",",
"amount",
",",
"metadata",
"=",
"nil",
",",
"to",
"=",
"nil",
",",
"fees",
"=",
"nil",
",",
"mode",
"=",
"'broadcast'",
",",
"output_qty",
"=",
"1",
")",
"to",
"=",
"from",
"if",
"to",
".",
"nil?",
"colored_outputs",
"=",
"get_unspent_outputs",
"(",
"[",
"oa_address_to_address",
"(",
"from",
")",
"]",
")",
"issue_param",
"=",
"OpenAssets",
"::",
"Transaction",
"::",
"TransferParameters",
".",
"new",
"(",
"colored_outputs",
",",
"to",
",",
"from",
",",
"amount",
",",
"output_qty",
")",
"tx",
"=",
"create_tx_builder",
".",
"issue_asset",
"(",
"issue_param",
",",
"metadata",
",",
"fees",
".",
"nil?",
"?",
"@config",
"[",
":default_fees",
"]",
":",
"fees",
")",
"tx",
"=",
"process_transaction",
"(",
"tx",
",",
"mode",
")",
"tx",
"end"
] | Creates a transaction for issuing an asset.
@param[String] from The open asset address to issue the asset from.
@param[Integer] amount The amount of asset units to issue.
@param[String] to The open asset address to send the asset to; if unspecified, the assets are sent back to the issuing address.
@param[String] metadata The metadata to embed in the transaction. The asset definition pointer defined by this metadata.
@param[Integer] fees The fees in satoshis for the transaction.
@param[String] mode Specify the following mode.
'broadcast' (default) for signing and broadcasting the transaction,
'signed' for signing the transaction without broadcasting,
'unsigned' for getting the raw unsigned transaction without broadcasting"""='broadcast'
@param[Integer] output_qty The number of outputs to divide the issuance across. Default value is 1.
Ex. amount = 125 and output_qty = 2, asset quantity = [62, 63] and issue TxOut is two.
@return[Bitcoin::Protocol::Tx] The Bitcoin::Protocol::Tx object. | [
"Creates",
"a",
"transaction",
"for",
"issuing",
"an",
"asset",
"."
] | c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf | https://github.com/haw-itn/openassets-ruby/blob/c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf/lib/openassets/api.rb#L99-L106 | train |
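An illustrative issuance call following the parameter order documented above, reusing the api object from the earlier balance sketch; the issuing address and metadata URL are placeholders, and 'unsigned' mode is used so nothing is signed or broadcast.

  issuer = 'akB4NBW9UuCmHuepksob6yfZs6naHtRCPNy'   # placeholder open assets address
  tx = api.issue_asset(issuer,                                          # from
                       125,                                             # asset units to issue
                       'u=https://example.com/asset_definition.json',   # metadata pointer (placeholder URL)
                       nil,                                             # to: defaults back to the issuer
                       nil,                                             # fees: falls back to default_fees
                       'unsigned',                                      # build only, do not sign or broadcast
                       2)                                               # split the issuance across two outputs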
haw-itn/openassets-ruby | lib/openassets/api.rb | OpenAssets.Api.send_asset | def send_asset(from, asset_id, amount, to, fees = nil, mode = 'broadcast', output_qty = 1)
colored_outputs = get_unspent_outputs([oa_address_to_address(from)])
asset_transfer_spec = OpenAssets::Transaction::TransferParameters.new(colored_outputs, to, from, amount, output_qty)
tx = create_tx_builder.transfer_asset(asset_id, asset_transfer_spec, from, fees.nil? ? @config[:default_fees]: fees)
tx = process_transaction(tx, mode)
tx
end | ruby | def send_asset(from, asset_id, amount, to, fees = nil, mode = 'broadcast', output_qty = 1)
colored_outputs = get_unspent_outputs([oa_address_to_address(from)])
asset_transfer_spec = OpenAssets::Transaction::TransferParameters.new(colored_outputs, to, from, amount, output_qty)
tx = create_tx_builder.transfer_asset(asset_id, asset_transfer_spec, from, fees.nil? ? @config[:default_fees]: fees)
tx = process_transaction(tx, mode)
tx
end | [
"def",
"send_asset",
"(",
"from",
",",
"asset_id",
",",
"amount",
",",
"to",
",",
"fees",
"=",
"nil",
",",
"mode",
"=",
"'broadcast'",
",",
"output_qty",
"=",
"1",
")",
"colored_outputs",
"=",
"get_unspent_outputs",
"(",
"[",
"oa_address_to_address",
"(",
"from",
")",
"]",
")",
"asset_transfer_spec",
"=",
"OpenAssets",
"::",
"Transaction",
"::",
"TransferParameters",
".",
"new",
"(",
"colored_outputs",
",",
"to",
",",
"from",
",",
"amount",
",",
"output_qty",
")",
"tx",
"=",
"create_tx_builder",
".",
"transfer_asset",
"(",
"asset_id",
",",
"asset_transfer_spec",
",",
"from",
",",
"fees",
".",
"nil?",
"?",
"@config",
"[",
":default_fees",
"]",
":",
"fees",
")",
"tx",
"=",
"process_transaction",
"(",
"tx",
",",
"mode",
")",
"tx",
"end"
] | Creates a transaction for sending an asset from an address to another.
@param[String] from The open asset address to send the asset from.
@param[String] asset_id The asset ID identifying the asset to send.
@param[Integer] amount The amount of asset units to send.
@param[String] to The open asset address to send the asset to.
@param[Integer] fees The fees in satoshis for the transaction.
@param[String] mode 'broadcast' (default) for signing and broadcasting the transaction,
'signed' for signing the transaction without broadcasting,
'unsigned' for getting the raw unsigned transaction without broadcasting"""='broadcast'
@return[Bitcoin::Protocol:Tx] The resulting transaction. | [
"Creates",
"a",
"transaction",
"for",
"sending",
"an",
"asset",
"from",
"an",
"address",
"to",
"another",
"."
] | c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf | https://github.com/haw-itn/openassets-ruby/blob/c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf/lib/openassets/api.rb#L118-L124 | train |
haw-itn/openassets-ruby | lib/openassets/api.rb | OpenAssets.Api.send_assets | def send_assets(from, send_asset_params, fees = nil, mode = 'broadcast')
transfer_specs = send_asset_params.map{ |param|
colored_outputs = get_unspent_outputs([oa_address_to_address(param.from || from)])
[param.asset_id, OpenAssets::Transaction::TransferParameters.new(colored_outputs, param.to, param.from || from, param.amount)]
}
btc_transfer_spec = OpenAssets::Transaction::TransferParameters.new(
get_unspent_outputs([oa_address_to_address(from)]), nil, oa_address_to_address(from), 0)
tx = create_tx_builder.transfer_assets(transfer_specs, btc_transfer_spec, fees.nil? ? @config[:default_fees]: fees)
tx = process_transaction(tx, mode)
tx
end | ruby | def send_assets(from, send_asset_params, fees = nil, mode = 'broadcast')
transfer_specs = send_asset_params.map{ |param|
colored_outputs = get_unspent_outputs([oa_address_to_address(param.from || from)])
[param.asset_id, OpenAssets::Transaction::TransferParameters.new(colored_outputs, param.to, param.from || from, param.amount)]
}
btc_transfer_spec = OpenAssets::Transaction::TransferParameters.new(
get_unspent_outputs([oa_address_to_address(from)]), nil, oa_address_to_address(from), 0)
tx = create_tx_builder.transfer_assets(transfer_specs, btc_transfer_spec, fees.nil? ? @config[:default_fees]: fees)
tx = process_transaction(tx, mode)
tx
end | [
"def",
"send_assets",
"(",
"from",
",",
"send_asset_params",
",",
"fees",
"=",
"nil",
",",
"mode",
"=",
"'broadcast'",
")",
"transfer_specs",
"=",
"send_asset_params",
".",
"map",
"{",
"|",
"param",
"|",
"colored_outputs",
"=",
"get_unspent_outputs",
"(",
"[",
"oa_address_to_address",
"(",
"param",
".",
"from",
"||",
"from",
")",
"]",
")",
"[",
"param",
".",
"asset_id",
",",
"OpenAssets",
"::",
"Transaction",
"::",
"TransferParameters",
".",
"new",
"(",
"colored_outputs",
",",
"param",
".",
"to",
",",
"param",
".",
"from",
"||",
"from",
",",
"param",
".",
"amount",
")",
"]",
"}",
"btc_transfer_spec",
"=",
"OpenAssets",
"::",
"Transaction",
"::",
"TransferParameters",
".",
"new",
"(",
"get_unspent_outputs",
"(",
"[",
"oa_address_to_address",
"(",
"from",
")",
"]",
")",
",",
"nil",
",",
"oa_address_to_address",
"(",
"from",
")",
",",
"0",
")",
"tx",
"=",
"create_tx_builder",
".",
"transfer_assets",
"(",
"transfer_specs",
",",
"btc_transfer_spec",
",",
"fees",
".",
"nil?",
"?",
"@config",
"[",
":default_fees",
"]",
":",
"fees",
")",
"tx",
"=",
"process_transaction",
"(",
"tx",
",",
"mode",
")",
"tx",
"end"
] | Creates a transaction for sending multiple asset from an address to another.
@param[String] from The open asset address to send the asset from when a send_asset_param has no from address;
also the address to send the bitcoins from, if needed, and where to send any bitcoin change.
@param[Array[OpenAssets::SendAssetParam]] send_asset_params The send Asset information(asset_id, amount, to, from).
@param[Integer] fees The fees in satoshis for the transaction.
@param[String] mode 'broadcast' (default) for signing and broadcasting the transaction,
'signed' for signing the transaction without broadcasting,
'unsigned' for getting the raw unsigned transaction without broadcasting"""='broadcast'
@return[Bitcoin::Protocol:Tx] The resulting transaction. | [
"Creates",
"a",
"transaction",
"for",
"sending",
"multiple",
"asset",
"from",
"an",
"address",
"to",
"another",
"."
] | c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf | https://github.com/haw-itn/openassets-ruby/blob/c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf/lib/openassets/api.rb#L135-L145 | train |
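A batching sketch for send_assets; the SendAssetParam argument order (asset_id, amount, to) is an assumption read off the parameter list above, and the asset id and addresses are placeholders.

  params = [
    OpenAssets::SendAssetParam.new('ALn3aK1fSuG27N96UGYB1kUYUpGKRhBuBC', 30, 'akXDPMMHHBrUrd1fM756M1GSB8viVAwMyBk'),
    OpenAssets::SendAssetParam.new('ALn3aK1fSuG27N96UGYB1kUYUpGKRhBuBC', 20, 'akTfC7D825Cse4NvFiLCy7vr3B6x2Mpq8t6')
  ]
  tx = api.send_assets('akB4NBW9UuCmHuepksob6yfZs6naHtRCPNy', params, nil, 'unsigned')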
haw-itn/openassets-ruby | lib/openassets/api.rb | OpenAssets.Api.send_bitcoin | def send_bitcoin(from, amount, to, fees = nil, mode = 'broadcast', output_qty = 1)
validate_address([from, to])
colored_outputs = get_unspent_outputs([from])
btc_transfer_spec = OpenAssets::Transaction::TransferParameters.new(colored_outputs, to, from, amount, output_qty)
tx = create_tx_builder.transfer_btc(btc_transfer_spec, fees.nil? ? @config[:default_fees]: fees)
process_transaction(tx, mode)
end | ruby | def send_bitcoin(from, amount, to, fees = nil, mode = 'broadcast', output_qty = 1)
validate_address([from, to])
colored_outputs = get_unspent_outputs([from])
btc_transfer_spec = OpenAssets::Transaction::TransferParameters.new(colored_outputs, to, from, amount, output_qty)
tx = create_tx_builder.transfer_btc(btc_transfer_spec, fees.nil? ? @config[:default_fees]: fees)
process_transaction(tx, mode)
end | [
"def",
"send_bitcoin",
"(",
"from",
",",
"amount",
",",
"to",
",",
"fees",
"=",
"nil",
",",
"mode",
"=",
"'broadcast'",
",",
"output_qty",
"=",
"1",
")",
"validate_address",
"(",
"[",
"from",
",",
"to",
"]",
")",
"colored_outputs",
"=",
"get_unspent_outputs",
"(",
"[",
"from",
"]",
")",
"btc_transfer_spec",
"=",
"OpenAssets",
"::",
"Transaction",
"::",
"TransferParameters",
".",
"new",
"(",
"colored_outputs",
",",
"to",
",",
"from",
",",
"amount",
",",
"output_qty",
")",
"tx",
"=",
"create_tx_builder",
".",
"transfer_btc",
"(",
"btc_transfer_spec",
",",
"fees",
".",
"nil?",
"?",
"@config",
"[",
":default_fees",
"]",
":",
"fees",
")",
"process_transaction",
"(",
"tx",
",",
"mode",
")",
"end"
] | Creates a transaction for sending bitcoins from an address to another.
@param[String] from The address to send the bitcoins from.
@param[Integer] amount The amount of satoshis to send.
@param[String] to The address to send the bitcoins to.
@param[Integer] fees The fees in satoshis for the transaction.
@param[String] mode 'broadcast' (default) for signing and broadcasting the transaction,
'signed' for signing the transaction without broadcasting,
'unsigned' for getting the raw unsigned transaction without broadcasting"""='broadcast'
@param [Integer] output_qty The number of outputs to divide the sent amount across. Default value is 1.
Ex. amount = 125 and output_qty = 2, asset quantity = [62, 63] and issue TxOut is two.
@return[Bitcoin::Protocol:Tx] The resulting transaction. | [
"Creates",
"a",
"transaction",
"for",
"sending",
"bitcoins",
"from",
"an",
"address",
"to",
"another",
"."
] | c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf | https://github.com/haw-itn/openassets-ruby/blob/c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf/lib/openassets/api.rb#L158-L164 | train |
haw-itn/openassets-ruby | lib/openassets/api.rb | OpenAssets.Api.send_bitcoins | def send_bitcoins(from, send_params, fees = nil, mode = 'broadcast')
colored_outputs = get_unspent_outputs([from])
btc_transfer_specs = send_params.map{|param|
OpenAssets::Transaction::TransferParameters.new(colored_outputs, param.to, from, param.amount)
}
tx = create_tx_builder.transfer_btcs(btc_transfer_specs, fees.nil? ? @config[:default_fees]: fees)
tx = process_transaction(tx, mode)
tx
end | ruby | def send_bitcoins(from, send_params, fees = nil, mode = 'broadcast')
colored_outputs = get_unspent_outputs([from])
btc_transfer_specs = send_params.map{|param|
OpenAssets::Transaction::TransferParameters.new(colored_outputs, param.to, from, param.amount)
}
tx = create_tx_builder.transfer_btcs(btc_transfer_specs, fees.nil? ? @config[:default_fees]: fees)
tx = process_transaction(tx, mode)
tx
end | [
"def",
"send_bitcoins",
"(",
"from",
",",
"send_params",
",",
"fees",
"=",
"nil",
",",
"mode",
"=",
"'broadcast'",
")",
"colored_outputs",
"=",
"get_unspent_outputs",
"(",
"[",
"from",
"]",
")",
"btc_transfer_specs",
"=",
"send_params",
".",
"map",
"{",
"|",
"param",
"|",
"OpenAssets",
"::",
"Transaction",
"::",
"TransferParameters",
".",
"new",
"(",
"colored_outputs",
",",
"param",
".",
"to",
",",
"from",
",",
"param",
".",
"amount",
")",
"}",
"tx",
"=",
"create_tx_builder",
".",
"transfer_btcs",
"(",
"btc_transfer_specs",
",",
"fees",
".",
"nil?",
"?",
"@config",
"[",
":default_fees",
"]",
":",
"fees",
")",
"tx",
"=",
"process_transaction",
"(",
"tx",
",",
"mode",
")",
"tx",
"end"
] | Creates a transaction for sending multiple bitcoins from an address to others.
@param[String] from The address to send the bitcoins from.
@param[Array[OpenAssets::SendBitcoinParam]] send_params The send information(amount of satoshis and to).
@param[Integer] fees The fees in satoshis for the transaction.
@param[String] mode 'broadcast' (default) for signing and broadcasting the transaction,
'signed' for signing the transaction without broadcasting,
'unsigned' for getting the raw unsigned transaction without broadcasting"""='broadcast'
@return[Bitcoin::Protocol:Tx] The resulting transaction. | [
"Creates",
"a",
"transaction",
"for",
"sending",
"multiple",
"bitcoins",
"from",
"an",
"address",
"to",
"others",
"."
] | c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf | https://github.com/haw-itn/openassets-ruby/blob/c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf/lib/openassets/api.rb#L174-L182 | train |
haw-itn/openassets-ruby | lib/openassets/api.rb | OpenAssets.Api.burn_asset | def burn_asset(oa_address, asset_id, fees = nil, mode = 'broadcast')
unspents = get_unspent_outputs([oa_address_to_address(oa_address)])
tx = create_tx_builder.burn_asset(unspents, asset_id, fees.nil? ? @config[:default_fees]: fees)
process_transaction(tx, mode)
end | ruby | def burn_asset(oa_address, asset_id, fees = nil, mode = 'broadcast')
unspents = get_unspent_outputs([oa_address_to_address(oa_address)])
tx = create_tx_builder.burn_asset(unspents, asset_id, fees.nil? ? @config[:default_fees]: fees)
process_transaction(tx, mode)
end | [
"def",
"burn_asset",
"(",
"oa_address",
",",
"asset_id",
",",
"fees",
"=",
"nil",
",",
"mode",
"=",
"'broadcast'",
")",
"unspents",
"=",
"get_unspent_outputs",
"(",
"[",
"oa_address_to_address",
"(",
"oa_address",
")",
"]",
")",
"tx",
"=",
"create_tx_builder",
".",
"burn_asset",
"(",
"unspents",
",",
"asset_id",
",",
"fees",
".",
"nil?",
"?",
"@config",
"[",
":default_fees",
"]",
":",
"fees",
")",
"process_transaction",
"(",
"tx",
",",
"mode",
")",
"end"
] | Creates a transaction for burn asset.
@param[String] oa_address The open asset address to burn asset.
@param[String] asset_id The asset ID identifying the asset to burn.
@param[Integer] fees The fees in satoshis for the transaction.
@param[String] mode 'broadcast' (default) for signing and broadcasting the transaction,
'signed' for signing the transaction without broadcasting,
'unsigned' for getting the raw unsigned transaction without broadcasting"""='broadcast' | [
"Creates",
"a",
"transaction",
"for",
"burn",
"asset",
"."
] | c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf | https://github.com/haw-itn/openassets-ruby/blob/c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf/lib/openassets/api.rb#L192-L196 | train |
haw-itn/openassets-ruby | lib/openassets/api.rb | OpenAssets.Api.get_unspent_outputs | def get_unspent_outputs(addresses)
validate_address(addresses)
unspent = provider.list_unspent(addresses, @config[:min_confirmation], @config[:max_confirmation])
result = unspent.map{|item|
output_result = get_output(item['txid'], item['vout'])
output_result.account = item['account']
output = OpenAssets::Transaction::SpendableOutput.new(
OpenAssets::Transaction::OutPoint.new(item['txid'], item['vout']), output_result)
output.confirmations = item['confirmations']
output.spendable = item['spendable']
output.solvable = item['solvable']
output
}
result
end | ruby | def get_unspent_outputs(addresses)
validate_address(addresses)
unspent = provider.list_unspent(addresses, @config[:min_confirmation], @config[:max_confirmation])
result = unspent.map{|item|
output_result = get_output(item['txid'], item['vout'])
output_result.account = item['account']
output = OpenAssets::Transaction::SpendableOutput.new(
OpenAssets::Transaction::OutPoint.new(item['txid'], item['vout']), output_result)
output.confirmations = item['confirmations']
output.spendable = item['spendable']
output.solvable = item['solvable']
output
}
result
end | [
"def",
"get_unspent_outputs",
"(",
"addresses",
")",
"validate_address",
"(",
"addresses",
")",
"unspent",
"=",
"provider",
".",
"list_unspent",
"(",
"addresses",
",",
"@config",
"[",
":min_confirmation",
"]",
",",
"@config",
"[",
":max_confirmation",
"]",
")",
"result",
"=",
"unspent",
".",
"map",
"{",
"|",
"item",
"|",
"output_result",
"=",
"get_output",
"(",
"item",
"[",
"'txid'",
"]",
",",
"item",
"[",
"'vout'",
"]",
")",
"output_result",
".",
"account",
"=",
"item",
"[",
"'account'",
"]",
"output",
"=",
"OpenAssets",
"::",
"Transaction",
"::",
"SpendableOutput",
".",
"new",
"(",
"OpenAssets",
"::",
"Transaction",
"::",
"OutPoint",
".",
"new",
"(",
"item",
"[",
"'txid'",
"]",
",",
"item",
"[",
"'vout'",
"]",
")",
",",
"output_result",
")",
"output",
".",
"confirmations",
"=",
"item",
"[",
"'confirmations'",
"]",
"output",
".",
"spendable",
"=",
"item",
"[",
"'spendable'",
"]",
"output",
".",
"solvable",
"=",
"item",
"[",
"'solvable'",
"]",
"output",
"}",
"result",
"end"
] | Get unspent outputs.
@param [Array] addresses The array of Bitcoin address.
@return [Array[OpenAssets::Transaction::SpendableOutput]] The array of unspent outputs. | [
"Get",
"unspent",
"outputs",
"."
] | c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf | https://github.com/haw-itn/openassets-ruby/blob/c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf/lib/openassets/api.rb#L201-L215 | train |
haw-itn/openassets-ruby | lib/openassets/api.rb | OpenAssets.Api.get_outputs_from_txid | def get_outputs_from_txid(txid, use_cache = false)
tx = get_tx(txid, use_cache)
outputs = get_color_outputs_from_tx(tx)
outputs.map.with_index{|out, i|out.to_hash.merge({'txid' => tx.hash, 'vout' => i})}
end | ruby | def get_outputs_from_txid(txid, use_cache = false)
tx = get_tx(txid, use_cache)
outputs = get_color_outputs_from_tx(tx)
outputs.map.with_index{|out, i|out.to_hash.merge({'txid' => tx.hash, 'vout' => i})}
end | [
"def",
"get_outputs_from_txid",
"(",
"txid",
",",
"use_cache",
"=",
"false",
")",
"tx",
"=",
"get_tx",
"(",
"txid",
",",
"use_cache",
")",
"outputs",
"=",
"get_color_outputs_from_tx",
"(",
"tx",
")",
"outputs",
".",
"map",
".",
"with_index",
"{",
"|",
"out",
",",
"i",
"|",
"out",
".",
"to_hash",
".",
"merge",
"(",
"{",
"'txid'",
"=>",
"tx",
".",
"hash",
",",
"'vout'",
"=>",
"i",
"}",
")",
"}",
"end"
] | Get tx outputs.
@param[String] txid Transaction ID.
@param[Boolean] use_cache If specified true use cache.(default value is false)
@return[Array] Return array of the transaction output Hash with coloring information. | [
"Get",
"tx",
"outputs",
"."
] | c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf | https://github.com/haw-itn/openassets-ruby/blob/c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf/lib/openassets/api.rb#L248-L252 | train |
haw-itn/openassets-ruby | lib/openassets/api.rb | OpenAssets.Api.parse_issuance_p2sh_pointer | def parse_issuance_p2sh_pointer(script_sig)
script = Bitcoin::Script.new(script_sig).chunks.last
redeem_script = Bitcoin::Script.new(script)
return nil unless redeem_script.chunks[1] == Bitcoin::Script::OP_DROP
asset_def = to_bytes(redeem_script.chunks[0].to_s.bth)[0..-1].map{|x|x.to_i(16).chr}.join
asset_def && asset_def.start_with?('u=') ? asset_def : nil
end | ruby | def parse_issuance_p2sh_pointer(script_sig)
script = Bitcoin::Script.new(script_sig).chunks.last
redeem_script = Bitcoin::Script.new(script)
return nil unless redeem_script.chunks[1] == Bitcoin::Script::OP_DROP
asset_def = to_bytes(redeem_script.chunks[0].to_s.bth)[0..-1].map{|x|x.to_i(16).chr}.join
asset_def && asset_def.start_with?('u=') ? asset_def : nil
end | [
"def",
"parse_issuance_p2sh_pointer",
"(",
"script_sig",
")",
"script",
"=",
"Bitcoin",
"::",
"Script",
".",
"new",
"(",
"script_sig",
")",
".",
"chunks",
".",
"last",
"redeem_script",
"=",
"Bitcoin",
"::",
"Script",
".",
"new",
"(",
"script",
")",
"return",
"nil",
"unless",
"redeem_script",
".",
"chunks",
"[",
"1",
"]",
"==",
"Bitcoin",
"::",
"Script",
"::",
"OP_DROP",
"asset_def",
"=",
"to_bytes",
"(",
"redeem_script",
".",
"chunks",
"[",
"0",
"]",
".",
"to_s",
".",
"bth",
")",
"[",
"0",
"..",
"-",
"1",
"]",
".",
"map",
"{",
"|",
"x",
"|",
"x",
".",
"to_i",
"(",
"16",
")",
".",
"chr",
"}",
".",
"join",
"asset_def",
"&&",
"asset_def",
".",
"start_with?",
"(",
"'u='",
")",
"?",
"asset_def",
":",
"nil",
"end"
] | parse issuance p2sh which contains asset definition pointer | [
"parse",
"issuance",
"p2sh",
"which",
"contains",
"asset",
"definition",
"pointer"
] | c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf | https://github.com/haw-itn/openassets-ruby/blob/c2171ccb7e3bf2b8c712e9ef82a3bfaef3d1f4bf/lib/openassets/api.rb#L409-L415 | train |
poise/halite | lib/halite/spec_helper.rb | Halite.SpecHelper.chef_runner_options | def chef_runner_options
super.tap do |options|
options[:halite_gemspec] = halite_gemspec
# And some legacy data.
options[:default_attributes].update(default_attributes)
options[:normal_attributes].update(normal_attributes)
options[:override_attributes].update(override_attributes)
options.update(chefspec_options)
end
end | ruby | def chef_runner_options
super.tap do |options|
options[:halite_gemspec] = halite_gemspec
# And some legacy data.
options[:default_attributes].update(default_attributes)
options[:normal_attributes].update(normal_attributes)
options[:override_attributes].update(override_attributes)
options.update(chefspec_options)
end
end | [
"def",
"chef_runner_options",
"super",
".",
"tap",
"do",
"|",
"options",
"|",
"options",
"[",
":halite_gemspec",
"]",
"=",
"halite_gemspec",
"options",
"[",
":default_attributes",
"]",
".",
"update",
"(",
"default_attributes",
")",
"options",
"[",
":normal_attributes",
"]",
".",
"update",
"(",
"normal_attributes",
")",
"options",
"[",
":override_attributes",
"]",
".",
"update",
"(",
"override_attributes",
")",
"options",
".",
"update",
"(",
"chefspec_options",
")",
"end",
"end"
] | Merge in extra options data. | [
"Merge",
"in",
"extra",
"options",
"data",
"."
] | 9ae174e6b7c5d4674f3301394e14567fa89a8b3e | https://github.com/poise/halite/blob/9ae174e6b7c5d4674f3301394e14567fa89a8b3e/lib/halite/spec_helper.rb#L106-L115 | train |
poise/halite | lib/halite/rake_helper.rb | Halite.RakeHelper.install | def install
extend Rake::DSL
# Core Halite tasks
unless options[:no_gem]
desc "Convert #{gemspec.name}-#{gemspec.version} to a cookbook in the pkg directory"
task 'chef:build' do
build_cookbook
end
desc "Push #{gemspec.name}-#{gemspec.version} to Supermarket"
task 'chef:release' => ['chef:build'] do
release_cookbook(pkg_path)
end
# Patch the core gem tasks to run ours too
task 'build' => ['chef:build']
task 'release' => ['chef:release']
else
desc "Push #{gem_name} to Supermarket"
task 'chef:release' do
release_cookbook(base)
end
end
# Foodcritic doesn't have a config file, so just always try to add it.
unless options[:no_foodcritic]
install_foodcritic
end
# If a .kitchen.yml exists, install the Test Kitchen tasks.
unless options[:no_kitchen] || !File.exist?(File.join(@base, '.kitchen.yml'))
install_kitchen
end
end | ruby | def install
extend Rake::DSL
# Core Halite tasks
unless options[:no_gem]
desc "Convert #{gemspec.name}-#{gemspec.version} to a cookbook in the pkg directory"
task 'chef:build' do
build_cookbook
end
desc "Push #{gemspec.name}-#{gemspec.version} to Supermarket"
task 'chef:release' => ['chef:build'] do
release_cookbook(pkg_path)
end
# Patch the core gem tasks to run ours too
task 'build' => ['chef:build']
task 'release' => ['chef:release']
else
desc "Push #{gem_name} to Supermarket"
task 'chef:release' do
release_cookbook(base)
end
end
# Foodcritic doesn't have a config file, so just always try to add it.
unless options[:no_foodcritic]
install_foodcritic
end
# If a .kitchen.yml exists, install the Test Kitchen tasks.
unless options[:no_kitchen] || !File.exist?(File.join(@base, '.kitchen.yml'))
install_kitchen
end
end | [
"def",
"install",
"extend",
"Rake",
"::",
"DSL",
"unless",
"options",
"[",
":no_gem",
"]",
"desc",
"\"Convert #{gemspec.name}-#{gemspec.version} to a cookbook in the pkg directory\"",
"task",
"'chef:build'",
"do",
"build_cookbook",
"end",
"desc",
"\"Push #{gemspec.name}-#{gemspec.version} to Supermarket\"",
"task",
"'chef:release'",
"=>",
"[",
"'chef:build'",
"]",
"do",
"release_cookbook",
"(",
"pkg_path",
")",
"end",
"task",
"'build'",
"=>",
"[",
"'chef:build'",
"]",
"task",
"'release'",
"=>",
"[",
"'chef:release'",
"]",
"else",
"desc",
"\"Push #{gem_name} to Supermarket\"",
"task",
"'chef:release'",
"do",
"release_cookbook",
"(",
"base",
")",
"end",
"end",
"unless",
"options",
"[",
":no_foodcritic",
"]",
"install_foodcritic",
"end",
"unless",
"options",
"[",
":no_kitchen",
"]",
"||",
"!",
"File",
".",
"exist?",
"(",
"File",
".",
"join",
"(",
"@base",
",",
"'.kitchen.yml'",
")",
")",
"install_kitchen",
"end",
"end"
] | Install all Rake tasks.
@return [void] | [
"Install",
"all",
"Rake",
"tasks",
"."
] | 9ae174e6b7c5d4674f3301394e14567fa89a8b3e | https://github.com/poise/halite/blob/9ae174e6b7c5d4674f3301394e14567fa89a8b3e/lib/halite/rake_helper.rb#L37-L70 | train |
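A minimal Rakefile sketch for wiring these tasks into a gem project; the install_tasks entry point is inferred from the HelperBase error message shown further below and from the helper's usual conventions, so treat the exact call as an assumption.

  # Rakefile (sketch)
  require 'halite/rake_helper'

  Halite::RakeHelper.install_tasks   # gem_name: 'my_gem' can be passed if autodetection fails
  # afterwards `rake chef:build` writes the cookbook to pkg/ and
  # `rake chef:release` pushes it to Supermarket, as described above.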
poise/halite | lib/halite/rake_helper.rb | Halite.RakeHelper.remove_files_in_folder | def remove_files_in_folder(base_path)
existing_files = Dir.glob(File.join(base_path, '**', '*'), File::FNM_DOTMATCH).map {|path| File.expand_path(path)}.uniq.reverse # expand_path just to normalize foo/. -> foo
existing_files.delete(base_path) # Don't remove the base
# Fuck FileUtils, it is a confusing pile of fail for remove*/rm*
existing_files.each do |path|
if File.file?(path)
File.unlink(path)
elsif File.directory?(path)
Dir.unlink(path)
else
# Because paranoia
raise Error.new("Unknown type of file at '#{path}', possible symlink deletion attack")
end
end
end | ruby | def remove_files_in_folder(base_path)
existing_files = Dir.glob(File.join(base_path, '**', '*'), File::FNM_DOTMATCH).map {|path| File.expand_path(path)}.uniq.reverse # expand_path just to normalize foo/. -> foo
existing_files.delete(base_path) # Don't remove the base
# Fuck FileUtils, it is a confusing pile of fail for remove*/rm*
existing_files.each do |path|
if File.file?(path)
File.unlink(path)
elsif File.directory?(path)
Dir.unlink(path)
else
# Because paranoia
raise Error.new("Unknown type of file at '#{path}', possible symlink deletion attack")
end
end
end | [
"def",
"remove_files_in_folder",
"(",
"base_path",
")",
"existing_files",
"=",
"Dir",
".",
"glob",
"(",
"File",
".",
"join",
"(",
"base_path",
",",
"'**'",
",",
"'*'",
")",
",",
"File",
"::",
"FNM_DOTMATCH",
")",
".",
"map",
"{",
"|",
"path",
"|",
"File",
".",
"expand_path",
"(",
"path",
")",
"}",
".",
"uniq",
".",
"reverse",
"existing_files",
".",
"delete",
"(",
"base_path",
")",
"existing_files",
".",
"each",
"do",
"|",
"path",
"|",
"if",
"File",
".",
"file?",
"(",
"path",
")",
"File",
".",
"unlink",
"(",
"path",
")",
"elsif",
"File",
".",
"directory?",
"(",
"path",
")",
"Dir",
".",
"unlink",
"(",
"path",
")",
"else",
"raise",
"Error",
".",
"new",
"(",
"\"Unknown type of file at '#{path}', possible symlink deletion attack\"",
")",
"end",
"end",
"end"
] | Remove everything in a path, but not the directory itself | [
"Remove",
"everything",
"in",
"a",
"path",
"but",
"not",
"the",
"directory",
"itself"
] | 9ae174e6b7c5d4674f3301394e14567fa89a8b3e | https://github.com/poise/halite/blob/9ae174e6b7c5d4674f3301394e14567fa89a8b3e/lib/halite/rake_helper.rb#L145-L159 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/udptrackerdriver.rb | QuartzTorrent.UdpTrackerDriver.readWithTimeout | def readWithTimeout(socket, length, timeout)
rc = IO.select([socket], nil, nil, timeout)
if ! rc
raise "Waiting for response from UDP tracker #{@host}:#{@trackerPort} timed out after #{@timeout} seconds"
elsif rc[0].size > 0
socket.recvfrom(length)[0]
else
raise "Error receiving response from UDP tracker #{@host}:#{@trackerPort}"
end
end | ruby | def readWithTimeout(socket, length, timeout)
rc = IO.select([socket], nil, nil, timeout)
if ! rc
raise "Waiting for response from UDP tracker #{@host}:#{@trackerPort} timed out after #{@timeout} seconds"
elsif rc[0].size > 0
socket.recvfrom(length)[0]
else
raise "Error receiving response from UDP tracker #{@host}:#{@trackerPort}"
end
end | [
"def",
"readWithTimeout",
"(",
"socket",
",",
"length",
",",
"timeout",
")",
"rc",
"=",
"IO",
".",
"select",
"(",
"[",
"socket",
"]",
",",
"nil",
",",
"nil",
",",
"timeout",
")",
"if",
"!",
"rc",
"raise",
"\"Waiting for response from UDP tracker #{@host}:#{@trackerPort} timed out after #{@timeout} seconds\"",
"elsif",
"rc",
"[",
"0",
"]",
".",
"size",
">",
"0",
"socket",
".",
"recvfrom",
"(",
"length",
")",
"[",
"0",
"]",
"else",
"raise",
"\"Error receiving response from UDP tracker #{@host}:#{@trackerPort}\"",
"end",
"end"
] | Throws exception if timeout occurs | [
"Throws",
"exception",
"if",
"timeout",
"occurs"
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/udptrackerdriver.rb#L82-L91 | train |
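The method above is the stock IO.select-with-timeout pattern from Ruby's standard library; a generic standalone version might look like this (host, port and sizes are arbitrary placeholders).

  require 'socket'

  def read_udp_with_timeout(socket, length, timeout)
    ready = IO.select([socket], nil, nil, timeout)
    raise "timed out after #{timeout} seconds" unless ready
    socket.recvfrom(length).first
  end

  sock = UDPSocket.new
  sock.connect('tracker.example.com', 6969)   # placeholder tracker endpoint
  sock.send('hello', 0)
  response = read_udp_with_timeout(sock, 512, 5)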
poise/halite | lib/halite/helper_base.rb | Halite.HelperBase.find_gem_name | def find_gem_name(base)
spec = Dir[File.join(base, '*.gemspec')].first
File.basename(spec, '.gemspec') if spec
end | ruby | def find_gem_name(base)
spec = Dir[File.join(base, '*.gemspec')].first
File.basename(spec, '.gemspec') if spec
end | [
"def",
"find_gem_name",
"(",
"base",
")",
"spec",
"=",
"Dir",
"[",
"File",
".",
"join",
"(",
"base",
",",
"'*.gemspec'",
")",
"]",
".",
"first",
"File",
".",
"basename",
"(",
"spec",
",",
"'.gemspec'",
")",
"if",
"spec",
"end"
] | Search a directory for a .gemspec file to determine the gem name.
Returns nil if no gemspec is found.
@param base [String] Folder to search.
@return [String, nil] | [
"Search",
"a",
"directory",
"for",
"a",
".",
"gemspec",
"file",
"to",
"determine",
"the",
"gem",
"name",
".",
"Returns",
"nil",
"if",
"no",
"gemspec",
"is",
"found",
"."
] | 9ae174e6b7c5d4674f3301394e14567fa89a8b3e | https://github.com/poise/halite/blob/9ae174e6b7c5d4674f3301394e14567fa89a8b3e/lib/halite/helper_base.rb#L102-L105 | train |
poise/halite | lib/halite/helper_base.rb | Halite.HelperBase.gemspec | def gemspec
@gemspec ||= begin
raise Error.new("Unable to automatically determine gem name from specs in #{base}. Please set the gem name via #{self.class.name}.install_tasks(gem_name: 'name')") unless gem_name
g = Bundler.load_gemspec(File.join(base, gem_name+'.gemspec'))
# This is returning the path it would be in if installed normally,
# override so we get the local path. Also for reasons that are entirely
# beyond me, #tap makes Gem::Specification flip out so do it old-school.
g.full_gem_path = base
g
end
end | ruby | def gemspec
@gemspec ||= begin
raise Error.new("Unable to automatically determine gem name from specs in #{base}. Please set the gem name via #{self.class.name}.install_tasks(gem_name: 'name')") unless gem_name
g = Bundler.load_gemspec(File.join(base, gem_name+'.gemspec'))
# This is returning the path it would be in if installed normally,
# override so we get the local path. Also for reasons that are entirely
# beyond me, #tap makes Gem::Specification flip out so do it old-school.
g.full_gem_path = base
g
end
end | [
"def",
"gemspec",
"@gemspec",
"||=",
"begin",
"raise",
"Error",
".",
"new",
"(",
"\"Unable to automatically determine gem name from specs in #{base}. Please set the gem name via #{self.class.name}.install_tasks(gem_name: 'name')\"",
")",
"unless",
"gem_name",
"g",
"=",
"Bundler",
".",
"load_gemspec",
"(",
"File",
".",
"join",
"(",
"base",
",",
"gem_name",
"+",
"'.gemspec'",
")",
")",
"g",
".",
"full_gem_path",
"=",
"base",
"g",
"end",
"end"
] | Gem specification for the current gem.
@return [Gem::Specification] | [
"Gem",
"specification",
"for",
"the",
"current",
"gem",
"."
] | 9ae174e6b7c5d4674f3301394e14567fa89a8b3e | https://github.com/poise/halite/blob/9ae174e6b7c5d4674f3301394e14567fa89a8b3e/lib/halite/helper_base.rb#L110-L120 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/metainfopiecestate.rb | QuartzTorrent.MetainfoPieceState.metainfoCompletedLength | def metainfoCompletedLength
num = @completePieces.countSet
# Last block may be smaller
extra = 0
if @completePieces.set?(@completePieces.length-1)
num -= 1
extra = @lastPieceLength
end
num*BlockSize + extra
end | ruby | def metainfoCompletedLength
num = @completePieces.countSet
# Last block may be smaller
extra = 0
if @completePieces.set?(@completePieces.length-1)
num -= 1
extra = @lastPieceLength
end
num*BlockSize + extra
end | [
"def",
"metainfoCompletedLength",
"num",
"=",
"@completePieces",
".",
"countSet",
"extra",
"=",
"0",
"if",
"@completePieces",
".",
"set?",
"(",
"@completePieces",
".",
"length",
"-",
"1",
")",
"num",
"-=",
"1",
"extra",
"=",
"@lastPieceLength",
"end",
"num",
"*",
"BlockSize",
"+",
"extra",
"end"
] | Return the number of bytes of the metainfo that we have downloaded so far. | [
"Return",
"the",
"number",
"of",
"bytes",
"of",
"the",
"metainfo",
"that",
"we",
"have",
"downloaded",
"so",
"far",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/metainfopiecestate.rb#L110-L119 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/metainfopiecestate.rb | QuartzTorrent.MetainfoPieceState.savePiece | def savePiece(pieceIndex, data)
id = @pieceManager.writeBlock pieceIndex, 0, data
@pieceManagerRequests[id] = PieceManagerRequestMetadata.new(:write, pieceIndex)
id
end | ruby | def savePiece(pieceIndex, data)
id = @pieceManager.writeBlock pieceIndex, 0, data
@pieceManagerRequests[id] = PieceManagerRequestMetadata.new(:write, pieceIndex)
id
end | [
"def",
"savePiece",
"(",
"pieceIndex",
",",
"data",
")",
"id",
"=",
"@pieceManager",
".",
"writeBlock",
"pieceIndex",
",",
"0",
",",
"data",
"@pieceManagerRequests",
"[",
"id",
"]",
"=",
"PieceManagerRequestMetadata",
".",
"new",
"(",
":write",
",",
"pieceIndex",
")",
"id",
"end"
] | Save the specified piece to disk asynchronously. | [
"Save",
"the",
"specified",
"piece",
"to",
"disk",
"asynchronously",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/metainfopiecestate.rb#L141-L145 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/metainfopiecestate.rb | QuartzTorrent.MetainfoPieceState.readPiece | def readPiece(pieceIndex)
length = BlockSize
length = @lastPieceLength if pieceIndex == @numPieces - 1
id = @pieceManager.readBlock pieceIndex, 0, length
#result = manager.nextResult
@pieceManagerRequests[id] = PieceManagerRequestMetadata.new(:read, pieceIndex)
id
end | ruby | def readPiece(pieceIndex)
length = BlockSize
length = @lastPieceLength if pieceIndex == @numPieces - 1
id = @pieceManager.readBlock pieceIndex, 0, length
#result = manager.nextResult
@pieceManagerRequests[id] = PieceManagerRequestMetadata.new(:read, pieceIndex)
id
end | [
"def",
"readPiece",
"(",
"pieceIndex",
")",
"length",
"=",
"BlockSize",
"length",
"=",
"@lastPieceLength",
"if",
"pieceIndex",
"==",
"@numPieces",
"-",
"1",
"id",
"=",
"@pieceManager",
".",
"readBlock",
"pieceIndex",
",",
"0",
",",
"length",
"@pieceManagerRequests",
"[",
"id",
"]",
"=",
"PieceManagerRequestMetadata",
".",
"new",
"(",
":read",
",",
"pieceIndex",
")",
"id",
"end"
] | Read a piece from disk. This method is asynchronous; it returns a handle that can be later
used to retrieve the result. | [
"Read",
"a",
"piece",
"from",
"disk",
".",
"This",
"method",
"is",
"asynchronous",
";",
"it",
"returns",
"a",
"handle",
"that",
"can",
"be",
"later",
"used",
"to",
"retrieve",
"the",
"result",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/metainfopiecestate.rb#L149-L156 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/metainfopiecestate.rb | QuartzTorrent.MetainfoPieceState.checkResults | def checkResults
results = []
while true
result = @pieceManager.nextResult
break if ! result
results.push result
metaData = @pieceManagerRequests.delete(result.requestId)
if ! metaData
@logger.error "Can't find metadata for PieceManager request #{result.requestId}"
next
end
if metaData.type == :write
if result.successful?
@completePieces.set(metaData.data)
else
@requestedPieces.clear(metaData.data)
@pieceRequestTime[metaData.data] = nil
@logger.error "Writing metainfo piece failed: #{result.error}"
end
elsif metaData.type == :read
if ! result.successful?
@logger.error "Reading metainfo piece failed: #{result.error}"
end
end
end
results
end | ruby | def checkResults
results = []
while true
result = @pieceManager.nextResult
break if ! result
results.push result
metaData = @pieceManagerRequests.delete(result.requestId)
if ! metaData
@logger.error "Can't find metadata for PieceManager request #{result.requestId}"
next
end
if metaData.type == :write
if result.successful?
@completePieces.set(metaData.data)
else
@requestedPieces.clear(metaData.data)
@pieceRequestTime[metaData.data] = nil
@logger.error "Writing metainfo piece failed: #{result.error}"
end
elsif metaData.type == :read
if ! result.successful?
@logger.error "Reading metainfo piece failed: #{result.error}"
end
end
end
results
end | [
"def",
"checkResults",
"results",
"=",
"[",
"]",
"while",
"true",
"result",
"=",
"@pieceManager",
".",
"nextResult",
"break",
"if",
"!",
"result",
"results",
".",
"push",
"result",
"metaData",
"=",
"@pieceManagerRequests",
".",
"delete",
"(",
"result",
".",
"requestId",
")",
"if",
"!",
"metaData",
"@logger",
".",
"error",
"\"Can't find metadata for PieceManager request #{result.requestId}\"",
"next",
"end",
"if",
"metaData",
".",
"type",
"==",
":write",
"if",
"result",
".",
"successful?",
"@completePieces",
".",
"set",
"(",
"metaData",
".",
"data",
")",
"else",
"@requestedPieces",
".",
"clear",
"(",
"metaData",
".",
"data",
")",
"@pieceRequestTime",
"[",
"metaData",
".",
"data",
"]",
"=",
"nil",
"@logger",
".",
"error",
"\"Writing metainfo piece failed: #{result.error}\"",
"end",
"elsif",
"metaData",
".",
"type",
"==",
":read",
"if",
"!",
"result",
".",
"successful?",
"@logger",
".",
"error",
"\"Reading metainfo piece failed: #{result.error}\"",
"end",
"end",
"end",
"results",
"end"
] | Check the results of savePiece and readPiece. This method returns a list
of the PieceManager results. | [
"Check",
"the",
"results",
"of",
"savePiece",
"and",
"readPiece",
".",
"This",
"method",
"returns",
"a",
"list",
"of",
"the",
"PieceManager",
"results",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/metainfopiecestate.rb#L160-L189 | train |
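Taken together, savePiece, readPiece and checkResults form an asynchronous request/poll cycle; the sketch below shows the intended flow, assuming an already constructed MetainfoPieceState named state and made-up block data.

  piece_bytes = "\x00" * 16_384                 # placeholder block data
  request_id  = state.savePiece(0, piece_bytes) # returns immediately with a handle

  # ... later, typically on each pass of the event loop ...
  state.checkResults.each do |result|
    if result.successful?
      # the write (or read) completed; the piece is now tracked as complete
    else
      # the request failed; the piece is cleared so it can be requested again
    end
  end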
jeffwilliams/quartz-torrent | lib/quartz_torrent/metainfopiecestate.rb | QuartzTorrent.MetainfoPieceState.findRequestablePieces | def findRequestablePieces
piecesRequired = []
removeOldRequests
@numPieces.times do |pieceIndex|
piecesRequired.push pieceIndex if ! @completePieces.set?(pieceIndex) && ! @requestedPieces.set?(pieceIndex)
end
piecesRequired
end | ruby | def findRequestablePieces
piecesRequired = []
removeOldRequests
@numPieces.times do |pieceIndex|
piecesRequired.push pieceIndex if ! @completePieces.set?(pieceIndex) && ! @requestedPieces.set?(pieceIndex)
end
piecesRequired
end | [
"def",
"findRequestablePieces",
"piecesRequired",
"=",
"[",
"]",
"removeOldRequests",
"@numPieces",
".",
"times",
"do",
"|",
"pieceIndex",
"|",
"piecesRequired",
".",
"push",
"pieceIndex",
"if",
"!",
"@completePieces",
".",
"set?",
"(",
"pieceIndex",
")",
"&&",
"!",
"@requestedPieces",
".",
"set?",
"(",
"pieceIndex",
")",
"end",
"piecesRequired",
"end"
] | Return a list of torrent pieces that can still be requested. These are pieces that are not completed and are not requested. | [
"Return",
"a",
"list",
"of",
"torrent",
"pieces",
"that",
"can",
"still",
"be",
"requested",
".",
"These",
"are",
"pieces",
"that",
"are",
"not",
"completed",
"and",
"are",
"not",
"requested",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/metainfopiecestate.rb#L192-L202 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/metainfopiecestate.rb | QuartzTorrent.MetainfoPieceState.findRequestablePeers | def findRequestablePeers(classifiedPeers)
result = []
classifiedPeers.establishedPeers.each do |peer|
result.push peer if ! @badPeers.findByAddr(peer.trackerPeer.ip, peer.trackerPeer.port)
end
result
end | ruby | def findRequestablePeers(classifiedPeers)
result = []
classifiedPeers.establishedPeers.each do |peer|
result.push peer if ! @badPeers.findByAddr(peer.trackerPeer.ip, peer.trackerPeer.port)
end
result
end | [
"def",
"findRequestablePeers",
"(",
"classifiedPeers",
")",
"result",
"=",
"[",
"]",
"classifiedPeers",
".",
"establishedPeers",
".",
"each",
"do",
"|",
"peer",
"|",
"result",
".",
"push",
"peer",
"if",
"!",
"@badPeers",
".",
"findByAddr",
"(",
"peer",
".",
"trackerPeer",
".",
"ip",
",",
"peer",
".",
"trackerPeer",
".",
"port",
")",
"end",
"result",
"end"
] | Return a list of peers from whom we can request pieces. These are peers for whom we have an established connection, and
are not marked as bad. See markPeerBad. | [
"Return",
"a",
"list",
"of",
"peers",
"from",
"whom",
"we",
"can",
"request",
"pieces",
".",
"These",
"are",
"peers",
"for",
"whom",
"we",
"have",
"an",
"established",
"connection",
"and",
"are",
"not",
"marked",
"as",
"bad",
".",
"See",
"markPeerBad",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/metainfopiecestate.rb#L206-L214 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/metainfopiecestate.rb | QuartzTorrent.MetainfoPieceState.setPieceRequested | def setPieceRequested(pieceIndex, bool)
if bool
@requestedPieces.set pieceIndex
@pieceRequestTime[pieceIndex] = Time.new
else
@requestedPieces.clear pieceIndex
@pieceRequestTime[pieceIndex] = nil
end
end | ruby | def setPieceRequested(pieceIndex, bool)
if bool
@requestedPieces.set pieceIndex
@pieceRequestTime[pieceIndex] = Time.new
else
@requestedPieces.clear pieceIndex
@pieceRequestTime[pieceIndex] = nil
end
end | [
"def",
"setPieceRequested",
"(",
"pieceIndex",
",",
"bool",
")",
"if",
"bool",
"@requestedPieces",
".",
"set",
"pieceIndex",
"@pieceRequestTime",
"[",
"pieceIndex",
"]",
"=",
"Time",
".",
"new",
"else",
"@requestedPieces",
".",
"clear",
"pieceIndex",
"@pieceRequestTime",
"[",
"pieceIndex",
"]",
"=",
"nil",
"end",
"end"
] | Set whether the piece with the passed pieceIndex is requested or not. | [
"Set",
"whether",
"the",
"piece",
"with",
"the",
"passed",
"pieceIndex",
"is",
"requested",
"or",
"not",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/metainfopiecestate.rb#L217-L225 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/metainfopiecestate.rb | QuartzTorrent.MetainfoPieceState.removeOldRequests | def removeOldRequests
now = Time.new
@requestedPieces.length.times do |i|
if @requestedPieces.set? i
if now - @pieceRequestTime[i] > @requestTimeout
@requestedPieces.clear i
@pieceRequestTime[i] = nil
end
end
end
end | ruby | def removeOldRequests
now = Time.new
@requestedPieces.length.times do |i|
if @requestedPieces.set? i
if now - @pieceRequestTime[i] > @requestTimeout
@requestedPieces.clear i
@pieceRequestTime[i] = nil
end
end
end
end | [
"def",
"removeOldRequests",
"now",
"=",
"Time",
".",
"new",
"@requestedPieces",
".",
"length",
".",
"times",
"do",
"|",
"i",
"|",
"if",
"@requestedPieces",
".",
"set?",
"i",
"if",
"now",
"-",
"@pieceRequestTime",
"[",
"i",
"]",
">",
"@requestTimeout",
"@requestedPieces",
".",
"clear",
"i",
"@pieceRequestTime",
"[",
"i",
"]",
"=",
"nil",
"end",
"end",
"end",
"end"
] | Remove any pending requests after a timeout. | [
"Remove",
"any",
"pending",
"requests",
"after",
"a",
"timeout",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/metainfopiecestate.rb#L263-L273 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/reactor.rb | QuartzTorrent.Handler.scheduleTimer | def scheduleTimer(duration, metainfo = nil, recurring = true, immed = false)
@reactor.scheduleTimer(duration, metainfo, recurring, immed) if @reactor
end | ruby | def scheduleTimer(duration, metainfo = nil, recurring = true, immed = false)
@reactor.scheduleTimer(duration, metainfo, recurring, immed) if @reactor
end | [
"def",
"scheduleTimer",
"(",
"duration",
",",
"metainfo",
"=",
"nil",
",",
"recurring",
"=",
"true",
",",
"immed",
"=",
"false",
")",
"@reactor",
".",
"scheduleTimer",
"(",
"duration",
",",
"metainfo",
",",
"recurring",
",",
"immed",
")",
"if",
"@reactor",
"end"
] | Schedule a timer.
@param duration The duration of the timer in seconds
@param metainfo The metainfo to associate with the timer
@param recurring If true when the timer duration expires, the timer will be rescheduled. If false the timer
will not be rescheduled.
@param immed If true then the timer will expire immediately (the next pass through the event loop). If the timer
is also recurring it will then be rescheduled according to its duration. | [
"Schedule",
"a",
"timer",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/reactor.rb#L75-L77 | train |
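A sketch of scheduling a recurring timer from inside a handler; the callback method names of the reactor's handler interface are not shown in this record, so the clientInit/timerExpired hooks below are assumptions.

  class KeepaliveHandler < QuartzTorrent::Handler
    def clientInit(metainfo)                      # assumed hook name
      scheduleTimer(30, :keepalive, true, false)  # recurring 30-second timer
    end

    def timerExpired(metainfo)                    # assumed hook name
      # metainfo == :keepalive when the timer above fires
    end
  end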
jeffwilliams/quartz-torrent | lib/quartz_torrent/reactor.rb | QuartzTorrent.Handler.connect | def connect(addr, port, metainfo, timeout = nil)
@reactor.connect(addr, port, metainfo, timeout) if @reactor
end | ruby | def connect(addr, port, metainfo, timeout = nil)
@reactor.connect(addr, port, metainfo, timeout) if @reactor
end | [
"def",
"connect",
"(",
"addr",
",",
"port",
",",
"metainfo",
",",
"timeout",
"=",
"nil",
")",
"@reactor",
".",
"connect",
"(",
"addr",
",",
"port",
",",
"metainfo",
",",
"timeout",
")",
"if",
"@reactor",
"end"
] | Create a TCP connection to the specified host and port. Associate the passed metainfo with the IO representing the connection. | [
"Create",
"a",
"TCP",
"connection",
"to",
"the",
"specified",
"host",
"and",
"port",
".",
"Associate",
"the",
"passed",
"metainfo",
"with",
"the",
"IO",
"representing",
"the",
"connection",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/reactor.rb#L86-L88 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/reactor.rb | QuartzTorrent.IoFacade.read | def read(length)
data = ''
while data.length < length
begin
toRead = length-data.length
rateLimited = false
if @ioInfo.readRateLimit
avail = @ioInfo.readRateLimit.avail.to_i
if avail < toRead
toRead = avail
rateLimited = true
end
@ioInfo.readRateLimit.withdraw toRead
end
@logger.debug "IoFacade: must read: #{length} have read: #{data.length}. Reading #{toRead} bytes now" if @logger
data << @io.read_nonblock(toRead) if toRead > 0
# If we tried to read more than we are allowed to by rate limiting, yield.
Fiber.yield if rateLimited
rescue Errno::EWOULDBLOCK
# Wait for more data.
@logger.debug "IoFacade: read would block" if @logger
Fiber.yield
rescue Errno::EAGAIN, Errno::EINTR
# Wait for more data.
@logger.debug "IoFacade: read was interrupted" if @logger
Fiber.yield
rescue
@logger.debug "IoFacade: read error: #{$!}" if @logger
# Read failure occurred
@ioInfo.lastReadError = $!
if @ioInfo.useErrorhandler
@ioInfo.state = :error
Fiber.yield
else
raise $!
end
end
end
data
end | ruby | def read(length)
data = ''
while data.length < length
begin
toRead = length-data.length
rateLimited = false
if @ioInfo.readRateLimit
avail = @ioInfo.readRateLimit.avail.to_i
if avail < toRead
toRead = avail
rateLimited = true
end
@ioInfo.readRateLimit.withdraw toRead
end
@logger.debug "IoFacade: must read: #{length} have read: #{data.length}. Reading #{toRead} bytes now" if @logger
data << @io.read_nonblock(toRead) if toRead > 0
# If we tried to read more than we are allowed to by rate limiting, yield.
Fiber.yield if rateLimited
rescue Errno::EWOULDBLOCK
# Wait for more data.
@logger.debug "IoFacade: read would block" if @logger
Fiber.yield
rescue Errno::EAGAIN, Errno::EINTR
# Wait for more data.
@logger.debug "IoFacade: read was interrupted" if @logger
Fiber.yield
rescue
@logger.debug "IoFacade: read error: #{$!}" if @logger
# Read failure occurred
@ioInfo.lastReadError = $!
if @ioInfo.useErrorhandler
@ioInfo.state = :error
Fiber.yield
else
raise $!
end
end
end
data
end | [
"def",
"read",
"(",
"length",
")",
"data",
"=",
"''",
"while",
"data",
".",
"length",
"<",
"length",
"begin",
"toRead",
"=",
"length",
"-",
"data",
".",
"length",
"rateLimited",
"=",
"false",
"if",
"@ioInfo",
".",
"readRateLimit",
"avail",
"=",
"@ioInfo",
".",
"readRateLimit",
".",
"avail",
".",
"to_i",
"if",
"avail",
"<",
"toRead",
"toRead",
"=",
"avail",
"rateLimited",
"=",
"true",
"end",
"@ioInfo",
".",
"readRateLimit",
".",
"withdraw",
"toRead",
"end",
"@logger",
".",
"debug",
"\"IoFacade: must read: #{length} have read: #{data.length}. Reading #{toRead} bytes now\"",
"if",
"@logger",
"data",
"<<",
"@io",
".",
"read_nonblock",
"(",
"toRead",
")",
"if",
"toRead",
">",
"0",
"Fiber",
".",
"yield",
"if",
"rateLimited",
"rescue",
"Errno",
"::",
"EWOULDBLOCK",
"@logger",
".",
"debug",
"\"IoFacade: read would block\"",
"if",
"@logger",
"Fiber",
".",
"yield",
"rescue",
"Errno",
"::",
"EAGAIN",
",",
"Errno",
"::",
"EINTR",
"@logger",
".",
"debug",
"\"IoFacade: read was interrupted\"",
"if",
"@logger",
"Fiber",
".",
"yield",
"rescue",
"@logger",
".",
"debug",
"\"IoFacade: read error: #{$!}\"",
"if",
"@logger",
"@ioInfo",
".",
"lastReadError",
"=",
"$!",
"if",
"@ioInfo",
".",
"useErrorhandler",
"@ioInfo",
".",
"state",
"=",
":error",
"Fiber",
".",
"yield",
"else",
"raise",
"$!",
"end",
"end",
"end",
"data",
"end"
] | Read `length` bytes. | [
"Read",
"length",
"bytes",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/reactor.rb#L230-L269 | train |
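Because read suspends the calling Fiber (via Fiber.yield) whenever data is not yet available or the rate limit is exhausted, handler code can treat the IO as if it were blocking and read fixed-size messages in one call. A hedged sketch, assuming currentIo inside a handler callback is such an IoFacade and that the 4-byte length prefix is an illustrative protocol detail:

# Read a length-prefixed message; each read call transparently yields back to the
# reactor until enough bytes have arrived (or the rate limiter releases more budget).
header  = currentIo.read(4)
length  = header.unpack("N").first
payload = currentIo.read(length)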
jeffwilliams/quartz-torrent | lib/quartz_torrent/reactor.rb | QuartzTorrent.Reactor.connect | def connect(addr, port, metainfo, timeout = nil)
ioInfo = startConnection(port, addr, metainfo)
@ioInfo[ioInfo.io] = ioInfo
if timeout && ioInfo.state == :connecting
ioInfo.connectTimeout = timeout
ioInfo.connectTimer = scheduleTimer(timeout, InternalTimerInfo.new(:connect_timeout, ioInfo), false)
end
end | ruby | def connect(addr, port, metainfo, timeout = nil)
ioInfo = startConnection(port, addr, metainfo)
@ioInfo[ioInfo.io] = ioInfo
if timeout && ioInfo.state == :connecting
ioInfo.connectTimeout = timeout
ioInfo.connectTimer = scheduleTimer(timeout, InternalTimerInfo.new(:connect_timeout, ioInfo), false)
end
end | [
"def",
"connect",
"(",
"addr",
",",
"port",
",",
"metainfo",
",",
"timeout",
"=",
"nil",
")",
"ioInfo",
"=",
"startConnection",
"(",
"port",
",",
"addr",
",",
"metainfo",
")",
"@ioInfo",
"[",
"ioInfo",
".",
"io",
"]",
"=",
"ioInfo",
"if",
"timeout",
"&&",
"ioInfo",
".",
"state",
"==",
":connecting",
"ioInfo",
".",
"connectTimeout",
"=",
"timeout",
"ioInfo",
".",
"connectTimer",
"=",
"scheduleTimer",
"(",
"timeout",
",",
"InternalTimerInfo",
".",
"new",
"(",
":connect_timeout",
",",
"ioInfo",
")",
",",
"false",
")",
"end",
"end"
] | Create a TCP connection to the specified host.
Note that this method may raise exceptions. For example, 'Too many open files' might be raised if
the process is using too many file descriptors. | [
"Create",
"a",
"TCP",
"connection",
"to",
"the",
"specified",
"host",
".",
"Note",
"that",
"this",
"method",
"may",
"raise",
"exceptions",
".",
"For",
"example",
"Too",
"many",
"open",
"files",
"might",
"be",
"raised",
"if",
"the",
"process",
"is",
"using",
"too",
"many",
"file",
"descriptors"
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/reactor.rb#L422-L429 | train |
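A small sketch of an outgoing connection with a connect timeout; the reactor variable and the metainfo tag are assumptions, while the (addr, port, metainfo, timeout) order matches the method above:

# Connect to 10.0.0.5:6881, tag the IO with :tracker_peer so callbacks can identify it,
# and give up (firing the connect-timeout timer) after 8 seconds.
reactor.connect("10.0.0.5", 6881, :tracker_peer, 8)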
jeffwilliams/quartz-torrent | lib/quartz_torrent/reactor.rb | QuartzTorrent.Reactor.listen | def listen(addr, port, metainfo)
listener = Socket.new( AF_INET, SOCK_STREAM, 0 )
sockaddr = Socket.pack_sockaddr_in( port, "0.0.0.0" )
listener.setsockopt(Socket::SOL_SOCKET,Socket::SO_REUSEADDR, true)
listener.bind( sockaddr )
@logger.debug "listening on port #{port}" if @logger
listener.listen( @listenBacklog )
info = IOInfo.new(listener, metainfo)
info.readFiberIoFacade.logger = @logger if @logger
info.state = :listening
@ioInfo[info.io] = info
end | ruby | def listen(addr, port, metainfo)
listener = Socket.new( AF_INET, SOCK_STREAM, 0 )
sockaddr = Socket.pack_sockaddr_in( port, "0.0.0.0" )
listener.setsockopt(Socket::SOL_SOCKET,Socket::SO_REUSEADDR, true)
listener.bind( sockaddr )
@logger.debug "listening on port #{port}" if @logger
listener.listen( @listenBacklog )
info = IOInfo.new(listener, metainfo)
info.readFiberIoFacade.logger = @logger if @logger
info.state = :listening
@ioInfo[info.io] = info
end | [
"def",
"listen",
"(",
"addr",
",",
"port",
",",
"metainfo",
")",
"listener",
"=",
"Socket",
".",
"new",
"(",
"AF_INET",
",",
"SOCK_STREAM",
",",
"0",
")",
"sockaddr",
"=",
"Socket",
".",
"pack_sockaddr_in",
"(",
"port",
",",
"\"0.0.0.0\"",
")",
"listener",
".",
"setsockopt",
"(",
"Socket",
"::",
"SOL_SOCKET",
",",
"Socket",
"::",
"SO_REUSEADDR",
",",
"true",
")",
"listener",
".",
"bind",
"(",
"sockaddr",
")",
"@logger",
".",
"debug",
"\"listening on port #{port}\"",
"if",
"@logger",
"listener",
".",
"listen",
"(",
"@listenBacklog",
")",
"info",
"=",
"IOInfo",
".",
"new",
"(",
"listener",
",",
"metainfo",
")",
"info",
".",
"readFiberIoFacade",
".",
"logger",
"=",
"@logger",
"if",
"@logger",
"info",
".",
"state",
"=",
":listening",
"@ioInfo",
"[",
"info",
".",
"io",
"]",
"=",
"info",
"end"
] | Create a TCP server that listens for connections on the specified
port | [
"Create",
"a",
"TCP",
"server",
"that",
"listens",
"for",
"connections",
"on",
"the",
"specified",
"port"
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/reactor.rb#L433-L445 | train |
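The listening counterpart; note that although an addr argument is accepted, the implementation above binds to 0.0.0.0 unconditionally, so the address is effectively ignored. The reactor variable and metainfo tag are assumptions:

# Accept incoming connections on port 6881 (bound to all interfaces).
reactor.listen("0.0.0.0", 6881, :listener)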
jeffwilliams/quartz-torrent | lib/quartz_torrent/reactor.rb | QuartzTorrent.Reactor.open | def open(path, mode, metainfo, useErrorhandler = true)
file = File.open(path, mode)
info = IOInfo.new(file, metainfo, true)
info.useErrorhandler = useErrorhandler
info.readFiberIoFacade.logger = @logger if @logger
info.state = :connected
@ioInfo[info.io] = info
end | ruby | def open(path, mode, metainfo, useErrorhandler = true)
file = File.open(path, mode)
info = IOInfo.new(file, metainfo, true)
info.useErrorhandler = useErrorhandler
info.readFiberIoFacade.logger = @logger if @logger
info.state = :connected
@ioInfo[info.io] = info
end | [
"def",
"open",
"(",
"path",
",",
"mode",
",",
"metainfo",
",",
"useErrorhandler",
"=",
"true",
")",
"file",
"=",
"File",
".",
"open",
"(",
"path",
",",
"mode",
")",
"info",
"=",
"IOInfo",
".",
"new",
"(",
"file",
",",
"metainfo",
",",
"true",
")",
"info",
".",
"useErrorhandler",
"=",
"useErrorhandler",
"info",
".",
"readFiberIoFacade",
".",
"logger",
"=",
"@logger",
"if",
"@logger",
"info",
".",
"state",
"=",
":connected",
"@ioInfo",
"[",
"info",
".",
"io",
"]",
"=",
"info",
"end"
] | Open the specified file for the specified mode. | [
"Open",
"the",
"specified",
"file",
"for",
"the",
"specified",
"mode",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/reactor.rb#L448-L456 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/reactor.rb | QuartzTorrent.Reactor.start | def start
while true
begin
break if eventLoopBody == :halt
rescue
@logger.error "Unexpected exception in reactor event loop: #{$!}" if @logger
@logger.error $!.backtrace.join "\n" if @logger
end
end
@logger.info "Reactor shutting down" if @logger
# Event loop finished
@ioInfo.each do |k,v|
k.close
end
end | ruby | def start
while true
begin
break if eventLoopBody == :halt
rescue
@logger.error "Unexpected exception in reactor event loop: #{$!}" if @logger
@logger.error $!.backtrace.join "\n" if @logger
end
end
@logger.info "Reactor shutting down" if @logger
# Event loop finished
@ioInfo.each do |k,v|
k.close
end
end | [
"def",
"start",
"while",
"true",
"begin",
"break",
"if",
"eventLoopBody",
"==",
":halt",
"rescue",
"@logger",
".",
"error",
"\"Unexpected exception in reactor event loop: #{$!}\"",
"if",
"@logger",
"@logger",
".",
"error",
"$!",
".",
"backtrace",
".",
"join",
"\"\\n\"",
"if",
"@logger",
"end",
"end",
"@logger",
".",
"info",
"\"Reactor shutting down\"",
"if",
"@logger",
"@ioInfo",
".",
"each",
"do",
"|",
"k",
",",
"v",
"|",
"k",
".",
"close",
"end",
"end"
] | Run event loop | [
"Run",
"event",
"loop"
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/reactor.rb#L465-L482 | train |
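Tying the pieces together, the reactor is driven by calling start, which loops over eventLoopBody until a handler requests a halt and then closes every registered IO. The construction of the Reactor and Handler below is a hypothetical sketch (their constructors are not shown in this excerpt); only listen and start reflect signatures documented here:

handler = MyTorrentHandler.new                 # some QuartzTorrent::Handler subclass (assumed)
reactor = QuartzTorrent::Reactor.new(handler)  # constructor arguments assumed
reactor.listen("0.0.0.0", 6881, :listener)
reactor.start                                  # blocks until the event loop returns :halt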
jeffwilliams/quartz-torrent | lib/quartz_torrent/reactor.rb | QuartzTorrent.Reactor.findIoByMetainfo | def findIoByMetainfo(metainfo)
@ioInfo.each_value do |info|
if info.metainfo == metainfo
io = info.readFiberIoFacade
# Don't allow read calls from timer handlers. This is to prevent a complex situation.
# See the processTimer call in eventLoopBody for more info
io = WriteOnlyIoFacade.new(info) if @currentHandlerCallback == :timer
return io
end
end
nil
end | ruby | def findIoByMetainfo(metainfo)
@ioInfo.each_value do |info|
if info.metainfo == metainfo
io = info.readFiberIoFacade
# Don't allow read calls from timer handlers. This is to prevent a complex situation.
# See the processTimer call in eventLoopBody for more info
io = WriteOnlyIoFacade.new(info) if @currentHandlerCallback == :timer
return io
end
end
nil
end | [
"def",
"findIoByMetainfo",
"(",
"metainfo",
")",
"@ioInfo",
".",
"each_value",
"do",
"|",
"info",
"|",
"if",
"info",
".",
"metainfo",
"==",
"metainfo",
"io",
"=",
"info",
".",
"readFiberIoFacade",
"io",
"=",
"WriteOnlyIoFacade",
".",
"new",
"(",
"info",
")",
"if",
"@currentHandlerCallback",
"==",
":timer",
"return",
"io",
"end",
"end",
"nil",
"end"
] | Meant to be called from the handler. Find an IO by metainfo. The == operator is used to
match the metainfo. | [
"Meant",
"to",
"be",
"called",
"from",
"the",
"handler",
".",
"Find",
"an",
"IO",
"by",
"metainfo",
".",
"The",
"==",
"operator",
"is",
"used",
"to",
"match",
"the",
"metainfo",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/reactor.rb#L569-L580 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/reactor.rb | QuartzTorrent.Reactor.handleAccept | def handleAccept(ioInfo)
socket, clientAddr = ioInfo.io.accept
info = IOInfo.new(socket, ioInfo.metainfo)
info.readFiberIoFacade.logger = @logger if @logger
info.state = :connected
@ioInfo[info.io] = info
if @logger
port, addr = Socket.unpack_sockaddr_in(clientAddr)
@logger.debug "Accepted connection from #{addr}:#{port}" if @logger
end
[info, addr, port]
end | ruby | def handleAccept(ioInfo)
socket, clientAddr = ioInfo.io.accept
info = IOInfo.new(socket, ioInfo.metainfo)
info.readFiberIoFacade.logger = @logger if @logger
info.state = :connected
@ioInfo[info.io] = info
if @logger
port, addr = Socket.unpack_sockaddr_in(clientAddr)
@logger.debug "Accepted connection from #{addr}:#{port}" if @logger
end
[info, addr, port]
end | [
"def",
"handleAccept",
"(",
"ioInfo",
")",
"socket",
",",
"clientAddr",
"=",
"ioInfo",
".",
"io",
".",
"accept",
"info",
"=",
"IOInfo",
".",
"new",
"(",
"socket",
",",
"ioInfo",
".",
"metainfo",
")",
"info",
".",
"readFiberIoFacade",
".",
"logger",
"=",
"@logger",
"if",
"@logger",
"info",
".",
"state",
"=",
":connected",
"@ioInfo",
"[",
"info",
".",
"io",
"]",
"=",
"info",
"if",
"@logger",
"port",
",",
"addr",
"=",
"Socket",
".",
"unpack_sockaddr_in",
"(",
"clientAddr",
")",
"@logger",
".",
"debug",
"\"Accepted connection from #{addr}:#{port}\"",
"if",
"@logger",
"end",
"[",
"info",
",",
"addr",
",",
"port",
"]",
"end"
] | Given the ioInfo for a listening socket, call accept and return the new ioInfo for the
client's socket | [
"Given",
"the",
"ioInfo",
"for",
"a",
"listening",
"socket",
"call",
"accept",
"and",
"return",
"the",
"new",
"ioInfo",
"for",
"the",
"client",
"s",
"socket"
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/reactor.rb#L813-L825 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/reactor.rb | QuartzTorrent.Reactor.withReadFiber | def withReadFiber(ioInfo)
if ioInfo.readFiber.nil? || ! ioInfo.readFiber.alive?
ioInfo.readFiber = Fiber.new do |ioInfo|
yield ioInfo.readFiberIoFacade
end
end
# Allow handler to read some data.
# This call will return either if:
# 1. the handler needs more data but it isn't available yet,
# 2. if it's read all the data it wanted to read for the current message it's building
# 3. if a read error occurred.
#
# In case 2 the latter case the fiber will be dead. In cases 1 and 2, we should select on the socket
# until data is ready. For case 3, the state of the ioInfo is set to error and the io should be
# removed.
ioInfo.readFiber.resume(ioInfo)
if ioInfo.state == :error
@currentHandlerCallback = :error
@handler.error(ioInfo.metainfo, ioInfo.lastReadError)
disposeIo(ioInfo)
end
end | ruby | def withReadFiber(ioInfo)
if ioInfo.readFiber.nil? || ! ioInfo.readFiber.alive?
ioInfo.readFiber = Fiber.new do |ioInfo|
yield ioInfo.readFiberIoFacade
end
end
# Allow handler to read some data.
# This call will return either if:
# 1. the handler needs more data but it isn't available yet,
# 2. if it's read all the data it wanted to read for the current message it's building
# 3. if a read error occurred.
#
# In case 2 (the latter case) the fiber will be dead. In cases 1 and 2, we should select on the socket
# until data is ready. For case 3, the state of the ioInfo is set to error and the io should be
# removed.
ioInfo.readFiber.resume(ioInfo)
if ioInfo.state == :error
@currentHandlerCallback = :error
@handler.error(ioInfo.metainfo, ioInfo.lastReadError)
disposeIo(ioInfo)
end
end | [
"def",
"withReadFiber",
"(",
"ioInfo",
")",
"if",
"ioInfo",
".",
"readFiber",
".",
"nil?",
"||",
"!",
"ioInfo",
".",
"readFiber",
".",
"alive?",
"ioInfo",
".",
"readFiber",
"=",
"Fiber",
".",
"new",
"do",
"|",
"ioInfo",
"|",
"yield",
"ioInfo",
".",
"readFiberIoFacade",
"end",
"end",
"ioInfo",
".",
"readFiber",
".",
"resume",
"(",
"ioInfo",
")",
"if",
"ioInfo",
".",
"state",
"==",
":error",
"@currentHandlerCallback",
"=",
":error",
"@handler",
".",
"error",
"(",
"ioInfo",
".",
"metainfo",
",",
"ioInfo",
".",
"lastReadError",
")",
"disposeIo",
"(",
"ioInfo",
")",
"end",
"end"
] | Call the passed block in the context of the read Fiber. Basically the
passed block is run as normal, but if the block performs a read from an io and that
read would block, the block is paused, and withReadFiber returns. The next time withReadFiber
is called the block will be resumed at the point of the read. | [
"Call",
"the",
"passed",
"block",
"in",
"the",
"context",
"of",
"the",
"read",
"Fiber",
".",
"Basically",
"the",
"passed",
"block",
"is",
"run",
"as",
"normal",
"but",
"if",
"the",
"block",
"performs",
"a",
"read",
"from",
"an",
"io",
"and",
"that",
"read",
"would",
"block",
"the",
"block",
"is",
"paused",
"and",
"withReadFiber",
"returns",
".",
"The",
"next",
"time",
"withReadFiber",
"is",
"called",
"the",
"block",
"will",
"be",
"resumed",
"at",
"the",
"point",
"of",
"the",
"read",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/reactor.rb#L856-L879 | train |
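The machinery above is built on plain Ruby Fibers: the read logic runs inside a Fiber, Fiber.yield suspends it whenever a read would block, and a later resume continues exactly where it stopped. A minimal, self-contained illustration of that suspend/resume pattern, independent of the reactor classes:

reader = Fiber.new do |chunks|
  data = ''
  while data.length < 10
    part = chunks.shift
    Fiber.yield unless part      # nothing buffered yet: suspend until resumed
    data << part if part
  end
  data
end

buffer = []
reader.resume(buffer)            # suspends immediately, buffer is empty
buffer << "hello"
reader.resume                    # consumes "hello", then suspends again
buffer << "world!"
puts reader.resume               # fiber finishes and returns "helloworld!"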
jeffwilliams/quartz-torrent | lib/quartz_torrent/peermanager.rb | QuartzTorrent.PeerManager.manageConnections | def manageConnections(classifiedPeers)
n = classifiedPeers.handshakingPeers.size + classifiedPeers.establishedPeers.size
if n < @targetActivePeerCount
result = classifiedPeers.disconnectedPeers.shuffle.first(@targetActivePeerCount - n)
@logger.debug "There are #{n} peers connected or in handshaking. Will establish #{result.size} more connections to peers."
result
else
[]
end
end | ruby | def manageConnections(classifiedPeers)
n = classifiedPeers.handshakingPeers.size + classifiedPeers.establishedPeers.size
if n < @targetActivePeerCount
result = classifiedPeers.disconnectedPeers.shuffle.first(@targetActivePeerCount - n)
@logger.debug "There are #{n} peers connected or in handshaking. Will establish #{result.size} more connections to peers."
result
else
[]
end
end | [
"def",
"manageConnections",
"(",
"classifiedPeers",
")",
"n",
"=",
"classifiedPeers",
".",
"handshakingPeers",
".",
"size",
"+",
"classifiedPeers",
".",
"establishedPeers",
".",
"size",
"if",
"n",
"<",
"@targetActivePeerCount",
"result",
"=",
"classifiedPeers",
".",
"disconnectedPeers",
".",
"shuffle",
".",
"first",
"(",
"@targetActivePeerCount",
"-",
"n",
")",
"@logger",
".",
"debug",
"\"There are #{n} peers connected or in handshaking. Will establish #{result.size} more connections to peers.\"",
"result",
"else",
"[",
"]",
"end",
"end"
] | Determine if we need to connect to more peers.
Returns a list of peers to connect to. | [
"Determine",
"if",
"we",
"need",
"to",
"connect",
"to",
"more",
"peers",
".",
"Returns",
"a",
"list",
"of",
"peers",
"to",
"connect",
"to",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peermanager.rb#L46-L56 | train |
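A worked example of the selection above: with a target of 50 active peers, 12 handshaking and 30 established connections, n = 42 and up to 8 randomly chosen disconnected peers are returned. The core of the decision, with disconnected standing in for classifiedPeers.disconnectedPeers:

target = 50
n = 12 + 30                                                              # handshaking + established
to_connect = n < target ? disconnected.shuffle.first(target - n) : []   # at most 8 peers here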
jeffwilliams/quartz-torrent | lib/quartz_torrent/peermanager.rb | QuartzTorrent.PeerManager.selectOptimisticPeer | def selectOptimisticPeer(classifiedPeers)
# "at any one time there is a single peer which is unchoked regardless of its upload rate (if interested, it counts as one of the four allowed downloaders). Which peer is optimistically
# unchoked rotates every 30 seconds. Newly connected peers are three times as likely to start as the current optimistic unchoke as anywhere else in the rotation. This gives them a decent chance
# of getting a complete piece to upload."
if !@lastOptimisticPeerChangeTime || (Time.new - @lastOptimisticPeerChangeTime > @optimisticPeerChangeDuration)
list = []
classifiedPeers.establishedPeers.each do |peer|
if (Time.new - peer.firstEstablishTime) < @newlyConnectedDuration
3.times{ list.push peer }
else
list.push peer
end
end
@optimisticUnchokePeer = list[rand(list.size)]
if @optimisticUnchokePeer
@logger.info "Optimistically unchoked peer set to #{@optimisticUnchokePeer.trackerPeer}"
@lastOptimisticPeerChangeTime = Time.new
end
end
end | ruby | def selectOptimisticPeer(classifiedPeers)
# "at any one time there is a single peer which is unchoked regardless of its upload rate (if interested, it counts as one of the four allowed downloaders). Which peer is optimistically
# unchoked rotates every 30 seconds. Newly connected peers are three times as likely to start as the current optimistic unchoke as anywhere else in the rotation. This gives them a decent chance
# of getting a complete piece to upload."
if !@lastOptimisticPeerChangeTime || (Time.new - @lastOptimisticPeerChangeTime > @optimisticPeerChangeDuration)
list = []
classifiedPeers.establishedPeers.each do |peer|
if (Time.new - peer.firstEstablishTime) < @newlyConnectedDuration
3.times{ list.push peer }
else
list.push peer
end
end
@optimisticUnchokePeer = list[rand(list.size)]
if @optimisticUnchokePeer
@logger.info "Optimistically unchoked peer set to #{@optimisticUnchokePeer.trackerPeer}"
@lastOptimisticPeerChangeTime = Time.new
end
end
end | [
"def",
"selectOptimisticPeer",
"(",
"classifiedPeers",
")",
"if",
"!",
"@lastOptimisticPeerChangeTime",
"||",
"(",
"Time",
".",
"new",
"-",
"@lastOptimisticPeerChangeTime",
">",
"@optimisticPeerChangeDuration",
")",
"list",
"=",
"[",
"]",
"classifiedPeers",
".",
"establishedPeers",
".",
"each",
"do",
"|",
"peer",
"|",
"if",
"(",
"Time",
".",
"new",
"-",
"peer",
".",
"firstEstablishTime",
")",
"<",
"@newlyConnectedDuration",
"3",
".",
"times",
"{",
"list",
".",
"push",
"peer",
"}",
"else",
"list",
".",
"push",
"peer",
"end",
"end",
"@optimisticUnchokePeer",
"=",
"list",
"[",
"rand",
"(",
"list",
".",
"size",
")",
"]",
"if",
"@optimisticUnchokePeer",
"@logger",
".",
"info",
"\"Optimistically unchoked peer set to #{@optimisticUnchokePeer.trackerPeer}\"",
"@lastOptimisticPeerChangeTime",
"=",
"Time",
".",
"new",
"end",
"end",
"end"
] | Choose a peer that we will optimistically unchoke. | [
"Choose",
"a",
"peer",
"that",
"we",
"will",
"optimistically",
"unchoke",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peermanager.rb#L148-L168 | train |
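The "three times as likely" rule is implemented purely by list duplication: newly connected peers are pushed into the candidate list three times before a uniform random index is drawn. A stripped-down illustration of that weighting trick (peers and newly_connected? are placeholders):

candidates = []
peers.each do |peer|
  copies = newly_connected?(peer) ? 3 : 1
  copies.times { candidates << peer }
end
optimistic = candidates[rand(candidates.size)]   # new peers get 3x the probability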
rjurado01/rapidoc | lib/rapidoc/controller_extractor.rb | Rapidoc.ControllerExtractor.extract_blocks | def extract_blocks( lines )
init_doc_lines = lines.each_index.select{ |i| lines[i].include? "=begin" }
end_doc_lines = lines.each_index.select{ |i| lines[i].include? "=end" }
blocks = init_doc_lines.each_index.map do |i|
{ :init => init_doc_lines[i], :end => end_doc_lines[i] }
end
end | ruby | def extract_blocks( lines )
init_doc_lines = lines.each_index.select{ |i| lines[i].include? "=begin" }
end_doc_lines = lines.each_index.select{ |i| lines[i].include? "=end" }
blocks = init_doc_lines.each_index.map do |i|
{ :init => init_doc_lines[i], :end => end_doc_lines[i] }
end
end | [
"def",
"extract_blocks",
"(",
"lines",
")",
"init_doc_lines",
"=",
"lines",
".",
"each_index",
".",
"select",
"{",
"|",
"i",
"|",
"lines",
"[",
"i",
"]",
".",
"include?",
"\"=begin\"",
"}",
"end_doc_lines",
"=",
"lines",
".",
"each_index",
".",
"select",
"{",
"|",
"i",
"|",
"lines",
"[",
"i",
"]",
".",
"include?",
"\"=end\"",
"}",
"blocks",
"=",
"init_doc_lines",
".",
"each_index",
".",
"map",
"do",
"|",
"i",
"|",
"{",
":init",
"=>",
"init_doc_lines",
"[",
"i",
"]",
",",
":end",
"=>",
"end_doc_lines",
"[",
"i",
"]",
"}",
"end",
"end"
] | Gets init and end lines of each comment block | [
"Gets",
"init",
"and",
"end",
"lines",
"of",
"each",
"comment",
"block"
] | 03b7a8f29a37dd03f4ed5036697b48551d3b4ae6 | https://github.com/rjurado01/rapidoc/blob/03b7a8f29a37dd03f4ed5036697b48551d3b4ae6/lib/rapidoc/controller_extractor.rb#L45-L52 | train |
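A worked example of the extraction, assuming the method is called on a ControllerExtractor instance with the controller source already split into an array of lines, and that =begin/=end blocks are not nested; the method pairs the i-th =begin index with the i-th =end index:

lines = ["class UsersController", "=begin", " @resource users", "=end", "def index", "end"]
extract_blocks(lines)   # => [{ :init => 1, :end => 3 }]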
merqlove/do_snapshot | lib/do_snapshot/command.rb | DoSnapshot.Command.create_snapshot | def create_snapshot(droplet) # rubocop:disable MethodLength,Metrics/AbcSize
fail_if_shutdown(droplet)
logger.info "Start creating snapshot for droplet id: #{droplet.id} name: #{droplet.name}."
today = DateTime.now
name = "#{droplet.name}_#{today.strftime('%Y_%m_%d')}"
# noinspection RubyResolve
snapshot_size = api.snapshots(droplet).size
logger.debug 'Wait until snapshot will be created.'
api.create_snapshot droplet.id, name
snapshot_size += 1
logger.info "Snapshot name: #{name} created successfully."
logger.info "Droplet id: #{droplet.id} name: #{droplet.name} snapshots: #{snapshot_size}."
# Cleanup snapshots.
cleanup_snapshots droplet, snapshot_size if clean
rescue => e
case e.class.to_s
when 'DoSnapshot::SnapshotCleanupError'
raise e.class, e.message, e.backtrace
when 'DoSnapshot::DropletPowerError'
return
else
raise SnapshotCreateError.new(droplet.id), e.message, e.backtrace
end
end | ruby | def create_snapshot(droplet) # rubocop:disable MethodLength,Metrics/AbcSize
fail_if_shutdown(droplet)
logger.info "Start creating snapshot for droplet id: #{droplet.id} name: #{droplet.name}."
today = DateTime.now
name = "#{droplet.name}_#{today.strftime('%Y_%m_%d')}"
# noinspection RubyResolve
snapshot_size = api.snapshots(droplet).size
logger.debug 'Wait until snapshot will be created.'
api.create_snapshot droplet.id, name
snapshot_size += 1
logger.info "Snapshot name: #{name} created successfully."
logger.info "Droplet id: #{droplet.id} name: #{droplet.name} snapshots: #{snapshot_size}."
# Cleanup snapshots.
cleanup_snapshots droplet, snapshot_size if clean
rescue => e
case e.class.to_s
when 'DoSnapshot::SnapshotCleanupError'
raise e.class, e.message, e.backtrace
when 'DoSnapshot::DropletPowerError'
return
else
raise SnapshotCreateError.new(droplet.id), e.message, e.backtrace
end
end | [
"def",
"create_snapshot",
"(",
"droplet",
")",
"fail_if_shutdown",
"(",
"droplet",
")",
"logger",
".",
"info",
"\"Start creating snapshot for droplet id: #{droplet.id} name: #{droplet.name}.\"",
"today",
"=",
"DateTime",
".",
"now",
"name",
"=",
"\"#{droplet.name}_#{today.strftime('%Y_%m_%d')}\"",
"snapshot_size",
"=",
"api",
".",
"snapshots",
"(",
"droplet",
")",
".",
"size",
"logger",
".",
"debug",
"'Wait until snapshot will be created.'",
"api",
".",
"create_snapshot",
"droplet",
".",
"id",
",",
"name",
"snapshot_size",
"+=",
"1",
"logger",
".",
"info",
"\"Snapshot name: #{name} created successfully.\"",
"logger",
".",
"info",
"\"Droplet id: #{droplet.id} name: #{droplet.name} snapshots: #{snapshot_size}.\"",
"cleanup_snapshots",
"droplet",
",",
"snapshot_size",
"if",
"clean",
"rescue",
"=>",
"e",
"case",
"e",
".",
"class",
".",
"to_s",
"when",
"'DoSnapshot::SnapshotCleanupError'",
"raise",
"e",
".",
"class",
",",
"e",
".",
"message",
",",
"e",
".",
"backtrace",
"when",
"'DoSnapshot::DropletPowerError'",
"return",
"else",
"raise",
"SnapshotCreateError",
".",
"new",
"(",
"droplet",
".",
"id",
")",
",",
"e",
".",
"message",
",",
"e",
".",
"backtrace",
"end",
"end"
] | Trying to create a snapshot. | [
"Trying",
"to",
"create",
"a",
"snapshot",
"."
] | a72212ca489973a64987f0e9eb4abaae57de1abe | https://github.com/merqlove/do_snapshot/blob/a72212ca489973a64987f0e9eb4abaae57de1abe/lib/do_snapshot/command.rb#L61-L91 | train |
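The snapshot name is simply the droplet name joined with the current date, so a droplet called web-01 snapshotted on 7 March 2015 would be named as below (the droplet object here is illustrative):

name = "#{droplet.name}_#{DateTime.now.strftime('%Y_%m_%d')}"   # => "web-01_2015_03_07"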
merqlove/do_snapshot | lib/do_snapshot/command.rb | DoSnapshot.Command.dispatch_droplets | def dispatch_droplets
droplets.each do |droplet|
id = droplet.id.to_s
next if exclude.include? id
next unless only.empty? || only.include?(id)
prepare_droplet id, droplet.name
end
end | ruby | def dispatch_droplets
droplets.each do |droplet|
id = droplet.id.to_s
next if exclude.include? id
next unless only.empty? || only.include?(id)
prepare_droplet id, droplet.name
end
end | [
"def",
"dispatch_droplets",
"droplets",
".",
"each",
"do",
"|",
"droplet",
"|",
"id",
"=",
"droplet",
".",
"id",
".",
"to_s",
"next",
"if",
"exclude",
".",
"include?",
"id",
"next",
"unless",
"only",
".",
"empty?",
"||",
"only",
".",
"include?",
"(",
"id",
")",
"prepare_droplet",
"id",
",",
"droplet",
".",
"name",
"end",
"end"
] | Dispatch received droplets, one by one. | [
"Dispatch",
"received",
"droplets",
"each",
"by",
"each",
"."
] | a72212ca489973a64987f0e9eb4abaae57de1abe | https://github.com/merqlove/do_snapshot/blob/a72212ca489973a64987f0e9eb4abaae57de1abe/lib/do_snapshot/command.rb#L149-L157 | train |
merqlove/do_snapshot | lib/do_snapshot/command.rb | DoSnapshot.Command.prepare_droplet | def prepare_droplet(id, name)
logger.debug "Droplet id: #{id} name: #{name}\n"
droplet = api.droplet id
return unless droplet
logger.info "Preparing droplet id: #{droplet.id} name: #{droplet.name} to take snapshot."
return if too_much_snapshots?(droplet)
processed_droplet_ids << droplet.id
thread_runner(droplet)
end | ruby | def prepare_droplet(id, name)
logger.debug "Droplet id: #{id} name: #{name}\n"
droplet = api.droplet id
return unless droplet
logger.info "Preparing droplet id: #{droplet.id} name: #{droplet.name} to take snapshot."
return if too_much_snapshots?(droplet)
processed_droplet_ids << droplet.id
thread_runner(droplet)
end | [
"def",
"prepare_droplet",
"(",
"id",
",",
"name",
")",
"logger",
".",
"debug",
"\"Droplet id: #{id} name: #{name}\\n\"",
"droplet",
"=",
"api",
".",
"droplet",
"id",
"return",
"unless",
"droplet",
"logger",
".",
"info",
"\"Preparing droplet id: #{droplet.id} name: #{droplet.name} to take snapshot.\"",
"return",
"if",
"too_much_snapshots?",
"(",
"droplet",
")",
"processed_droplet_ids",
"<<",
"droplet",
".",
"id",
"thread_runner",
"(",
"droplet",
")",
"end"
] | Preparing droplet to take a snapshot.
Droplet instance must be powered off first! | [
"Preparing",
"droplet",
"to",
"take",
"a",
"snapshot",
".",
"Droplet",
"instance",
"must",
"be",
"powered",
"off",
"first!"
] | a72212ca489973a64987f0e9eb4abaae57de1abe | https://github.com/merqlove/do_snapshot/blob/a72212ca489973a64987f0e9eb4abaae57de1abe/lib/do_snapshot/command.rb#L176-L185 | train |
merqlove/do_snapshot | lib/do_snapshot/command.rb | DoSnapshot.Command.cleanup_snapshots | def cleanup_snapshots(droplet, size) # rubocop:disable Metrics/AbcSize
return unless size > keep
warning_size(droplet.id, droplet.name, size)
logger.debug "Cleaning up snapshots for droplet id: #{droplet.id} name: #{droplet.name}."
api.cleanup_snapshots(droplet, size - keep - 1)
rescue => e
raise SnapshotCleanupError, e.message, e.backtrace
end | ruby | def cleanup_snapshots(droplet, size) # rubocop:disable Metrics/AbcSize
return unless size > keep
warning_size(droplet.id, droplet.name, size)
logger.debug "Cleaning up snapshots for droplet id: #{droplet.id} name: #{droplet.name}."
api.cleanup_snapshots(droplet, size - keep - 1)
rescue => e
raise SnapshotCleanupError, e.message, e.backtrace
end | [
"def",
"cleanup_snapshots",
"(",
"droplet",
",",
"size",
")",
"return",
"unless",
"size",
">",
"keep",
"warning_size",
"(",
"droplet",
".",
"id",
",",
"droplet",
".",
"name",
",",
"size",
")",
"logger",
".",
"debug",
"\"Cleaning up snapshots for droplet id: #{droplet.id} name: #{droplet.name}.\"",
"api",
".",
"cleanup_snapshots",
"(",
"droplet",
",",
"size",
"-",
"keep",
"-",
"1",
")",
"rescue",
"=>",
"e",
"raise",
"SnapshotCleanupError",
",",
"e",
".",
"message",
",",
"e",
".",
"backtrace",
"end"
] | Cleanup our snapshots. | [
"Cleanup",
"our",
"snapshots",
"."
] | a72212ca489973a64987f0e9eb4abaae57de1abe | https://github.com/merqlove/do_snapshot/blob/a72212ca489973a64987f0e9eb4abaae57de1abe/lib/do_snapshot/command.rb#L200-L210 | train |
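A quick worked example of the guard above: with keep = 5 and size = 8 snapshots counted after the new one, the cleanup runs and the API helper receives 8 - 5 - 1 = 2; what that argument means exactly (for instance an upper index of oldest snapshots to drop) is up to api.cleanup_snapshots, which is not shown here:

keep, size = 5, 8
api.cleanup_snapshots(droplet, size - keep - 1) if size > keep   # called with 2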
philm/twilio | lib/twilio/available_phone_numbers.rb | Twilio.AvailablePhoneNumbers.search | def search(opts={})
iso_country_code = opts[:iso_country_code] || 'US'
resource = opts.delete(:resource)
params = {
:AreaCode => opts[:area_code],
:InPostalCode => opts[:postal_code],
:InRegion => opts[:in_region],
:Contains => opts[:contains],
:NearLatLong => opts[:near_lat_long],
:NearNumber => opts[:near_number],
:InLata => opts[:in_lata],
:InRateCenter => opts[:in_rate_center],
:Distance => opts[:distance],
:Page => opts[:page],
:PageSize => opts[:page_size]
}.reject {|k,v| v == nil} unless opts.empty?
Twilio.get("/AvailablePhoneNumbers/#{iso_country_code}/#{resource}", :query => params)
end | ruby | def search(opts={})
iso_country_code = opts[:iso_country_code] || 'US'
resource = opts.delete(:resource)
params = {
:AreaCode => opts[:area_code],
:InPostalCode => opts[:postal_code],
:InRegion => opts[:in_region],
:Contains => opts[:contains],
:NearLatLong => opts[:near_lat_long],
:NearNumber => opts[:near_number],
:InLata => opts[:in_lata],
:InRateCenter => opts[:in_rate_center],
:Distance => opts[:distance],
:Page => opts[:page],
:PageSize => opts[:page_size]
}.reject {|k,v| v == nil} unless opts.empty?
Twilio.get("/AvailablePhoneNumbers/#{iso_country_code}/#{resource}", :query => params)
end | [
"def",
"search",
"(",
"opts",
"=",
"{",
"}",
")",
"iso_country_code",
"=",
"opts",
"[",
":iso_country_code",
"]",
"||",
"'US'",
"resource",
"=",
"opts",
".",
"delete",
"(",
":resource",
")",
"params",
"=",
"{",
":AreaCode",
"=>",
"opts",
"[",
":area_code",
"]",
",",
":InPostalCode",
"=>",
"opts",
"[",
":postal_code",
"]",
",",
":InRegion",
"=>",
"opts",
"[",
":in_region",
"]",
",",
":Contains",
"=>",
"opts",
"[",
":contains",
"]",
",",
":NearLatLong",
"=>",
"opts",
"[",
":near_lat_long",
"]",
",",
":NearNumber",
"=>",
"opts",
"[",
":near_number",
"]",
",",
":InLata",
"=>",
"opts",
"[",
":in_lata",
"]",
",",
":InRateCenter",
"=>",
"opts",
"[",
":in_rate_center",
"]",
",",
":Distance",
"=>",
"opts",
"[",
":distance",
"]",
",",
":Page",
"=>",
"opts",
"[",
":page",
"]",
",",
":PageSize",
"=>",
"opts",
"[",
":page_size",
"]",
"}",
".",
"reject",
"{",
"|",
"k",
",",
"v",
"|",
"v",
"==",
"nil",
"}",
"unless",
"opts",
".",
"empty?",
"Twilio",
".",
"get",
"(",
"\"/AvailablePhoneNumbers/#{iso_country_code}/#{resource}\"",
",",
":query",
"=>",
"params",
")",
"end"
] | The Search method handles the searching of both local and toll-free
numbers. | [
"The",
"Search",
"method",
"handles",
"the",
"searching",
"of",
"both",
"local",
"and",
"toll",
"-",
"free",
"numbers",
"."
] | 81c05795924bbfa780ea44efd52d7ca5670bcb55 | https://github.com/philm/twilio/blob/81c05795924bbfa780ea44efd52d7ca5670bcb55/lib/twilio/available_phone_numbers.rb#L12-L31 | train |
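A hedged usage sketch of the options accepted above; the hash keys are the ones the method maps onto Twilio query parameters, while the :resource value ('Local' here) is an assumption about how callers pick local versus toll-free listings, since it is interpolated straight into the /AvailablePhoneNumbers/<country>/<resource> path:

Twilio::AvailablePhoneNumbers.search(
  :resource  => 'Local',     # or 'TollFree' (assumed resource names)
  :area_code => '415',
  :contains  => '555',
  :page_size => 20
)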
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClientHandler.addTrackerClient | def addTrackerClient(infoHash, info, trackerclient)
raise "There is already a tracker registered for torrent #{QuartzTorrent.bytesToHex(infoHash)}" if @torrentData.has_key? infoHash
torrentData = TorrentData.new(infoHash, info, trackerclient)
trackerclient.alarms = torrentData.alarms
@torrentData[infoHash] = torrentData
torrentData.info = info
torrentData.state = :initializing
queue(torrentData)
dequeue
torrentData
end | ruby | def addTrackerClient(infoHash, info, trackerclient)
raise "There is already a tracker registered for torrent #{QuartzTorrent.bytesToHex(infoHash)}" if @torrentData.has_key? infoHash
torrentData = TorrentData.new(infoHash, info, trackerclient)
trackerclient.alarms = torrentData.alarms
@torrentData[infoHash] = torrentData
torrentData.info = info
torrentData.state = :initializing
queue(torrentData)
dequeue
torrentData
end | [
"def",
"addTrackerClient",
"(",
"infoHash",
",",
"info",
",",
"trackerclient",
")",
"raise",
"\"There is already a tracker registered for torrent #{QuartzTorrent.bytesToHex(infoHash)}\"",
"if",
"@torrentData",
".",
"has_key?",
"infoHash",
"torrentData",
"=",
"TorrentData",
".",
"new",
"(",
"infoHash",
",",
"info",
",",
"trackerclient",
")",
"trackerclient",
".",
"alarms",
"=",
"torrentData",
".",
"alarms",
"@torrentData",
"[",
"infoHash",
"]",
"=",
"torrentData",
"torrentData",
".",
"info",
"=",
"info",
"torrentData",
".",
"state",
"=",
":initializing",
"queue",
"(",
"torrentData",
")",
"dequeue",
"torrentData",
"end"
] | Add a new tracker client. This effectively adds a new torrent to download. Returns the TorrentData object for the
new torrent. | [
"Add",
"a",
"new",
"tracker",
"client",
".",
"This",
"effectively",
"adds",
"a",
"new",
"torrent",
"to",
"download",
".",
"Returns",
"the",
"TorrentData",
"object",
"for",
"the",
"new",
"torrent",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L286-L298 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClientHandler.removeTorrent | def removeTorrent(infoHash, deleteFiles = false)
# Can't do this right now, since it could be in use by an event handler. Use an immediate, non-recurring timer instead.
@logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Scheduling immediate timer to remove torrent. #{deleteFiles ? "Will" : "Wont"} delete downloaded files."
@reactor.scheduleTimer(0, [:removetorrent, infoHash, deleteFiles], false, true)
end | ruby | def removeTorrent(infoHash, deleteFiles = false)
# Can't do this right now, since it could be in use by an event handler. Use an immediate, non-recurring timer instead.
@logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Scheduling immediate timer to remove torrent. #{deleteFiles ? "Will" : "Wont"} delete downloaded files."
@reactor.scheduleTimer(0, [:removetorrent, infoHash, deleteFiles], false, true)
end | [
"def",
"removeTorrent",
"(",
"infoHash",
",",
"deleteFiles",
"=",
"false",
")",
"@logger",
".",
"info",
"\"#{QuartzTorrent.bytesToHex(infoHash)}: Scheduling immediate timer to remove torrent. #{deleteFiles ? \"Will\" : \"Wont\"} delete downloaded files.\"",
"@reactor",
".",
"scheduleTimer",
"(",
"0",
",",
"[",
":removetorrent",
",",
"infoHash",
",",
"deleteFiles",
"]",
",",
"false",
",",
"true",
")",
"end"
] | Remove a torrent. | [
"Remove",
"a",
"torrent",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L301-L305 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClientHandler.setDownloadRateLimit | def setDownloadRateLimit(infoHash, bytesPerSecond)
torrentData = @torrentData[infoHash]
if ! torrentData
@logger.warn "Asked to set download rate limit for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
return
end
if bytesPerSecond
if ! torrentData.downRateLimit
torrentData.downRateLimit = RateLimit.new(bytesPerSecond, 2*bytesPerSecond, 0)
else
torrentData.downRateLimit.unitsPerSecond = bytesPerSecond
end
else
torrentData.downRateLimit = nil
end
torrentData.peers.all.each do |peer|
withPeersIo(peer, "setting download rate limit") do |io|
io.readRateLimit = torrentData.downRateLimit
end
end
end | ruby | def setDownloadRateLimit(infoHash, bytesPerSecond)
torrentData = @torrentData[infoHash]
if ! torrentData
@logger.warn "Asked to set download rate limit for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
return
end
if bytesPerSecond
if ! torrentData.downRateLimit
torrentData.downRateLimit = RateLimit.new(bytesPerSecond, 2*bytesPerSecond, 0)
else
torrentData.downRateLimit.unitsPerSecond = bytesPerSecond
end
else
torrentData.downRateLimit = nil
end
torrentData.peers.all.each do |peer|
withPeersIo(peer, "setting download rate limit") do |io|
io.readRateLimit = torrentData.downRateLimit
end
end
end | [
"def",
"setDownloadRateLimit",
"(",
"infoHash",
",",
"bytesPerSecond",
")",
"torrentData",
"=",
"@torrentData",
"[",
"infoHash",
"]",
"if",
"!",
"torrentData",
"@logger",
".",
"warn",
"\"Asked to set download rate limit for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}\"",
"return",
"end",
"if",
"bytesPerSecond",
"if",
"!",
"torrentData",
".",
"downRateLimit",
"torrentData",
".",
"downRateLimit",
"=",
"RateLimit",
".",
"new",
"(",
"bytesPerSecond",
",",
"2",
"*",
"bytesPerSecond",
",",
"0",
")",
"else",
"torrentData",
".",
"downRateLimit",
".",
"unitsPerSecond",
"=",
"bytesPerSecond",
"end",
"else",
"torrentData",
".",
"downRateLimit",
"=",
"nil",
"end",
"torrentData",
".",
"peers",
".",
"all",
".",
"each",
"do",
"|",
"peer",
"|",
"withPeersIo",
"(",
"peer",
",",
"\"setting download rate limit\"",
")",
"do",
"|",
"io",
"|",
"io",
".",
"readRateLimit",
"=",
"torrentData",
".",
"downRateLimit",
"end",
"end",
"end"
] | Set the download rate limit. Pass nil as the bytesPerSecond to disable the limit. | [
"Set",
"the",
"download",
"rate",
"limit",
".",
"Pass",
"nil",
"as",
"the",
"bytesPerSecond",
"to",
"disable",
"the",
"limit",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L315-L338 | train |
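A usage sketch of the rate-limit setter; passing a byte count installs (or updates) a RateLimit whose burst capacity is twice the steady rate and applies it to every peer's IO, while passing nil removes the cap. The handler and info_hash variables are placeholders:

handler.setDownloadRateLimit(info_hash, 256 * 1024)   # cap downloads at roughly 256 KiB/s
handler.setDownloadRateLimit(info_hash, nil)          # remove the limit again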
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClientHandler.setUploadRateLimit | def setUploadRateLimit(infoHash, bytesPerSecond)
torrentData = @torrentData[infoHash]
if ! torrentData
@logger.warn "Asked to set upload rate limit for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
return
end
if bytesPerSecond
if ! torrentData.upRateLimit
torrentData.upRateLimit = RateLimit.new(bytesPerSecond, 2*bytesPerSecond, 0)
else
torrentData.upRateLimit.unitsPerSecond = bytesPerSecond
end
else
torrentData.upRateLimit = nil
end
torrentData.peers.all.each do |peer|
withPeersIo(peer, "setting upload rate limit") do |io|
io.writeRateLimit = torrentData.upRateLimit
end
end
end | ruby | def setUploadRateLimit(infoHash, bytesPerSecond)
torrentData = @torrentData[infoHash]
if ! torrentData
@logger.warn "Asked to set upload rate limit for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
return
end
if bytesPerSecond
if ! torrentData.upRateLimit
torrentData.upRateLimit = RateLimit.new(bytesPerSecond, 2*bytesPerSecond, 0)
else
torrentData.upRateLimit.unitsPerSecond = bytesPerSecond
end
else
torrentData.upRateLimit = nil
end
torrentData.peers.all.each do |peer|
withPeersIo(peer, "setting upload rate limit") do |io|
io.writeRateLimit = torrentData.upRateLimit
end
end
end | [
"def",
"setUploadRateLimit",
"(",
"infoHash",
",",
"bytesPerSecond",
")",
"torrentData",
"=",
"@torrentData",
"[",
"infoHash",
"]",
"if",
"!",
"torrentData",
"@logger",
".",
"warn",
"\"Asked to set upload rate limit for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}\"",
"return",
"end",
"if",
"bytesPerSecond",
"if",
"!",
"torrentData",
".",
"upRateLimit",
"torrentData",
".",
"upRateLimit",
"=",
"RateLimit",
".",
"new",
"(",
"bytesPerSecond",
",",
"2",
"*",
"bytesPerSecond",
",",
"0",
")",
"else",
"torrentData",
".",
"upRateLimit",
".",
"unitsPerSecond",
"=",
"bytesPerSecond",
"end",
"else",
"torrentData",
".",
"upRateLimit",
"=",
"nil",
"end",
"torrentData",
".",
"peers",
".",
"all",
".",
"each",
"do",
"|",
"peer",
"|",
"withPeersIo",
"(",
"peer",
",",
"\"setting upload rate limit\"",
")",
"do",
"|",
"io",
"|",
"io",
".",
"writeRateLimit",
"=",
"torrentData",
".",
"upRateLimit",
"end",
"end",
"end"
] | Set the upload rate limit. Pass nil as the bytesPerSecond to disable the limit. | [
"Set",
"the",
"upload",
"rate",
"limit",
".",
"Pass",
"nil",
"as",
"the",
"bytesPerSecond",
"to",
"disable",
"the",
"limit",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L341-L363 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClientHandler.adjustBytesDownloaded | def adjustBytesDownloaded(infoHash, adjustment)
torrentData = @torrentData[infoHash]
if ! torrentData
@logger.warn "Asked to adjust uploaded bytes for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
return
end
runInReactorThread do
torrentData.bytesDownloaded += adjustment
torrentData.bytesDownloadedDataOnly += adjustment
end
end | ruby | def adjustBytesDownloaded(infoHash, adjustment)
torrentData = @torrentData[infoHash]
if ! torrentData
@logger.warn "Asked to adjust uploaded bytes for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
return
end
runInReactorThread do
torrentData.bytesDownloaded += adjustment
torrentData.bytesDownloadedDataOnly += adjustment
end
end | [
"def",
"adjustBytesDownloaded",
"(",
"infoHash",
",",
"adjustment",
")",
"torrentData",
"=",
"@torrentData",
"[",
"infoHash",
"]",
"if",
"!",
"torrentData",
"@logger",
".",
"warn",
"\"Asked to adjust uploaded bytes for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}\"",
"return",
"end",
"runInReactorThread",
"do",
"torrentData",
".",
"bytesDownloaded",
"+=",
"adjustment",
"torrentData",
".",
"bytesDownloadedDataOnly",
"+=",
"adjustment",
"end",
"end"
] | Adjust the bytesDownloaded property of the specified torrent by the passed amount.
Adjustment should be an integer. It is added to the current bytesDownloaded amount. | [
"Adjust",
"the",
"bytesDownloaded",
"property",
"of",
"the",
"specified",
"torrent",
"by",
"the",
"passed",
"amount",
".",
"Adjustment",
"should",
"be",
"an",
"integer",
".",
"It",
"is",
"added",
"to",
"the",
"current",
"bytesDownloaded",
"amount",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L405-L416 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClientHandler.updateDelegateTorrentData | def updateDelegateTorrentData(delegate)
return if stopped?
# Use an immediate, non-recurring timer.
semaphore = Semaphore.new
@reactor.scheduleTimer(0, [:update_torrent_data, delegate, semaphore], false, true)
semaphore.wait
result
end | ruby | def updateDelegateTorrentData(delegate)
return if stopped?
# Use an immediate, non-recurring timer.
semaphore = Semaphore.new
@reactor.scheduleTimer(0, [:update_torrent_data, delegate, semaphore], false, true)
semaphore.wait
result
end | [
"def",
"updateDelegateTorrentData",
"(",
"delegate",
")",
"return",
"if",
"stopped?",
"semaphore",
"=",
"Semaphore",
".",
"new",
"@reactor",
".",
"scheduleTimer",
"(",
"0",
",",
"[",
":update_torrent_data",
",",
"delegate",
",",
"semaphore",
"]",
",",
"false",
",",
"true",
")",
"semaphore",
".",
"wait",
"result",
"end"
] | Update the data stored in a TorrentDataDelegate to the latest information. | [
"Update",
"the",
"data",
"stored",
"in",
"a",
"TorrentDataDelegate",
"to",
"the",
"latest",
"information",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L438-L445 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClientHandler.serverInit | def serverInit(metadata, addr, port)
# A peer connected to us
# Read handshake message
@logger.warn "Peer connection from #{addr}:#{port}"
begin
msg = PeerHandshake.unserializeExceptPeerIdFrom currentIo
rescue
@logger.warn "Peer failed handshake: #{$!}"
close
return
end
torrentData = torrentDataForHandshake(msg, "#{addr}:#{port}")
# Are we tracking this torrent?
if !torrentData
@logger.warn "Peer sent handshake for unknown torrent"
close
return
end
trackerclient = torrentData.trackerClient
# If we already have too many connections, don't allow this connection.
classifiedPeers = ClassifiedPeers.new torrentData.peers.all
if classifiedPeers.establishedPeers.length > @targetActivePeerCount
@logger.warn "Closing connection to peer from #{addr}:#{port} because we already have #{classifiedPeers.establishedPeers.length} active peers which is > the target count of #{@targetActivePeerCount} "
close
return
end
# Send handshake
outgoing = PeerHandshake.new
outgoing.peerId = trackerclient.peerId
outgoing.infoHash = torrentData.infoHash
outgoing.serializeTo currentIo
# Send extended handshake if the peer supports extensions
if (msg.reserved.unpack("C8")[5] & 0x10) != 0
@logger.warn "Peer supports extensions. Sending extended handshake"
extended = Extension.createExtendedHandshake torrentData.info
extended.serializeTo currentIo
end
# Read incoming handshake's peerid
msg.peerId = currentIo.read(PeerHandshake::PeerIdLen)
if msg.peerId == trackerclient.peerId
@logger.info "We got a connection from ourself. Closing connection."
close
return
end
peer = nil
peers = torrentData.peers.findById(msg.peerId)
if peers
peers.each do |existingPeer|
if existingPeer.state != :disconnected
@logger.warn "Peer with id #{msg.peerId} created a new connection when we already have a connection in state #{existingPeer.state}. Closing new connection."
close
return
else
if existingPeer.trackerPeer.ip == addr && existingPeer.trackerPeer.port == port
peer = existingPeer
end
end
end
end
if ! peer
peer = Peer.new(TrackerPeer.new(addr, port))
updatePeerWithHandshakeInfo(torrentData, msg, peer)
torrentData.peers.add peer
if ! peers
@logger.warn "Unknown peer with id #{msg.peerId} connected."
else
@logger.warn "Known peer with id #{msg.peerId} connected from new location."
end
else
@logger.warn "Known peer with id #{msg.peerId} connected from known location."
end
@logger.info "Peer #{peer} connected to us. "
peer.state = :established
peer.amChoked = true
peer.peerChoked = true
peer.amInterested = false
peer.peerInterested = false
if torrentData.info
peer.bitfield = Bitfield.new(torrentData.info.pieces.length)
else
peer.bitfield = EmptyBitfield.new
@logger.info "We have no metainfo yet, so setting peer #{peer} to have an EmptyBitfield"
end
# Send bitfield
sendBitfield(currentIo, torrentData.blockState.completePieceBitfield) if torrentData.blockState
setMetaInfo(peer)
setReadRateLimit(torrentData.downRateLimit) if torrentData.downRateLimit
setWriteRateLimit(torrentData.upRateLimit) if torrentData.upRateLimit
end | ruby | def serverInit(metadata, addr, port)
# A peer connected to us
# Read handshake message
@logger.warn "Peer connection from #{addr}:#{port}"
begin
msg = PeerHandshake.unserializeExceptPeerIdFrom currentIo
rescue
@logger.warn "Peer failed handshake: #{$!}"
close
return
end
torrentData = torrentDataForHandshake(msg, "#{addr}:#{port}")
# Are we tracking this torrent?
if !torrentData
@logger.warn "Peer sent handshake for unknown torrent"
close
return
end
trackerclient = torrentData.trackerClient
# If we already have too many connections, don't allow this connection.
classifiedPeers = ClassifiedPeers.new torrentData.peers.all
if classifiedPeers.establishedPeers.length > @targetActivePeerCount
@logger.warn "Closing connection to peer from #{addr}:#{port} because we already have #{classifiedPeers.establishedPeers.length} active peers which is > the target count of #{@targetActivePeerCount} "
close
return
end
# Send handshake
outgoing = PeerHandshake.new
outgoing.peerId = trackerclient.peerId
outgoing.infoHash = torrentData.infoHash
outgoing.serializeTo currentIo
# Send extended handshake if the peer supports extensions
if (msg.reserved.unpack("C8")[5] & 0x10) != 0
@logger.warn "Peer supports extensions. Sending extended handshake"
extended = Extension.createExtendedHandshake torrentData.info
extended.serializeTo currentIo
end
# Read incoming handshake's peerid
msg.peerId = currentIo.read(PeerHandshake::PeerIdLen)
if msg.peerId == trackerclient.peerId
@logger.info "We got a connection from ourself. Closing connection."
close
return
end
peer = nil
peers = torrentData.peers.findById(msg.peerId)
if peers
peers.each do |existingPeer|
if existingPeer.state != :disconnected
@logger.warn "Peer with id #{msg.peerId} created a new connection when we already have a connection in state #{existingPeer.state}. Closing new connection."
close
return
else
if existingPeer.trackerPeer.ip == addr && existingPeer.trackerPeer.port == port
peer = existingPeer
end
end
end
end
if ! peer
peer = Peer.new(TrackerPeer.new(addr, port))
updatePeerWithHandshakeInfo(torrentData, msg, peer)
torrentData.peers.add peer
if ! peers
@logger.warn "Unknown peer with id #{msg.peerId} connected."
else
@logger.warn "Known peer with id #{msg.peerId} connected from new location."
end
else
@logger.warn "Known peer with id #{msg.peerId} connected from known location."
end
@logger.info "Peer #{peer} connected to us. "
peer.state = :established
peer.amChoked = true
peer.peerChoked = true
peer.amInterested = false
peer.peerInterested = false
if torrentData.info
peer.bitfield = Bitfield.new(torrentData.info.pieces.length)
else
peer.bitfield = EmptyBitfield.new
@logger.info "We have no metainfo yet, so setting peer #{peer} to have an EmptyBitfield"
end
# Send bitfield
sendBitfield(currentIo, torrentData.blockState.completePieceBitfield) if torrentData.blockState
setMetaInfo(peer)
setReadRateLimit(torrentData.downRateLimit) if torrentData.downRateLimit
setWriteRateLimit(torrentData.upRateLimit) if torrentData.upRateLimit
end | [
"def",
"serverInit",
"(",
"metadata",
",",
"addr",
",",
"port",
")",
"@logger",
".",
"warn",
"\"Peer connection from #{addr}:#{port}\"",
"begin",
"msg",
"=",
"PeerHandshake",
".",
"unserializeExceptPeerIdFrom",
"currentIo",
"rescue",
"@logger",
".",
"warn",
"\"Peer failed handshake: #{$!}\"",
"close",
"return",
"end",
"torrentData",
"=",
"torrentDataForHandshake",
"(",
"msg",
",",
"\"#{addr}:#{port}\"",
")",
"if",
"!",
"torrentData",
"@logger",
".",
"warn",
"\"Peer sent handshake for unknown torrent\"",
"close",
"return",
"end",
"trackerclient",
"=",
"torrentData",
".",
"trackerClient",
"classifiedPeers",
"=",
"ClassifiedPeers",
".",
"new",
"torrentData",
".",
"peers",
".",
"all",
"if",
"classifiedPeers",
".",
"establishedPeers",
".",
"length",
">",
"@targetActivePeerCount",
"@logger",
".",
"warn",
"\"Closing connection to peer from #{addr}:#{port} because we already have #{classifiedPeers.establishedPeers.length} active peers which is > the target count of #{@targetActivePeerCount} \"",
"close",
"return",
"end",
"outgoing",
"=",
"PeerHandshake",
".",
"new",
"outgoing",
".",
"peerId",
"=",
"trackerclient",
".",
"peerId",
"outgoing",
".",
"infoHash",
"=",
"torrentData",
".",
"infoHash",
"outgoing",
".",
"serializeTo",
"currentIo",
"if",
"(",
"msg",
".",
"reserved",
".",
"unpack",
"(",
"\"C8\"",
")",
"[",
"5",
"]",
"&",
"0x10",
")",
"!=",
"0",
"@logger",
".",
"warn",
"\"Peer supports extensions. Sending extended handshake\"",
"extended",
"=",
"Extension",
".",
"createExtendedHandshake",
"torrentData",
".",
"info",
"extended",
".",
"serializeTo",
"currentIo",
"end",
"msg",
".",
"peerId",
"=",
"currentIo",
".",
"read",
"(",
"PeerHandshake",
"::",
"PeerIdLen",
")",
"if",
"msg",
".",
"peerId",
"==",
"trackerclient",
".",
"peerId",
"@logger",
".",
"info",
"\"We got a connection from ourself. Closing connection.\"",
"close",
"return",
"end",
"peer",
"=",
"nil",
"peers",
"=",
"torrentData",
".",
"peers",
".",
"findById",
"(",
"msg",
".",
"peerId",
")",
"if",
"peers",
"peers",
".",
"each",
"do",
"|",
"existingPeer",
"|",
"if",
"existingPeer",
".",
"state",
"!=",
":disconnected",
"@logger",
".",
"warn",
"\"Peer with id #{msg.peerId} created a new connection when we already have a connection in state #{existingPeer.state}. Closing new connection.\"",
"close",
"return",
"else",
"if",
"existingPeer",
".",
"trackerPeer",
".",
"ip",
"==",
"addr",
"&&",
"existingPeer",
".",
"trackerPeer",
".",
"port",
"==",
"port",
"peer",
"=",
"existingPeer",
"end",
"end",
"end",
"end",
"if",
"!",
"peer",
"peer",
"=",
"Peer",
".",
"new",
"(",
"TrackerPeer",
".",
"new",
"(",
"addr",
",",
"port",
")",
")",
"updatePeerWithHandshakeInfo",
"(",
"torrentData",
",",
"msg",
",",
"peer",
")",
"torrentData",
".",
"peers",
".",
"add",
"peer",
"if",
"!",
"peers",
"@logger",
".",
"warn",
"\"Unknown peer with id #{msg.peerId} connected.\"",
"else",
"@logger",
".",
"warn",
"\"Known peer with id #{msg.peerId} connected from new location.\"",
"end",
"else",
"@logger",
".",
"warn",
"\"Known peer with id #{msg.peerId} connected from known location.\"",
"end",
"@logger",
".",
"info",
"\"Peer #{peer} connected to us. \"",
"peer",
".",
"state",
"=",
":established",
"peer",
".",
"amChoked",
"=",
"true",
"peer",
".",
"peerChoked",
"=",
"true",
"peer",
".",
"amInterested",
"=",
"false",
"peer",
".",
"peerInterested",
"=",
"false",
"if",
"torrentData",
".",
"info",
"peer",
".",
"bitfield",
"=",
"Bitfield",
".",
"new",
"(",
"torrentData",
".",
"info",
".",
"pieces",
".",
"length",
")",
"else",
"peer",
".",
"bitfield",
"=",
"EmptyBitfield",
".",
"new",
"@logger",
".",
"info",
"\"We have no metainfo yet, so setting peer #{peer} to have an EmptyBitfield\"",
"end",
"sendBitfield",
"(",
"currentIo",
",",
"torrentData",
".",
"blockState",
".",
"completePieceBitfield",
")",
"if",
"torrentData",
".",
"blockState",
"setMetaInfo",
"(",
"peer",
")",
"setReadRateLimit",
"(",
"torrentData",
".",
"downRateLimit",
")",
"if",
"torrentData",
".",
"downRateLimit",
"setWriteRateLimit",
"(",
"torrentData",
".",
"upRateLimit",
")",
"if",
"torrentData",
".",
"upRateLimit",
"end"
] | REACTOR METHODS
Reactor method called when a peer has connected to us. | [
"REACTOR",
"METHODS",
"Reactor",
"method",
"called",
"when",
"a",
"peer",
"has",
"connected",
"to",
"us",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L451-L551 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClientHandler.clientInit | def clientInit(peer)
# We connected to a peer
# Send handshake
torrentData = @torrentData[peer.infoHash]
if ! torrentData
@logger.warn "No tracker client found for peer #{peer}. Closing connection."
close
return
end
trackerclient = torrentData.trackerClient
@logger.info "Connected to peer #{peer}. Sending handshake."
msg = PeerHandshake.new
msg.peerId = trackerclient.peerId
msg.infoHash = peer.infoHash
msg.serializeTo currentIo
peer.state = :handshaking
@reactor.scheduleTimer(@handshakeTimeout, [:handshake_timeout, peer], false)
@logger.debug "Done sending handshake."
# Send bitfield
sendBitfield(currentIo, torrentData.blockState.completePieceBitfield) if torrentData.blockState
setReadRateLimit(torrentData.downRateLimit) if torrentData.downRateLimit
setWriteRateLimit(torrentData.upRateLimit) if torrentData.upRateLimit
end | ruby | def clientInit(peer)
# We connected to a peer
# Send handshake
torrentData = @torrentData[peer.infoHash]
if ! torrentData
@logger.warn "No tracker client found for peer #{peer}. Closing connection."
close
return
end
trackerclient = torrentData.trackerClient
@logger.info "Connected to peer #{peer}. Sending handshake."
msg = PeerHandshake.new
msg.peerId = trackerclient.peerId
msg.infoHash = peer.infoHash
msg.serializeTo currentIo
peer.state = :handshaking
@reactor.scheduleTimer(@handshakeTimeout, [:handshake_timeout, peer], false)
@logger.debug "Done sending handshake."
# Send bitfield
sendBitfield(currentIo, torrentData.blockState.completePieceBitfield) if torrentData.blockState
setReadRateLimit(torrentData.downRateLimit) if torrentData.downRateLimit
setWriteRateLimit(torrentData.upRateLimit) if torrentData.upRateLimit
end | [
"def",
"clientInit",
"(",
"peer",
")",
"torrentData",
"=",
"@torrentData",
"[",
"peer",
".",
"infoHash",
"]",
"if",
"!",
"torrentData",
"@logger",
".",
"warn",
"\"No tracker client found for peer #{peer}. Closing connection.\"",
"close",
"return",
"end",
"trackerclient",
"=",
"torrentData",
".",
"trackerClient",
"@logger",
".",
"info",
"\"Connected to peer #{peer}. Sending handshake.\"",
"msg",
"=",
"PeerHandshake",
".",
"new",
"msg",
".",
"peerId",
"=",
"trackerclient",
".",
"peerId",
"msg",
".",
"infoHash",
"=",
"peer",
".",
"infoHash",
"msg",
".",
"serializeTo",
"currentIo",
"peer",
".",
"state",
"=",
":handshaking",
"@reactor",
".",
"scheduleTimer",
"(",
"@handshakeTimeout",
",",
"[",
":handshake_timeout",
",",
"peer",
"]",
",",
"false",
")",
"@logger",
".",
"debug",
"\"Done sending handshake.\"",
"sendBitfield",
"(",
"currentIo",
",",
"torrentData",
".",
"blockState",
".",
"completePieceBitfield",
")",
"if",
"torrentData",
".",
"blockState",
"setReadRateLimit",
"(",
"torrentData",
".",
"downRateLimit",
")",
"if",
"torrentData",
".",
"downRateLimit",
"setWriteRateLimit",
"(",
"torrentData",
".",
"upRateLimit",
")",
"if",
"torrentData",
".",
"upRateLimit",
"end"
] | Reactor method called when we have connected to a peer. | [
"Reactor",
"method",
"called",
"when",
"we",
"have",
"connected",
"to",
"a",
"peer",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L554-L579 | train |
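The connect-side flow above is: look up the torrent, write a handshake carrying our peer id and the torrent's info hash, mark the peer :handshaking, and arm a handshake timeout. A minimal sketch of the serialize-to-an-IO call shape, using a toy message class rather than QuartzTorrent's real PeerHandshake wire format; the class, field layout and placeholder values below are invented for illustration.

require 'stringio'

# Toy handshake, NOT the BitTorrent wire format; it only mirrors the
# "msg.serializeTo currentIo" call shape used above.
class ToyHandshake
  attr_accessor :peer_id, :info_hash

  def serialize_to(io)
    io.write("#{peer_id}|#{info_hash}")
  end
end

msg = ToyHandshake.new
msg.peer_id   = "-QR0001-000000000001"   # placeholder peer id
msg.info_hash = "aa" * 20                # placeholder hex info hash
io = StringIO.new
msg.serialize_to(io)                     # write the message to the connection's IO
puts io.string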
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClientHandler.recvData | def recvData(peer)
msg = nil
@logger.debug "Got data from peer #{peer}"
if peer.state == :handshaking
# Read handshake message
begin
@logger.debug "Reading handshake from #{peer}"
msg = PeerHandshake.unserializeFrom currentIo
rescue
@logger.warn "Peer #{peer} failed handshake: #{$!}"
setPeerDisconnected(peer)
close
return
end
else
begin
@logger.debug "Reading wire-message from #{peer}"
msg = peer.peerMsgSerializer.unserializeFrom currentIo
#msg = PeerWireMessage.unserializeFrom currentIo
rescue EOFError
@logger.info "Peer #{peer} disconnected."
setPeerDisconnected(peer)
close
return
rescue
@logger.warn "Unserializing message from peer #{peer} failed: #{$!}"
@logger.warn $!.backtrace.join "\n"
setPeerDisconnected(peer)
close
return
end
peer.updateUploadRate msg
torrentData = @torrentData[peer.infoHash]
torrentData.bytesDownloaded += msg.length if torrentData
@logger.debug "Peer #{peer} upload rate: #{peer.uploadRate.value} data only: #{peer.uploadRateDataOnly.value}"
end
if msg.is_a? PeerHandshake
# This is a remote peer that we connected to returning our handshake.
processHandshake(msg, peer)
peer.state = :established
peer.amChoked = true
peer.peerChoked = true
peer.amInterested = false
peer.peerInterested = false
elsif msg.is_a? BitfieldMessage
@logger.debug "Received bitfield message from peer."
handleBitfield(msg, peer)
elsif msg.is_a? Unchoke
@logger.debug "Received unchoke message from peer."
peer.amChoked = false
elsif msg.is_a? Choke
@logger.debug "Received choke message from peer."
peer.amChoked = true
elsif msg.is_a? Interested
@logger.debug "Received interested message from peer."
peer.peerInterested = true
elsif msg.is_a? Uninterested
@logger.debug "Received uninterested message from peer."
peer.peerInterested = false
elsif msg.is_a? Piece
@logger.debug "Received piece message from peer for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)}: piece #{msg.pieceIndex} offset #{msg.blockOffset} length #{msg.data.length}."
handlePieceReceive(msg, peer)
elsif msg.is_a? Request
@logger.debug "Received request message from peer for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)}: piece #{msg.pieceIndex} offset #{msg.blockOffset} length #{msg.blockLength}."
handleRequest(msg, peer)
elsif msg.is_a? Have
@logger.debug "Received have message from peer for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)}: piece #{msg.pieceIndex}"
handleHave(msg, peer)
elsif msg.is_a? KeepAlive
@logger.debug "Received keep alive message from peer."
elsif msg.is_a? ExtendedHandshake
@logger.debug "Received extended handshake message from peer."
handleExtendedHandshake(msg, peer)
elsif msg.is_a? ExtendedMetaInfo
@logger.debug "Received extended metainfo message from peer."
handleExtendedMetainfo(msg, peer)
else
@logger.warn "Received a #{msg.class} message but handler is not implemented"
end
end | ruby | def recvData(peer)
msg = nil
@logger.debug "Got data from peer #{peer}"
if peer.state == :handshaking
# Read handshake message
begin
@logger.debug "Reading handshake from #{peer}"
msg = PeerHandshake.unserializeFrom currentIo
rescue
@logger.warn "Peer #{peer} failed handshake: #{$!}"
setPeerDisconnected(peer)
close
return
end
else
begin
@logger.debug "Reading wire-message from #{peer}"
msg = peer.peerMsgSerializer.unserializeFrom currentIo
#msg = PeerWireMessage.unserializeFrom currentIo
rescue EOFError
@logger.info "Peer #{peer} disconnected."
setPeerDisconnected(peer)
close
return
rescue
@logger.warn "Unserializing message from peer #{peer} failed: #{$!}"
@logger.warn $!.backtrace.join "\n"
setPeerDisconnected(peer)
close
return
end
peer.updateUploadRate msg
torrentData = @torrentData[peer.infoHash]
torrentData.bytesDownloaded += msg.length if torrentData
@logger.debug "Peer #{peer} upload rate: #{peer.uploadRate.value} data only: #{peer.uploadRateDataOnly.value}"
end
if msg.is_a? PeerHandshake
# This is a remote peer that we connected to returning our handshake.
processHandshake(msg, peer)
peer.state = :established
peer.amChoked = true
peer.peerChoked = true
peer.amInterested = false
peer.peerInterested = false
elsif msg.is_a? BitfieldMessage
@logger.debug "Received bitfield message from peer."
handleBitfield(msg, peer)
elsif msg.is_a? Unchoke
@logger.debug "Received unchoke message from peer."
peer.amChoked = false
elsif msg.is_a? Choke
@logger.debug "Received choke message from peer."
peer.amChoked = true
elsif msg.is_a? Interested
@logger.debug "Received interested message from peer."
peer.peerInterested = true
elsif msg.is_a? Uninterested
@logger.debug "Received uninterested message from peer."
peer.peerInterested = false
elsif msg.is_a? Piece
@logger.debug "Received piece message from peer for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)}: piece #{msg.pieceIndex} offset #{msg.blockOffset} length #{msg.data.length}."
handlePieceReceive(msg, peer)
elsif msg.is_a? Request
@logger.debug "Received request message from peer for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)}: piece #{msg.pieceIndex} offset #{msg.blockOffset} length #{msg.blockLength}."
handleRequest(msg, peer)
elsif msg.is_a? Have
@logger.debug "Received have message from peer for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)}: piece #{msg.pieceIndex}"
handleHave(msg, peer)
elsif msg.is_a? KeepAlive
@logger.debug "Received keep alive message from peer."
elsif msg.is_a? ExtendedHandshake
@logger.debug "Received extended handshake message from peer."
handleExtendedHandshake(msg, peer)
elsif msg.is_a? ExtendedMetaInfo
@logger.debug "Received extended metainfo message from peer."
handleExtendedMetainfo(msg, peer)
else
@logger.warn "Received a #{msg.class} message but handler is not implemented"
end
end | [
"def",
"recvData",
"(",
"peer",
")",
"msg",
"=",
"nil",
"@logger",
".",
"debug",
"\"Got data from peer #{peer}\"",
"if",
"peer",
".",
"state",
"==",
":handshaking",
"begin",
"@logger",
".",
"debug",
"\"Reading handshake from #{peer}\"",
"msg",
"=",
"PeerHandshake",
".",
"unserializeFrom",
"currentIo",
"rescue",
"@logger",
".",
"warn",
"\"Peer #{peer} failed handshake: #{$!}\"",
"setPeerDisconnected",
"(",
"peer",
")",
"close",
"return",
"end",
"else",
"begin",
"@logger",
".",
"debug",
"\"Reading wire-message from #{peer}\"",
"msg",
"=",
"peer",
".",
"peerMsgSerializer",
".",
"unserializeFrom",
"currentIo",
"rescue",
"EOFError",
"@logger",
".",
"info",
"\"Peer #{peer} disconnected.\"",
"setPeerDisconnected",
"(",
"peer",
")",
"close",
"return",
"rescue",
"@logger",
".",
"warn",
"\"Unserializing message from peer #{peer} failed: #{$!}\"",
"@logger",
".",
"warn",
"$!",
".",
"backtrace",
".",
"join",
"\"\\n\"",
"setPeerDisconnected",
"(",
"peer",
")",
"close",
"return",
"end",
"peer",
".",
"updateUploadRate",
"msg",
"torrentData",
"=",
"@torrentData",
"[",
"peer",
".",
"infoHash",
"]",
"torrentData",
".",
"bytesDownloaded",
"+=",
"msg",
".",
"length",
"if",
"torrentData",
"@logger",
".",
"debug",
"\"Peer #{peer} upload rate: #{peer.uploadRate.value} data only: #{peer.uploadRateDataOnly.value}\"",
"end",
"if",
"msg",
".",
"is_a?",
"PeerHandshake",
"processHandshake",
"(",
"msg",
",",
"peer",
")",
"peer",
".",
"state",
"=",
":established",
"peer",
".",
"amChoked",
"=",
"true",
"peer",
".",
"peerChoked",
"=",
"true",
"peer",
".",
"amInterested",
"=",
"false",
"peer",
".",
"peerInterested",
"=",
"false",
"elsif",
"msg",
".",
"is_a?",
"BitfieldMessage",
"@logger",
".",
"debug",
"\"Received bitfield message from peer.\"",
"handleBitfield",
"(",
"msg",
",",
"peer",
")",
"elsif",
"msg",
".",
"is_a?",
"Unchoke",
"@logger",
".",
"debug",
"\"Received unchoke message from peer.\"",
"peer",
".",
"amChoked",
"=",
"false",
"elsif",
"msg",
".",
"is_a?",
"Choke",
"@logger",
".",
"debug",
"\"Received choke message from peer.\"",
"peer",
".",
"amChoked",
"=",
"true",
"elsif",
"msg",
".",
"is_a?",
"Interested",
"@logger",
".",
"debug",
"\"Received interested message from peer.\"",
"peer",
".",
"peerInterested",
"=",
"true",
"elsif",
"msg",
".",
"is_a?",
"Uninterested",
"@logger",
".",
"debug",
"\"Received uninterested message from peer.\"",
"peer",
".",
"peerInterested",
"=",
"false",
"elsif",
"msg",
".",
"is_a?",
"Piece",
"@logger",
".",
"debug",
"\"Received piece message from peer for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)}: piece #{msg.pieceIndex} offset #{msg.blockOffset} length #{msg.data.length}.\"",
"handlePieceReceive",
"(",
"msg",
",",
"peer",
")",
"elsif",
"msg",
".",
"is_a?",
"Request",
"@logger",
".",
"debug",
"\"Received request message from peer for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)}: piece #{msg.pieceIndex} offset #{msg.blockOffset} length #{msg.blockLength}.\"",
"handleRequest",
"(",
"msg",
",",
"peer",
")",
"elsif",
"msg",
".",
"is_a?",
"Have",
"@logger",
".",
"debug",
"\"Received have message from peer for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)}: piece #{msg.pieceIndex}\"",
"handleHave",
"(",
"msg",
",",
"peer",
")",
"elsif",
"msg",
".",
"is_a?",
"KeepAlive",
"@logger",
".",
"debug",
"\"Received keep alive message from peer.\"",
"elsif",
"msg",
".",
"is_a?",
"ExtendedHandshake",
"@logger",
".",
"debug",
"\"Received extended handshake message from peer.\"",
"handleExtendedHandshake",
"(",
"msg",
",",
"peer",
")",
"elsif",
"msg",
".",
"is_a?",
"ExtendedMetaInfo",
"@logger",
".",
"debug",
"\"Received extended metainfo message from peer.\"",
"handleExtendedMetainfo",
"(",
"msg",
",",
"peer",
")",
"else",
"@logger",
".",
"warn",
"\"Received a #{msg.class} message but handler is not implemented\"",
"end",
"end"
] | Reactor method called when there is data ready to be read from a socket | [
"Reactor",
"method",
"called",
"when",
"there",
"is",
"data",
"ready",
"to",
"be",
"read",
"from",
"a",
"socket"
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L582-L666 | train |
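recvData above routes every deserialized message through a long is_a? chain. A compact, runnable sketch of the same dispatch idea using a handler table; the toy message classes and handler names are illustrative, not QuartzTorrent API.

# Toy message classes standing in for the wire messages handled above.
Choke     = Class.new
Unchoke   = Class.new
KeepAlive = Class.new

HANDLERS = {
  Choke     => ->(peer) { peer[:am_choked] = true },
  Unchoke   => ->(peer) { peer[:am_choked] = false },
  KeepAlive => ->(peer) { }   # keep-alives need no state change
}

def handle(msg, peer)
  handler = HANDLERS[msg.class]
  if handler
    handler.call(peer)
  else
    warn "Received a #{msg.class} message but handler is not implemented"
  end
end

peer = { am_choked: true }
handle(Unchoke.new, peer)
p peer   # => {:am_choked=>false}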
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClientHandler.timerExpired | def timerExpired(metadata)
if metadata.is_a?(Array) && metadata[0] == :manage_peers
managePeers(metadata[1])
elsif metadata.is_a?(Array) && metadata[0] == :request_blocks
requestBlocks(metadata[1])
elsif metadata.is_a?(Array) && metadata[0] == :check_piece_manager
checkPieceManagerResults(metadata[1])
elsif metadata.is_a?(Array) && metadata[0] == :handshake_timeout
handleHandshakeTimeout(metadata[1])
elsif metadata.is_a?(Array) && metadata[0] == :removetorrent
handleRemoveTorrent(metadata[1], metadata[2])
elsif metadata.is_a?(Array) && metadata[0] == :pausetorrent
handlePause(metadata[1], metadata[2])
elsif metadata.is_a?(Array) && metadata[0] == :get_torrent_data
@torrentData.each do |k,v|
begin
if metadata[3].nil? || k == metadata[3]
v = TorrentDataDelegate.new(v, self)
metadata[1][k] = v
end
rescue
@logger.error "Error building torrent data response for user: #{$!}"
@logger.error "#{$!.backtrace.join("\n")}"
end
end
metadata[2].signal
elsif metadata.is_a?(Array) && metadata[0] == :update_torrent_data
delegate = metadata[1]
if ! @torrentData.has_key?(infoHash)
delegate.state = :deleted
else
delegate.internalRefresh
end
metadata[2].signal
elsif metadata.is_a?(Array) && metadata[0] == :request_metadata_pieces
requestMetadataPieces(metadata[1])
elsif metadata.is_a?(Array) && metadata[0] == :check_metadata_piece_manager
checkMetadataPieceManagerResults(metadata[1])
elsif metadata.is_a?(Array) && metadata[0] == :runproc
metadata[1].call
else
@logger.info "Unknown timer #{metadata} expired."
end
end | ruby | def timerExpired(metadata)
if metadata.is_a?(Array) && metadata[0] == :manage_peers
managePeers(metadata[1])
elsif metadata.is_a?(Array) && metadata[0] == :request_blocks
requestBlocks(metadata[1])
elsif metadata.is_a?(Array) && metadata[0] == :check_piece_manager
checkPieceManagerResults(metadata[1])
elsif metadata.is_a?(Array) && metadata[0] == :handshake_timeout
handleHandshakeTimeout(metadata[1])
elsif metadata.is_a?(Array) && metadata[0] == :removetorrent
handleRemoveTorrent(metadata[1], metadata[2])
elsif metadata.is_a?(Array) && metadata[0] == :pausetorrent
handlePause(metadata[1], metadata[2])
elsif metadata.is_a?(Array) && metadata[0] == :get_torrent_data
@torrentData.each do |k,v|
begin
if metadata[3].nil? || k == metadata[3]
v = TorrentDataDelegate.new(v, self)
metadata[1][k] = v
end
rescue
@logger.error "Error building torrent data response for user: #{$!}"
@logger.error "#{$!.backtrace.join("\n")}"
end
end
metadata[2].signal
elsif metadata.is_a?(Array) && metadata[0] == :update_torrent_data
delegate = metadata[1]
if ! @torrentData.has_key?(infoHash)
delegate.state = :deleted
else
delegate.internalRefresh
end
metadata[2].signal
elsif metadata.is_a?(Array) && metadata[0] == :request_metadata_pieces
requestMetadataPieces(metadata[1])
elsif metadata.is_a?(Array) && metadata[0] == :check_metadata_piece_manager
checkMetadataPieceManagerResults(metadata[1])
elsif metadata.is_a?(Array) && metadata[0] == :runproc
metadata[1].call
else
@logger.info "Unknown timer #{metadata} expired."
end
end | [
"def",
"timerExpired",
"(",
"metadata",
")",
"if",
"metadata",
".",
"is_a?",
"(",
"Array",
")",
"&&",
"metadata",
"[",
"0",
"]",
"==",
":manage_peers",
"managePeers",
"(",
"metadata",
"[",
"1",
"]",
")",
"elsif",
"metadata",
".",
"is_a?",
"(",
"Array",
")",
"&&",
"metadata",
"[",
"0",
"]",
"==",
":request_blocks",
"requestBlocks",
"(",
"metadata",
"[",
"1",
"]",
")",
"elsif",
"metadata",
".",
"is_a?",
"(",
"Array",
")",
"&&",
"metadata",
"[",
"0",
"]",
"==",
":check_piece_manager",
"checkPieceManagerResults",
"(",
"metadata",
"[",
"1",
"]",
")",
"elsif",
"metadata",
".",
"is_a?",
"(",
"Array",
")",
"&&",
"metadata",
"[",
"0",
"]",
"==",
":handshake_timeout",
"handleHandshakeTimeout",
"(",
"metadata",
"[",
"1",
"]",
")",
"elsif",
"metadata",
".",
"is_a?",
"(",
"Array",
")",
"&&",
"metadata",
"[",
"0",
"]",
"==",
":removetorrent",
"handleRemoveTorrent",
"(",
"metadata",
"[",
"1",
"]",
",",
"metadata",
"[",
"2",
"]",
")",
"elsif",
"metadata",
".",
"is_a?",
"(",
"Array",
")",
"&&",
"metadata",
"[",
"0",
"]",
"==",
":pausetorrent",
"handlePause",
"(",
"metadata",
"[",
"1",
"]",
",",
"metadata",
"[",
"2",
"]",
")",
"elsif",
"metadata",
".",
"is_a?",
"(",
"Array",
")",
"&&",
"metadata",
"[",
"0",
"]",
"==",
":get_torrent_data",
"@torrentData",
".",
"each",
"do",
"|",
"k",
",",
"v",
"|",
"begin",
"if",
"metadata",
"[",
"3",
"]",
".",
"nil?",
"||",
"k",
"==",
"metadata",
"[",
"3",
"]",
"v",
"=",
"TorrentDataDelegate",
".",
"new",
"(",
"v",
",",
"self",
")",
"metadata",
"[",
"1",
"]",
"[",
"k",
"]",
"=",
"v",
"end",
"rescue",
"@logger",
".",
"error",
"\"Error building torrent data response for user: #{$!}\"",
"@logger",
".",
"error",
"\"#{$!.backtrace.join(\"\\n\")}\"",
"end",
"end",
"metadata",
"[",
"2",
"]",
".",
"signal",
"elsif",
"metadata",
".",
"is_a?",
"(",
"Array",
")",
"&&",
"metadata",
"[",
"0",
"]",
"==",
":update_torrent_data",
"delegate",
"=",
"metadata",
"[",
"1",
"]",
"if",
"!",
"@torrentData",
".",
"has_key?",
"(",
"infoHash",
")",
"delegate",
".",
"state",
"=",
":deleted",
"else",
"delegate",
".",
"internalRefresh",
"end",
"metadata",
"[",
"2",
"]",
".",
"signal",
"elsif",
"metadata",
".",
"is_a?",
"(",
"Array",
")",
"&&",
"metadata",
"[",
"0",
"]",
"==",
":request_metadata_pieces",
"requestMetadataPieces",
"(",
"metadata",
"[",
"1",
"]",
")",
"elsif",
"metadata",
".",
"is_a?",
"(",
"Array",
")",
"&&",
"metadata",
"[",
"0",
"]",
"==",
":check_metadata_piece_manager",
"checkMetadataPieceManagerResults",
"(",
"metadata",
"[",
"1",
"]",
")",
"elsif",
"metadata",
".",
"is_a?",
"(",
"Array",
")",
"&&",
"metadata",
"[",
"0",
"]",
"==",
":runproc",
"metadata",
"[",
"1",
"]",
".",
"call",
"else",
"@logger",
".",
"info",
"\"Unknown timer #{metadata} expired.\"",
"end",
"end"
] | Reactor method called when a scheduled timer expires. | [
"Reactor",
"method",
"called",
"when",
"a",
"scheduled",
"timer",
"expires",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L669-L712 | train |
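Every timer in the handler carries an Array whose first element is a Symbol tag and whose remaining elements are the handler's arguments; timerExpired re-dispatches on that tag. A small runnable sketch of the same convention, with invented tag names and print statements in place of the real handlers.

# Timers carry [:tag, arg1, ...]; the tag picks the handler, the rest are its arguments.
def timer_expired(metadata)
  tag, *args = metadata
  case tag
  when :manage_peers   then puts "managing peers for torrent #{args.first}"
  when :request_blocks then puts "requesting blocks for torrent #{args.first}"
  when :runproc        then args.first.call
  else puts "Unknown timer #{metadata.inspect} expired."
  end
end

timer_expired([:manage_peers, "abc123"])
timer_expired([:runproc, -> { puts "deferred work" }])
timer_expired([:no_such_tag])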
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClientHandler.requestMetadataPieces | def requestMetadataPieces(infoHash)
torrentData = @torrentData[infoHash]
if ! torrentData
@logger.error "Request metadata pices: torrent data for torrent #{QuartzTorrent.bytesToHex(infoHash)} not found."
return
end
return if torrentData.paused || torrentData.queued
# We may not have completed the extended handshake with the peer which specifies the torrent size.
# In this case torrentData.metainfoPieceState is not yet set.
return if ! torrentData.metainfoPieceState
@logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Obtained all pieces of metainfo." if torrentData.metainfoPieceState.complete?
pieces = torrentData.metainfoPieceState.findRequestablePieces
classifiedPeers = ClassifiedPeers.new torrentData.peers.all
peers = torrentData.metainfoPieceState.findRequestablePeers(classifiedPeers)
if peers.size > 0
# For now, just request all pieces from the first peer.
pieces.each do |pieceIndex|
msg = ExtendedMetaInfo.new
msg.msgType = :request
msg.piece = pieceIndex
withPeersIo(peers.first, "requesting metadata piece") do |io|
sendMessageToPeer msg, io, peers.first
torrentData.metainfoPieceState.setPieceRequested(pieceIndex, true)
@logger.debug "#{QuartzTorrent.bytesToHex(infoHash)}: Requesting metainfo piece from #{peers.first}: piece #{pieceIndex}"
end
end
else
@logger.error "#{QuartzTorrent.bytesToHex(infoHash)}: No peers found that have metadata."
end
end | ruby | def requestMetadataPieces(infoHash)
torrentData = @torrentData[infoHash]
if ! torrentData
@logger.error "Request metadata pices: torrent data for torrent #{QuartzTorrent.bytesToHex(infoHash)} not found."
return
end
return if torrentData.paused || torrentData.queued
# We may not have completed the extended handshake with the peer which specifies the torrent size.
# In this case torrentData.metainfoPieceState is not yet set.
return if ! torrentData.metainfoPieceState
@logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Obtained all pieces of metainfo." if torrentData.metainfoPieceState.complete?
pieces = torrentData.metainfoPieceState.findRequestablePieces
classifiedPeers = ClassifiedPeers.new torrentData.peers.all
peers = torrentData.metainfoPieceState.findRequestablePeers(classifiedPeers)
if peers.size > 0
# For now, just request all pieces from the first peer.
pieces.each do |pieceIndex|
msg = ExtendedMetaInfo.new
msg.msgType = :request
msg.piece = pieceIndex
withPeersIo(peers.first, "requesting metadata piece") do |io|
sendMessageToPeer msg, io, peers.first
torrentData.metainfoPieceState.setPieceRequested(pieceIndex, true)
@logger.debug "#{QuartzTorrent.bytesToHex(infoHash)}: Requesting metainfo piece from #{peers.first}: piece #{pieceIndex}"
end
end
else
@logger.error "#{QuartzTorrent.bytesToHex(infoHash)}: No peers found that have metadata."
end
end | [
"def",
"requestMetadataPieces",
"(",
"infoHash",
")",
"torrentData",
"=",
"@torrentData",
"[",
"infoHash",
"]",
"if",
"!",
"torrentData",
"@logger",
".",
"error",
"\"Request metadata pices: torrent data for torrent #{QuartzTorrent.bytesToHex(infoHash)} not found.\"",
"return",
"end",
"return",
"if",
"torrentData",
".",
"paused",
"||",
"torrentData",
".",
"queued",
"return",
"if",
"!",
"torrentData",
".",
"metainfoPieceState",
"@logger",
".",
"info",
"\"#{QuartzTorrent.bytesToHex(infoHash)}: Obtained all pieces of metainfo.\"",
"if",
"torrentData",
".",
"metainfoPieceState",
".",
"complete?",
"pieces",
"=",
"torrentData",
".",
"metainfoPieceState",
".",
"findRequestablePieces",
"classifiedPeers",
"=",
"ClassifiedPeers",
".",
"new",
"torrentData",
".",
"peers",
".",
"all",
"peers",
"=",
"torrentData",
".",
"metainfoPieceState",
".",
"findRequestablePeers",
"(",
"classifiedPeers",
")",
"if",
"peers",
".",
"size",
">",
"0",
"pieces",
".",
"each",
"do",
"|",
"pieceIndex",
"|",
"msg",
"=",
"ExtendedMetaInfo",
".",
"new",
"msg",
".",
"msgType",
"=",
":request",
"msg",
".",
"piece",
"=",
"pieceIndex",
"withPeersIo",
"(",
"peers",
".",
"first",
",",
"\"requesting metadata piece\"",
")",
"do",
"|",
"io",
"|",
"sendMessageToPeer",
"msg",
",",
"io",
",",
"peers",
".",
"first",
"torrentData",
".",
"metainfoPieceState",
".",
"setPieceRequested",
"(",
"pieceIndex",
",",
"true",
")",
"@logger",
".",
"debug",
"\"#{QuartzTorrent.bytesToHex(infoHash)}: Requesting metainfo piece from #{peers.first}: piece #{pieceIndex}\"",
"end",
"end",
"else",
"@logger",
".",
"error",
"\"#{QuartzTorrent.bytesToHex(infoHash)}: No peers found that have metadata.\"",
"end",
"end"
] | For a torrent where we don't have the metainfo, request metainfo pieces from peers. | [
"For",
"a",
"torrent",
"where",
"we",
"don",
"t",
"have",
"the",
"metainfo",
"request",
"metainfo",
"pieces",
"from",
"peers",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L1003-L1038 | train |
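The method asks one peer for every metainfo piece that is still requestable and marks each piece as requested so it is not asked for twice. A self-contained sketch of that bookkeeping with a plain Set; the real MetainfoPieceState API is richer, and the piece counts here are made up.

require 'set'

# Pieces we already have or have already asked for are skipped; everything else
# is requested and then marked so it is not requested again.
def requestable_pieces(total, have, requested)
  (0...total).reject { |i| have.include?(i) || requested.include?(i) }
end

have      = Set[0]
requested = Set.new
total     = 4

requestable_pieces(total, have, requested).each do |piece|
  puts "requesting metainfo piece #{piece}"   # the real code serializes an ExtendedMetaInfo :request
  requested << piece
end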
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClientHandler.withPeersIo | def withPeersIo(peer, what = nil)
io = findIoByMetainfo(peer)
if io
yield io
else
s = ""
s = "when #{what}" if what
@logger.warn "Couldn't find the io for peer #{peer} #{what}"
end
end | ruby | def withPeersIo(peer, what = nil)
io = findIoByMetainfo(peer)
if io
yield io
else
s = ""
s = "when #{what}" if what
@logger.warn "Couldn't find the io for peer #{peer} #{what}"
end
end | [
"def",
"withPeersIo",
"(",
"peer",
",",
"what",
"=",
"nil",
")",
"io",
"=",
"findIoByMetainfo",
"(",
"peer",
")",
"if",
"io",
"yield",
"io",
"else",
"s",
"=",
"\"\"",
"s",
"=",
"\"when #{what}\"",
"if",
"what",
"@logger",
".",
"warn",
"\"Couldn't find the io for peer #{peer} #{what}\"",
"end",
"end"
] | Find the io associated with the peer and yield it to the passed block.
If no io is found an error is logged. | [
"Find",
"the",
"io",
"associated",
"with",
"the",
"peer",
"and",
"yield",
"it",
"to",
"the",
"passed",
"block",
".",
"If",
"no",
"io",
"is",
"found",
"an",
"error",
"is",
"logged",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L1451-L1460 | train |
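withPeersIo is a small guard helper: look something up, yield it if present, log if not. The same pattern in a generic, runnable form; the registry, keys and message text are made up for the sketch.

REGISTRY = { "peer-1" => :io_for_peer_1 }   # stand-in for the reactor's io lookup

def with_peers_io(key, what = nil)
  io = REGISTRY[key]
  if io
    yield io
  else
    warn "Couldn't find the io for peer #{key} #{what}"
  end
end

with_peers_io("peer-1", "when sending a block") { |io| puts "writing via #{io}" }
with_peers_io("peer-2", "when sending a block") { |io| puts "never reached" }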
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClientHandler.getPeersFromTracker | def getPeersFromTracker(torrentData, infoHash)
addPeer = Proc.new do |trackerPeer|
peer = Peer.new(trackerPeer)
peer.infoHash = infoHash
torrentData.peers.add peer
true
end
classifiedPeers = nil
replaceDisconnectedPeer = Proc.new do |trackerPeer|
classifiedPeers = ClassifiedPeers.new(torrentData.peers.all) if ! classifiedPeers
if classifiedPeers.disconnectedPeers.size > 0
torrentData.peers.delete classifiedPeers.disconnectedPeers.pop
addPeer.call trackerPeer
true
else
false
end
end
trackerclient = torrentData.trackerClient
addProc = addPeer
flipped = false
trackerclient.peers.each do |p|
if ! flipped && torrentData.peers.size >= @maxPeerCount
addProc = replaceDisconnectedPeer
flipped = true
end
# Don't treat ourself as a peer.
next if p.id && p.id == trackerclient.peerId
if ! torrentData.peers.findByAddr(p.ip, p.port)
@logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Adding tracker peer #{p} to peers list"
break if ! addProc.call(p)
end
end
end | ruby | def getPeersFromTracker(torrentData, infoHash)
addPeer = Proc.new do |trackerPeer|
peer = Peer.new(trackerPeer)
peer.infoHash = infoHash
torrentData.peers.add peer
true
end
classifiedPeers = nil
replaceDisconnectedPeer = Proc.new do |trackerPeer|
classifiedPeers = ClassifiedPeers.new(torrentData.peers.all) if ! classifiedPeers
if classifiedPeers.disconnectedPeers.size > 0
torrentData.peers.delete classifiedPeers.disconnectedPeers.pop
addPeer.call trackerPeer
true
else
false
end
end
trackerclient = torrentData.trackerClient
addProc = addPeer
flipped = false
trackerclient.peers.each do |p|
if ! flipped && torrentData.peers.size >= @maxPeerCount
addProc = replaceDisconnectedPeer
flipped = true
end
# Don't treat ourself as a peer.
next if p.id && p.id == trackerclient.peerId
if ! torrentData.peers.findByAddr(p.ip, p.port)
@logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Adding tracker peer #{p} to peers list"
break if ! addProc.call(p)
end
end
end | [
"def",
"getPeersFromTracker",
"(",
"torrentData",
",",
"infoHash",
")",
"addPeer",
"=",
"Proc",
".",
"new",
"do",
"|",
"trackerPeer",
"|",
"peer",
"=",
"Peer",
".",
"new",
"(",
"trackerPeer",
")",
"peer",
".",
"infoHash",
"=",
"infoHash",
"torrentData",
".",
"peers",
".",
"add",
"peer",
"true",
"end",
"classifiedPeers",
"=",
"nil",
"replaceDisconnectedPeer",
"=",
"Proc",
".",
"new",
"do",
"|",
"trackerPeer",
"|",
"classifiedPeers",
"=",
"ClassifiedPeers",
".",
"new",
"(",
"torrentData",
".",
"peers",
".",
"all",
")",
"if",
"!",
"classifiedPeers",
"if",
"classifiedPeers",
".",
"disconnectedPeers",
".",
"size",
">",
"0",
"torrentData",
".",
"peers",
".",
"delete",
"classifiedPeers",
".",
"disconnectedPeers",
".",
"pop",
"addPeer",
".",
"call",
"trackerPeer",
"true",
"else",
"false",
"end",
"end",
"trackerclient",
"=",
"torrentData",
".",
"trackerClient",
"addProc",
"=",
"addPeer",
"flipped",
"=",
"false",
"trackerclient",
".",
"peers",
".",
"each",
"do",
"|",
"p",
"|",
"if",
"!",
"flipped",
"&&",
"torrentData",
".",
"peers",
".",
"size",
">=",
"@maxPeerCount",
"addProc",
"=",
"replaceDisconnectedPeer",
"flipped",
"=",
"true",
"end",
"next",
"if",
"p",
".",
"id",
"&&",
"p",
".",
"id",
"==",
"trackerclient",
".",
"peerId",
"if",
"!",
"torrentData",
".",
"peers",
".",
"findByAddr",
"(",
"p",
".",
"ip",
",",
"p",
".",
"port",
")",
"@logger",
".",
"debug",
"\"#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Adding tracker peer #{p} to peers list\"",
"break",
"if",
"!",
"addProc",
".",
"call",
"(",
"p",
")",
"end",
"end",
"end"
] | Update our internal peer list for this torrent from the tracker client | [
"Update",
"our",
"internal",
"peer",
"list",
"for",
"this",
"torrent",
"from",
"the",
"tracker",
"client"
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L1517-L1556 | train |
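Note the Proc swap above: while below the peer cap the loop uses addPeer, and once the cap is hit it flips to replaceDisconnectedPeer, which only succeeds if a disconnected peer can be evicted. A runnable sketch of that strategy flip on a capped list; the peer hashes and cap are invented.

MAX_PEERS = 3
peers = [ { addr: "a", state: :established },
          { addr: "b", state: :disconnected },
          { addr: "c", state: :established } ]

add_peer = ->(p) { peers << p; true }

replace_disconnected_peer = lambda do |p|
  victim = peers.find { |x| x[:state] == :disconnected }
  return false unless victim          # nothing to evict: stop taking new peers
  peers.delete(victim)
  peers << p
  true
end

strategy = add_peer
[{ addr: "d", state: :new }, { addr: "e", state: :new }].each do |candidate|
  strategy = replace_disconnected_peer if peers.size >= MAX_PEERS
  break unless strategy.call(candidate)
end

p peers.map { |x| x[:addr] }   # => ["a", "c", "d"]  ("e" is dropped, nothing left to evict)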
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClientHandler.handleRemoveTorrent | def handleRemoveTorrent(infoHash, deleteFiles)
torrentData = @torrentData.delete infoHash
if ! torrentData
@logger.warn "Asked to remove a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
return
end
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent. #{deleteFiles ? "Will" : "Wont"} delete downloaded files."
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.metainfoRequestTimer" if ! torrentData.metainfoRequestTimer
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.managePeersTimer" if ! torrentData.managePeersTimer
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.checkMetadataPieceManagerTimer" if ! torrentData.checkMetadataPieceManagerTimer
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.checkPieceManagerTimer" if ! torrentData.checkPieceManagerTimer
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.requestBlocksTimer" if ! torrentData.requestBlocksTimer
# Stop all timers
cancelTimer torrentData.metainfoRequestTimer if torrentData.metainfoRequestTimer
cancelTimer torrentData.managePeersTimer if torrentData.managePeersTimer
cancelTimer torrentData.checkMetadataPieceManagerTimer if torrentData.checkMetadataPieceManagerTimer
cancelTimer torrentData.checkPieceManagerTimer if torrentData.checkPieceManagerTimer
cancelTimer torrentData.requestBlocksTimer if torrentData.requestBlocksTimer
torrentData.trackerClient.removePeersChangedListener(torrentData.peerChangeListener)
# Remove all the peers for this torrent.
torrentData.peers.all.each do |peer|
if peer.state != :disconnected
# Close socket
withPeersIo(peer, "when removing torrent") do |io|
setPeerDisconnected(peer)
close(io)
@logger.debug "Closing connection to peer #{peer}"
end
end
torrentData.peers.delete peer
end
# Stop tracker client
torrentData.trackerClient.stop if torrentData.trackerClient
# Stop PieceManagers
torrentData.pieceManager.stop if torrentData.pieceManager
torrentData.metainfoPieceState.stop if torrentData.metainfoPieceState
# Remove metainfo file if it exists
begin
torrentData.metainfoPieceState.remove if torrentData.metainfoPieceState
rescue
@logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Deleting metainfo file for torrent #{QuartzTorrent.bytesToHex(infoHash)} failed: #{$!}"
end
if deleteFiles
if torrentData.info
begin
path = @baseDirectory + File::SEPARATOR + torrentData.info.name
if File.exists? path
FileUtils.rm_r path
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Deleted #{path}"
else
@logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Deleting '#{path}' for torrent #{QuartzTorrent.bytesToHex(infoHash)} failed: #{$!}"
end
rescue
@logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: When removing torrent, deleting '#{path}' failed because it doesn't exist"
end
end
end
dequeue
end | ruby | def handleRemoveTorrent(infoHash, deleteFiles)
torrentData = @torrentData.delete infoHash
if ! torrentData
@logger.warn "Asked to remove a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
return
end
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent. #{deleteFiles ? "Will" : "Wont"} delete downloaded files."
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.metainfoRequestTimer" if ! torrentData.metainfoRequestTimer
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.managePeersTimer" if ! torrentData.managePeersTimer
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.checkMetadataPieceManagerTimer" if ! torrentData.checkMetadataPieceManagerTimer
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.checkPieceManagerTimer" if ! torrentData.checkPieceManagerTimer
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.requestBlocksTimer" if ! torrentData.requestBlocksTimer
# Stop all timers
cancelTimer torrentData.metainfoRequestTimer if torrentData.metainfoRequestTimer
cancelTimer torrentData.managePeersTimer if torrentData.managePeersTimer
cancelTimer torrentData.checkMetadataPieceManagerTimer if torrentData.checkMetadataPieceManagerTimer
cancelTimer torrentData.checkPieceManagerTimer if torrentData.checkPieceManagerTimer
cancelTimer torrentData.requestBlocksTimer if torrentData.requestBlocksTimer
torrentData.trackerClient.removePeersChangedListener(torrentData.peerChangeListener)
# Remove all the peers for this torrent.
torrentData.peers.all.each do |peer|
if peer.state != :disconnected
# Close socket
withPeersIo(peer, "when removing torrent") do |io|
setPeerDisconnected(peer)
close(io)
@logger.debug "Closing connection to peer #{peer}"
end
end
torrentData.peers.delete peer
end
# Stop tracker client
torrentData.trackerClient.stop if torrentData.trackerClient
# Stop PieceManagers
torrentData.pieceManager.stop if torrentData.pieceManager
torrentData.metainfoPieceState.stop if torrentData.metainfoPieceState
# Remove metainfo file if it exists
begin
torrentData.metainfoPieceState.remove if torrentData.metainfoPieceState
rescue
@logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Deleting metainfo file for torrent #{QuartzTorrent.bytesToHex(infoHash)} failed: #{$!}"
end
if deleteFiles
if torrentData.info
begin
path = @baseDirectory + File::SEPARATOR + torrentData.info.name
if File.exists? path
FileUtils.rm_r path
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Deleted #{path}"
else
@logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Deleting '#{path}' for torrent #{QuartzTorrent.bytesToHex(infoHash)} failed: #{$!}"
end
rescue
@logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: When removing torrent, deleting '#{path}' failed because it doesn't exist"
end
end
end
dequeue
end | [
"def",
"handleRemoveTorrent",
"(",
"infoHash",
",",
"deleteFiles",
")",
"torrentData",
"=",
"@torrentData",
".",
"delete",
"infoHash",
"if",
"!",
"torrentData",
"@logger",
".",
"warn",
"\"Asked to remove a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}\"",
"return",
"end",
"@logger",
".",
"info",
"\"#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent. #{deleteFiles ? \"Will\" : \"Wont\"} delete downloaded files.\"",
"@logger",
".",
"info",
"\"#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.metainfoRequestTimer\"",
"if",
"!",
"torrentData",
".",
"metainfoRequestTimer",
"@logger",
".",
"info",
"\"#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.managePeersTimer\"",
"if",
"!",
"torrentData",
".",
"managePeersTimer",
"@logger",
".",
"info",
"\"#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.checkMetadataPieceManagerTimer\"",
"if",
"!",
"torrentData",
".",
"checkMetadataPieceManagerTimer",
"@logger",
".",
"info",
"\"#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.checkPieceManagerTimer\"",
"if",
"!",
"torrentData",
".",
"checkPieceManagerTimer",
"@logger",
".",
"info",
"\"#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.requestBlocksTimer\"",
"if",
"!",
"torrentData",
".",
"requestBlocksTimer",
"cancelTimer",
"torrentData",
".",
"metainfoRequestTimer",
"if",
"torrentData",
".",
"metainfoRequestTimer",
"cancelTimer",
"torrentData",
".",
"managePeersTimer",
"if",
"torrentData",
".",
"managePeersTimer",
"cancelTimer",
"torrentData",
".",
"checkMetadataPieceManagerTimer",
"if",
"torrentData",
".",
"checkMetadataPieceManagerTimer",
"cancelTimer",
"torrentData",
".",
"checkPieceManagerTimer",
"if",
"torrentData",
".",
"checkPieceManagerTimer",
"cancelTimer",
"torrentData",
".",
"requestBlocksTimer",
"if",
"torrentData",
".",
"requestBlocksTimer",
"torrentData",
".",
"trackerClient",
".",
"removePeersChangedListener",
"(",
"torrentData",
".",
"peerChangeListener",
")",
"torrentData",
".",
"peers",
".",
"all",
".",
"each",
"do",
"|",
"peer",
"|",
"if",
"peer",
".",
"state",
"!=",
":disconnected",
"withPeersIo",
"(",
"peer",
",",
"\"when removing torrent\"",
")",
"do",
"|",
"io",
"|",
"setPeerDisconnected",
"(",
"peer",
")",
"close",
"(",
"io",
")",
"@logger",
".",
"debug",
"\"Closing connection to peer #{peer}\"",
"end",
"end",
"torrentData",
".",
"peers",
".",
"delete",
"peer",
"end",
"torrentData",
".",
"trackerClient",
".",
"stop",
"if",
"torrentData",
".",
"trackerClient",
"torrentData",
".",
"pieceManager",
".",
"stop",
"if",
"torrentData",
".",
"pieceManager",
"torrentData",
".",
"metainfoPieceState",
".",
"stop",
"if",
"torrentData",
".",
"metainfoPieceState",
"begin",
"torrentData",
".",
"metainfoPieceState",
".",
"remove",
"if",
"torrentData",
".",
"metainfoPieceState",
"rescue",
"@logger",
".",
"warn",
"\"#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Deleting metainfo file for torrent #{QuartzTorrent.bytesToHex(infoHash)} failed: #{$!}\"",
"end",
"if",
"deleteFiles",
"if",
"torrentData",
".",
"info",
"begin",
"path",
"=",
"@baseDirectory",
"+",
"File",
"::",
"SEPARATOR",
"+",
"torrentData",
".",
"info",
".",
"name",
"if",
"File",
".",
"exists?",
"path",
"FileUtils",
".",
"rm_r",
"path",
"@logger",
".",
"info",
"\"#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Deleted #{path}\"",
"else",
"@logger",
".",
"warn",
"\"#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Deleting '#{path}' for torrent #{QuartzTorrent.bytesToHex(infoHash)} failed: #{$!}\"",
"end",
"rescue",
"@logger",
".",
"warn",
"\"#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: When removing torrent, deleting '#{path}' failed because it doesn't exist\"",
"end",
"end",
"end",
"dequeue",
"end"
] | Remove a torrent that we are downloading. | [
"Remove",
"a",
"torrent",
"that",
"we",
"are",
"downloading",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L1559-L1627 | train |
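The optional file cleanup above tolerates a path that is already gone and reports anything else that goes wrong. A small standalone sketch of the same guarded delete; the base directory and torrent name are placeholders created in a temp dir so the snippet can run on its own.

require 'tmpdir'
require 'fileutils'

base = Dir.mktmpdir            # stand-in for @baseDirectory
name = "example-torrent"       # stand-in for torrentData.info.name
path = File.join(base, name)
FileUtils.mkdir_p(path)        # pretend some data was downloaded

begin
  if File.exist?(path)
    FileUtils.rm_r(path)
    puts "Deleted #{path}"
  else
    puts "Nothing to delete at #{path}"
  end
rescue => e
  warn "Deleting '#{path}' failed: #{e}"
end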
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClientHandler.handlePause | def handlePause(infoHash, value)
torrentData = @torrentData[infoHash]
if ! torrentData
@logger.warn "Asked to pause a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
return
end
return if torrentData.paused == value
torrentData.paused = value
if !value
# On unpause, queue the torrent since there might not be room for it to run.
# Make sure it goes to the head of the queue.
queue(torrentData, :unshift)
end
setFrozen infoHash, value if ! torrentData.queued
dequeue
end | ruby | def handlePause(infoHash, value)
torrentData = @torrentData[infoHash]
if ! torrentData
@logger.warn "Asked to pause a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
return
end
return if torrentData.paused == value
torrentData.paused = value
if !value
# On unpause, queue the torrent since there might not be room for it to run.
# Make sure it goes to the head of the queue.
queue(torrentData, :unshift)
end
setFrozen infoHash, value if ! torrentData.queued
dequeue
end | [
"def",
"handlePause",
"(",
"infoHash",
",",
"value",
")",
"torrentData",
"=",
"@torrentData",
"[",
"infoHash",
"]",
"if",
"!",
"torrentData",
"@logger",
".",
"warn",
"\"Asked to pause a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}\"",
"return",
"end",
"return",
"if",
"torrentData",
".",
"paused",
"==",
"value",
"torrentData",
".",
"paused",
"=",
"value",
"if",
"!",
"value",
"queue",
"(",
"torrentData",
",",
":unshift",
")",
"end",
"setFrozen",
"infoHash",
",",
"value",
"if",
"!",
"torrentData",
".",
"queued",
"dequeue",
"end"
] | Pause or unpause a torrent that we are downloading. | [
"Pause",
"or",
"unpause",
"a",
"torrent",
"that",
"we",
"are",
"downloading",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L1630-L1650 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClientHandler.queue | def queue(torrentData, mode = :queue)
return if torrentData.queued
# Queue the torrent
if mode == :unshift
@torrentQueue.unshift torrentData
else
@torrentQueue.push torrentData
end
setFrozen torrentData, true if ! torrentData.paused
end | ruby | def queue(torrentData, mode = :queue)
return if torrentData.queued
# Queue the torrent
if mode == :unshift
@torrentQueue.unshift torrentData
else
@torrentQueue.push torrentData
end
setFrozen torrentData, true if ! torrentData.paused
end | [
"def",
"queue",
"(",
"torrentData",
",",
"mode",
"=",
":queue",
")",
"return",
"if",
"torrentData",
".",
"queued",
"if",
"mode",
"==",
":unshift",
"@torrentQueue",
".",
"unshift",
"torrentData",
"else",
"@torrentQueue",
".",
"push",
"torrentData",
"end",
"setFrozen",
"torrentData",
",",
"true",
"if",
"!",
"torrentData",
".",
"paused",
"end"
] | Queue a torrent | [
"Queue",
"a",
"torrent"
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L1653-L1664 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClientHandler.dequeue | def dequeue
torrents = @torrentQueue.dequeue(@torrentData.values)
torrents.each do |torrentData|
if torrentData.state == :initializing
initTorrent torrentData
else
setFrozen torrentData, false if ! torrentData.paused
end
end
end | ruby | def dequeue
torrents = @torrentQueue.dequeue(@torrentData.values)
torrents.each do |torrentData|
if torrentData.state == :initializing
initTorrent torrentData
else
setFrozen torrentData, false if ! torrentData.paused
end
end
end | [
"def",
"dequeue",
"torrents",
"=",
"@torrentQueue",
".",
"dequeue",
"(",
"@torrentData",
".",
"values",
")",
"torrents",
".",
"each",
"do",
"|",
"torrentData",
"|",
"if",
"torrentData",
".",
"state",
"==",
":initializing",
"initTorrent",
"torrentData",
"else",
"setFrozen",
"torrentData",
",",
"false",
"if",
"!",
"torrentData",
".",
"paused",
"end",
"end",
"end"
] | Dequeue any torrents that can now run based on available space | [
"Dequeue",
"any",
"torrents",
"that",
"can",
"now",
"run",
"based",
"on",
"available",
"space"
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L1667-L1676 | train |
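queue, dequeue and handlePause cooperate: paused or freshly added torrents sit in a queue, unpausing pushes a torrent to the head with :unshift, and dequeue starts whatever now fits. A toy queue with a fixed running-slot limit shows the shape of that logic; the real TorrentQueue consults per-torrent state instead of a simple count, so the class below is illustrative only.

class ToyTorrentQueue
  def initialize(max_running)
    @max_running = max_running
    @queue = []
  end

  def push(t)
    @queue.push(t)       # normal enqueue
  end

  def unshift(t)
    @queue.unshift(t)    # unpaused torrents jump to the head of the line
  end

  # Start queued torrents while there is room among the already running ones.
  def dequeue(running)
    started = []
    while running.size + started.size < @max_running && !@queue.empty?
      started << @queue.shift
    end
    started
  end
end

q = ToyTorrentQueue.new(2)
q.push("t1"); q.push("t2"); q.unshift("t0")
p q.dequeue(["already-running"])   # => ["t0"] (only one free slot)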
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClientHandler.setFrozen | def setFrozen(torrent, value)
torrentData = torrent
if ! torrent.is_a?(TorrentData)
torrentData = @torrentData[torrent]
if ! torrentData
@logger.warn "Asked to freeze a non-existent torrent #{QuartzTorrent.bytesToHex(torrent)}"
return
end
end
if value
# Disconnect from all peers so we won't reply to any messages.
torrentData.peers.all.each do |peer|
if peer.state != :disconnected
# Close socket
withPeersIo(peer, "when removing torrent") do |io|
setPeerDisconnected(peer)
close(io)
end
end
torrentData.peers.delete peer
end
else
# Get our list of peers and start connecting right away
# Non-recurring and immediate timer
torrentData.managePeersTimer =
@reactor.scheduleTimer(@managePeersPeriod, [:manage_peers, torrentData.infoHash], false, true)
end
end | ruby | def setFrozen(torrent, value)
torrentData = torrent
if ! torrent.is_a?(TorrentData)
torrentData = @torrentData[torrent]
if ! torrentData
@logger.warn "Asked to freeze a non-existent torrent #{QuartzTorrent.bytesToHex(torrent)}"
return
end
end
if value
# Disconnect from all peers so we won't reply to any messages.
torrentData.peers.all.each do |peer|
if peer.state != :disconnected
# Close socket
withPeersIo(peer, "when removing torrent") do |io|
setPeerDisconnected(peer)
close(io)
end
end
torrentData.peers.delete peer
end
else
# Get our list of peers and start connecting right away
# Non-recurring and immediate timer
torrentData.managePeersTimer =
@reactor.scheduleTimer(@managePeersPeriod, [:manage_peers, torrentData.infoHash], false, true)
end
end | [
"def",
"setFrozen",
"(",
"torrent",
",",
"value",
")",
"torrentData",
"=",
"torrent",
"if",
"!",
"torrent",
".",
"is_a?",
"(",
"TorrentData",
")",
"torrentData",
"=",
"@torrentData",
"[",
"torrent",
"]",
"if",
"!",
"torrentData",
"@logger",
".",
"warn",
"\"Asked to freeze a non-existent torrent #{QuartzTorrent.bytesToHex(torrent)}\"",
"return",
"end",
"end",
"if",
"value",
"torrentData",
".",
"peers",
".",
"all",
".",
"each",
"do",
"|",
"peer",
"|",
"if",
"peer",
".",
"state",
"!=",
":disconnected",
"withPeersIo",
"(",
"peer",
",",
"\"when removing torrent\"",
")",
"do",
"|",
"io",
"|",
"setPeerDisconnected",
"(",
"peer",
")",
"close",
"(",
"io",
")",
"end",
"end",
"torrentData",
".",
"peers",
".",
"delete",
"peer",
"end",
"else",
"torrentData",
".",
"managePeersTimer",
"=",
"@reactor",
".",
"scheduleTimer",
"(",
"@managePeersPeriod",
",",
"[",
":manage_peers",
",",
"torrentData",
".",
"infoHash",
"]",
",",
"false",
",",
"true",
")",
"end",
"end"
] | Freeze or unfreeze a torrent. If value is true, then we disconnect from all peers for this torrent and forget
the peers. If value is false, we start reconnecting to peers.
Parameter torrent can be an infoHash or TorrentData | [
"Freeze",
"or",
"unfreeze",
"a",
"torrent",
".",
"If",
"value",
"is",
"true",
"then",
"we",
"disconnect",
"from",
"all",
"peers",
"for",
"this",
"torrent",
"and",
"forget",
"the",
"peers",
".",
"If",
"value",
"is",
"false",
"we",
"start",
"reconnecting",
"to",
"peers",
".",
"Parameter",
"torrent",
"can",
"be",
"an",
"infoHash",
"or",
"TorrentData"
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L1681-L1709 | train |
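setFrozen accepts either a TorrentData object or a raw info hash and normalizes to the object before doing any work. The same accept-object-or-key idiom in isolation; the record layout and key names are invented for the sketch.

RECORDS = { "hash-1" => { name: "ubuntu.iso", frozen: false } }

# Accept either the record itself or its key, as setFrozen does with
# a TorrentData object or an info hash.
def set_frozen(torrent, value)
  record = torrent.is_a?(Hash) ? torrent : RECORDS[torrent]
  unless record
    warn "Asked to freeze a non-existent torrent #{torrent}"
    return
  end
  record[:frozen] = value
end

set_frozen("hash-1", true)                # look up by key
set_frozen(RECORDS["hash-1"], false)      # or pass the record directly
p RECORDS["hash-1"]                       # => {:name=>"ubuntu.iso", :frozen=>false}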
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClient.addTorrentByMetainfo | def addTorrentByMetainfo(metainfo)
raise "addTorrentByMetainfo should be called with a Metainfo object, not #{metainfo.class}" if ! metainfo.is_a?(Metainfo)
trackerclient = TrackerClient.createFromMetainfo(metainfo, false)
addTorrent(trackerclient, metainfo.infoHash, metainfo.info)
end | ruby | def addTorrentByMetainfo(metainfo)
raise "addTorrentByMetainfo should be called with a Metainfo object, not #{metainfo.class}" if ! metainfo.is_a?(Metainfo)
trackerclient = TrackerClient.createFromMetainfo(metainfo, false)
addTorrent(trackerclient, metainfo.infoHash, metainfo.info)
end | [
"def",
"addTorrentByMetainfo",
"(",
"metainfo",
")",
"raise",
"\"addTorrentByMetainfo should be called with a Metainfo object, not #{metainfo.class}\"",
"if",
"!",
"metainfo",
".",
"is_a?",
"(",
"Metainfo",
")",
"trackerclient",
"=",
"TrackerClient",
".",
"createFromMetainfo",
"(",
"metainfo",
",",
"false",
")",
"addTorrent",
"(",
"trackerclient",
",",
"metainfo",
".",
"infoHash",
",",
"metainfo",
".",
"info",
")",
"end"
] | Add a new torrent to manage described by a Metainfo object. This is generally the
method to call if you have a .torrent file.
Returns the infoHash of the newly added torrent. | [
"Add",
"a",
"new",
"torrent",
"to",
"manage",
"described",
"by",
"a",
"Metainfo",
"object",
".",
"This",
"is",
"generally",
"the",
"method",
"to",
"call",
"if",
"you",
"have",
"a",
".",
"torrent",
"file",
".",
"Returns",
"the",
"infoHash",
"of",
"the",
"newly",
"added",
"torrent",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L1780-L1784 | train |
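A hedged usage sketch. It assumes a QuartzTorrent::PeerClient that has already been constructed and started (shown as client) and a Metainfo object parsed from a .torrent file elsewhere; neither construction is shown in this record. QuartzTorrent.bytesToHex is the same helper the logging above uses to print info hashes.

# client   : an already running QuartzTorrent::PeerClient (construction assumed)
# metainfo : a QuartzTorrent::Metainfo loaded from a .torrent file (loading assumed)
info_hash = client.addTorrentByMetainfo(metainfo)
puts "added torrent #{QuartzTorrent.bytesToHex(info_hash)}"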
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClient.addTorrentWithoutMetainfo | def addTorrentWithoutMetainfo(announceUrl, infoHash, magnet = nil)
raise "addTorrentWithoutMetainfo should be called with a Magnet object, not a #{magnet.class}" if magnet && ! magnet.is_a?(MagnetURI)
trackerclient = TrackerClient.create(announceUrl, infoHash, 0, false)
addTorrent(trackerclient, infoHash, nil, magnet)
end | ruby | def addTorrentWithoutMetainfo(announceUrl, infoHash, magnet = nil)
raise "addTorrentWithoutMetainfo should be called with a Magnet object, not a #{magnet.class}" if magnet && ! magnet.is_a?(MagnetURI)
trackerclient = TrackerClient.create(announceUrl, infoHash, 0, false)
addTorrent(trackerclient, infoHash, nil, magnet)
end | [
"def",
"addTorrentWithoutMetainfo",
"(",
"announceUrl",
",",
"infoHash",
",",
"magnet",
"=",
"nil",
")",
"raise",
"\"addTorrentWithoutMetainfo should be called with a Magnet object, not a #{magnet.class}\"",
"if",
"magnet",
"&&",
"!",
"magnet",
".",
"is_a?",
"(",
"MagnetURI",
")",
"trackerclient",
"=",
"TrackerClient",
".",
"create",
"(",
"announceUrl",
",",
"infoHash",
",",
"0",
",",
"false",
")",
"addTorrent",
"(",
"trackerclient",
",",
"infoHash",
",",
"nil",
",",
"magnet",
")",
"end"
] | Add a new torrent to manage given an announceUrl and an infoHash. The announceUrl may be a list.
Returns the infoHash of the newly added torrent. | [
"Add",
"a",
"new",
"torrent",
"to",
"manage",
"given",
"an",
"announceUrl",
"and",
"an",
"infoHash",
".",
"The",
"announceUrl",
"may",
"be",
"a",
"list",
".",
"Returns",
"the",
"infoHash",
"of",
"the",
"newly",
"added",
"torrent",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L1788-L1792 | train |
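When only an announce URL and an info hash are known, the call above starts a download whose metainfo will be fetched from peers. Hedged sketch: client is an already running PeerClient, the tracker URL and hash are placeholders, and packing the 40-character hex string into 20 raw bytes is an assumption about the expected argument form, consistent with how hashes are printed with bytesToHex elsewhere in this file.

announce  = "http://tracker.example.com:6969/announce"        # placeholder tracker
hex_hash  = "0123456789abcdef0123456789abcdef01234567"        # placeholder info hash
info_hash = [hex_hash].pack("H*")                              # 20 raw bytes

client.addTorrentWithoutMetainfo(announce, info_hash)   # metainfo is then requested from peers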
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClient.addTorrentByMagnetURI | def addTorrentByMagnetURI(magnet)
raise "addTorrentByMagnetURI should be called with a MagnetURI object, not a #{magnet.class}" if ! magnet.is_a?(MagnetURI)
trackerUrl = magnet.trackers
raise "addTorrentByMagnetURI can't handle magnet links that don't have a tracker URL." if !trackerUrl
addTorrentWithoutMetainfo(trackerUrl, magnet.btInfoHash, magnet)
end | ruby | def addTorrentByMagnetURI(magnet)
raise "addTorrentByMagnetURI should be called with a MagnetURI object, not a #{magnet.class}" if ! magnet.is_a?(MagnetURI)
trackerUrl = magnet.trackers
raise "addTorrentByMagnetURI can't handle magnet links that don't have a tracker URL." if !trackerUrl
addTorrentWithoutMetainfo(trackerUrl, magnet.btInfoHash, magnet)
end | [
"def",
"addTorrentByMagnetURI",
"(",
"magnet",
")",
"raise",
"\"addTorrentByMagnetURI should be called with a MagnetURI object, not a #{magnet.class}\"",
"if",
"!",
"magnet",
".",
"is_a?",
"(",
"MagnetURI",
")",
"trackerUrl",
"=",
"magnet",
".",
"trackers",
"raise",
"\"addTorrentByMagnetURI can't handle magnet links that don't have a tracker URL.\"",
"if",
"!",
"trackerUrl",
"addTorrentWithoutMetainfo",
"(",
"trackerUrl",
",",
"magnet",
".",
"btInfoHash",
",",
"magnet",
")",
"end"
] | Add a new torrent to manage given a MagnetURI object. This is generally the
method to call if you have a magnet link.
Returns the infoHash of the newly added torrent. | [
"Add",
"a",
"new",
"torrent",
"to",
"manage",
"given",
"a",
"MagnetURI",
"object",
".",
"This",
"is",
"generally",
"the",
"method",
"to",
"call",
"if",
"you",
"have",
"a",
"magnet",
"link",
".",
"Returns",
"the",
"infoHash",
"of",
"the",
"newly",
"added",
"torrent",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L1797-L1804 | train |
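Magnet links go through MagnetURI, and the method raises unless the magnet actually carries a tracker URL. Hedged sketch: client is an already running PeerClient, the link is a placeholder, and MagnetURI.new accepting the raw magnet string is an assumption not confirmed by this record.

link   = "magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567" \
         "&tr=http%3A%2F%2Ftracker.example.com%3A6969%2Fannounce"
magnet = QuartzTorrent::MagnetURI.new(link)   # assumed constructor form
client.addTorrentByMagnetURI(magnet)          # raises if the magnet has no tracker URL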
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClient.adjustBytesDownloaded | def adjustBytesDownloaded(infoHash, adjustment)
return if ! adjustment
raise "Bytes downloaded adjustment must be an Integer, not a #{adjustment.class}" if !adjustment.is_a?(Integer)
@handler.adjustBytesDownloaded(infoHash, adjustment)
end | ruby | def adjustBytesDownloaded(infoHash, adjustment)
return if ! adjustment
raise "Bytes downloaded adjustment must be an Integer, not a #{adjustment.class}" if !adjustment.is_a?(Integer)
@handler.adjustBytesDownloaded(infoHash, adjustment)
end | [
"def",
"adjustBytesDownloaded",
"(",
"infoHash",
",",
"adjustment",
")",
"return",
"if",
"!",
"adjustment",
"raise",
"\"Bytes downloaded adjustment must be an Integer, not a #{adjustment.class}\"",
"if",
"!",
"adjustment",
".",
"is_a?",
"(",
"Integer",
")",
"@handler",
".",
"adjustBytesDownloaded",
"(",
"infoHash",
",",
"adjustment",
")",
"end"
] | Adjust the bytesDownloaded property of the specified torrent by the passed amount.
Adjustment should be an integer. It is added to the current bytesDownloaded amount. | [
"Adjust",
"the",
"bytesDownloaded",
"property",
"of",
"the",
"specified",
"torrent",
"by",
"the",
"passed",
"amount",
".",
"Adjustment",
"should",
"be",
"an",
"integer",
".",
"It",
"is",
"added",
"to",
"the",
"current",
"bytesUploaded",
"amount",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L1854-L1858 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/peerclient.rb | QuartzTorrent.PeerClient.addTorrent | def addTorrent(trackerclient, infoHash, info, magnet = nil)
trackerclient.port = @port
torrentData = @handler.addTrackerClient(infoHash, info, trackerclient)
torrentData.magnet = magnet
trackerclient.dynamicRequestParamsBuilder = Proc.new do
torrentData = @handler.torrentData[infoHash]
dataLength = (info ? info.dataLength : nil)
result = TrackerDynamicRequestParams.new(dataLength)
if torrentData && torrentData.blockState
result.left = torrentData.blockState.totalLength - torrentData.blockState.completedLength
result.downloaded = torrentData.bytesDownloadedDataOnly
result.uploaded = torrentData.bytesUploadedDataOnly
end
result
end
# If we haven't started yet then add this trackerclient to a queue of
# trackerclients to start once we are started. If we start too soon we
# will connect to the tracker, and it will try to connect back to us before we are listening.
if ! trackerclient.started?
if @stopped
@toStart.push trackerclient
else
trackerclient.start
end
end
torrentData.infoHash
end | ruby | def addTorrent(trackerclient, infoHash, info, magnet = nil)
trackerclient.port = @port
torrentData = @handler.addTrackerClient(infoHash, info, trackerclient)
torrentData.magnet = magnet
trackerclient.dynamicRequestParamsBuilder = Proc.new do
torrentData = @handler.torrentData[infoHash]
dataLength = (info ? info.dataLength : nil)
result = TrackerDynamicRequestParams.new(dataLength)
if torrentData && torrentData.blockState
result.left = torrentData.blockState.totalLength - torrentData.blockState.completedLength
result.downloaded = torrentData.bytesDownloadedDataOnly
result.uploaded = torrentData.bytesUploadedDataOnly
end
result
end
# If we haven't started yet then add this trackerclient to a queue of
# trackerclients to start once we are started. If we start too soon we
# will connect to the tracker, and it will try to connect back to us before we are listening.
if ! trackerclient.started?
if @stopped
@toStart.push trackerclient
else
trackerclient.start
end
end
torrentData.infoHash
end | [
"def",
"addTorrent",
"(",
"trackerclient",
",",
"infoHash",
",",
"info",
",",
"magnet",
"=",
"nil",
")",
"trackerclient",
".",
"port",
"=",
"@port",
"torrentData",
"=",
"@handler",
".",
"addTrackerClient",
"(",
"infoHash",
",",
"info",
",",
"trackerclient",
")",
"torrentData",
".",
"magnet",
"=",
"magnet",
"trackerclient",
".",
"dynamicRequestParamsBuilder",
"=",
"Proc",
".",
"new",
"do",
"torrentData",
"=",
"@handler",
".",
"torrentData",
"[",
"infoHash",
"]",
"dataLength",
"=",
"(",
"info",
"?",
"info",
".",
"dataLength",
":",
"nil",
")",
"result",
"=",
"TrackerDynamicRequestParams",
".",
"new",
"(",
"dataLength",
")",
"if",
"torrentData",
"&&",
"torrentData",
".",
"blockState",
"result",
".",
"left",
"=",
"torrentData",
".",
"blockState",
".",
"totalLength",
"-",
"torrentData",
".",
"blockState",
".",
"completedLength",
"result",
".",
"downloaded",
"=",
"torrentData",
".",
"bytesDownloadedDataOnly",
"result",
".",
"uploaded",
"=",
"torrentData",
".",
"bytesUploadedDataOnly",
"end",
"result",
"end",
"if",
"!",
"trackerclient",
".",
"started?",
"if",
"@stopped",
"@toStart",
".",
"push",
"trackerclient",
"else",
"trackerclient",
".",
"start",
"end",
"end",
"torrentData",
".",
"infoHash",
"end"
] | Helper method for adding a torrent. | [
"Helper",
"method",
"for",
"adding",
"a",
"torrent",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/peerclient.rb#L1867-L1897 | train |
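The notable part of the helper above is the lazily evaluated `Proc`: announce parameters are computed when the tracker request is made, not when the torrent is added, so they always reflect current progress. A self-contained sketch of that pattern (the class and names here are illustrative, not the gem's API):

```ruby
# Deferred-parameters pattern: the builder Proc is stored now but only runs
# at announce time, so it always sees the latest progress numbers.
class FakeTracker
  attr_accessor :params_builder

  def announce
    params_builder.call   # evaluated at announce time
  end
end

progress = { downloaded: 0, left: 1_000_000 }

tracker = FakeTracker.new
tracker.params_builder = Proc.new do
  { downloaded: progress[:downloaded], left: progress[:left] }
end

# Progress changes after the builder was installed...
progress[:downloaded] = 250_000
progress[:left]       = 750_000

# ...and the announce still reports the up-to-date values.
p tracker.announce   # => {:downloaded=>250000, :left=>750000}
```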
rjurado01/rapidoc | lib/rapidoc/config.rb | Rapidoc.Config.target_dir | def target_dir( f = nil )
if File.exists?( config_file_path )
form_file_name( target_dir_from_config, f )
else
form_file_name( File.join( ::Rails.root.to_s, 'public/docs' ), f )
end
end | ruby | def target_dir( f = nil )
if File.exists?( config_file_path )
form_file_name( target_dir_from_config, f )
else
form_file_name( File.join( ::Rails.root.to_s, 'public/docs' ), f )
end
end | [
"def",
"target_dir",
"(",
"f",
"=",
"nil",
")",
"if",
"File",
".",
"exists?",
"(",
"config_file_path",
")",
"form_file_name",
"(",
"target_dir_from_config",
",",
"f",
")",
"else",
"form_file_name",
"(",
"File",
".",
"join",
"(",
"::",
"Rails",
".",
"root",
".",
"to_s",
",",
"'public/docs'",
")",
",",
"f",
")",
"end",
"end"
] | return the directory where rapidoc generates the doc | [
"return",
"the",
"directory",
"where",
"rapidoc",
"generates",
"the",
"doc"
] | 03b7a8f29a37dd03f4ed5036697b48551d3b4ae6 | https://github.com/rjurado01/rapidoc/blob/03b7a8f29a37dd03f4ed5036697b48551d3b4ae6/lib/rapidoc/config.rb#L79-L85 | train |
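A self-contained sketch of the fallback behaviour above: prefer a directory taken from a config file when one exists, otherwise use a default, and optionally append a file name. The helper and paths below are illustrative, not rapidoc's API.

```ruby
# Minimal stand-in for the config-or-default logic in target_dir.
def docs_dir(config_path, default_dir, file = nil)
  dir = File.exist?(config_path) ? File.read(config_path).strip : default_dir
  file ? File.join(dir, file) : dir
end

p docs_dir("missing_rapidoc.yml", "public/docs")                # => "public/docs"
p docs_dir("missing_rapidoc.yml", "public/docs", "index.html")  # => "public/docs/index.html"
```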
rjurado01/rapidoc | lib/rapidoc/config.rb | Rapidoc.Config.examples_dir | def examples_dir( f = nil )
if File.exists?( config_file_path )
form_file_name( examples_dir_from_config_file, f )
else
form_file_name( config_dir( '/examples' ), f )
end
end | ruby | def examples_dir( f = nil )
if File.exists?( config_file_path )
form_file_name( examples_dir_from_config_file, f )
else
form_file_name( config_dir( '/examples' ), f )
end
end | [
"def",
"examples_dir",
"(",
"f",
"=",
"nil",
")",
"if",
"File",
".",
"exists?",
"(",
"config_file_path",
")",
"form_file_name",
"(",
"examples_dir_from_config_file",
",",
"f",
")",
"else",
"form_file_name",
"(",
"config_dir",
"(",
"'/examples'",
")",
",",
"f",
")",
"end",
"end"
] | returns the directory where rapidoc searches for examples | [
"returns",
"the",
"directory",
"where",
"rapidoc",
"searches",
"for",
"examples"
] | 03b7a8f29a37dd03f4ed5036697b48551d3b4ae6 | https://github.com/rjurado01/rapidoc/blob/03b7a8f29a37dd03f4ed5036697b48551d3b4ae6/lib/rapidoc/config.rb#L93-L99 | train |
philm/twilio | lib/twilio/verb.rb | Twilio.Verb.say | def say(*args)
options = {:voice => 'man', :language => 'en', :loop => 1}
args.each do |arg|
case arg
when String
options[:text_to_speak] = arg
when Hash
options.merge!(arg)
else
raise ArgumentError, 'say expects String or Hash argument'
end
end
output {
if options[:pause]
loop_with_pause(options[:loop], @xml) do
@xml.Say(options[:text_to_speak], :voice => options[:voice], :language => options[:language])
end
else
@xml.Say(options[:text_to_speak], :voice => options[:voice], :language => options[:language], :loop => options[:loop])
end
}
end | ruby | def say(*args)
options = {:voice => 'man', :language => 'en', :loop => 1}
args.each do |arg|
case arg
when String
options[:text_to_speak] = arg
when Hash
options.merge!(arg)
else
raise ArgumentError, 'say expects String or Hash argument'
end
end
output {
if options[:pause]
loop_with_pause(options[:loop], @xml) do
@xml.Say(options[:text_to_speak], :voice => options[:voice], :language => options[:language])
end
else
@xml.Say(options[:text_to_speak], :voice => options[:voice], :language => options[:language], :loop => options[:loop])
end
}
end | [
"def",
"say",
"(",
"*",
"args",
")",
"options",
"=",
"{",
":voice",
"=>",
"'man'",
",",
":language",
"=>",
"'en'",
",",
":loop",
"=>",
"1",
"}",
"args",
".",
"each",
"do",
"|",
"arg",
"|",
"case",
"arg",
"when",
"String",
"options",
"[",
":text_to_speak",
"]",
"=",
"arg",
"when",
"Hash",
"options",
".",
"merge!",
"(",
"arg",
")",
"else",
"raise",
"ArgumentError",
",",
"'say expects String or Hash argument'",
"end",
"end",
"output",
"{",
"if",
"options",
"[",
":pause",
"]",
"loop_with_pause",
"(",
"options",
"[",
":loop",
"]",
",",
"@xml",
")",
"do",
"@xml",
".",
"Say",
"(",
"options",
"[",
":text_to_speak",
"]",
",",
":voice",
"=>",
"options",
"[",
":voice",
"]",
",",
":language",
"=>",
"options",
"[",
":language",
"]",
")",
"end",
"else",
"@xml",
".",
"Say",
"(",
"options",
"[",
":text_to_speak",
"]",
",",
":voice",
"=>",
"options",
"[",
":voice",
"]",
",",
":language",
"=>",
"options",
"[",
":language",
"]",
",",
":loop",
"=>",
"options",
"[",
":loop",
"]",
")",
"end",
"}",
"end"
] | The Say verb converts text to speech that is read back to the caller.
Say is useful for dynamic text that is difficult to prerecord.
Examples:
Twilio::Verb.say 'The time is 9:35 PM.'
Twilio::Verb.say 'The time is 9:35 PM.', :loop => 3
With numbers, 12345 will be spoken as "twelve thousand three hundred forty five" while
1 2 3 4 5 will be spoken as "one two three four five."
Twilio::Verb.say 'Your PIN is 1234', :loop => 4
Twilio::Verb.say 'Your PIN is 1 2 3 4', :loop => 4
If you need a longer pause between each loop, instead of explicitly calling the Pause
verb within a block, you can set the convenient pause option:
Twilio::Verb.say 'Your PIN is 1 2 3 4', :loop => 4, :pause => true
Options (see http://www.twilio.com/docs/api_reference/TwiML/say) are passed in as a hash:
Twilio::Verb.say 'The time is 9:35 PM.', :voice => 'woman'
Twilio::Verb.say 'The time is 9:35 PM.', :voice => 'woman', :language => 'es' | [
"The",
"Say",
"verb",
"converts",
"text",
"to",
"speech",
"that",
"is",
"read",
"back",
"to",
"the",
"caller",
".",
"Say",
"is",
"useful",
"for",
"dynamic",
"text",
"that",
"is",
"difficult",
"to",
"prerecord",
"."
] | 81c05795924bbfa780ea44efd52d7ca5670bcb55 | https://github.com/philm/twilio/blob/81c05795924bbfa780ea44efd52d7ca5670bcb55/lib/twilio/verb.rb#L62-L84 | train |
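A hedged sketch of calling the verb above and the rough shape of the TwiML it should emit. The gem name in the `require` and the exact XML layout are assumptions inferred from the builder calls in the method, not verified output.

```ruby
require 'twilio'   # gem name assumed

xml = Twilio::Verb.say 'Your PIN is 1 2 3 4', :loop => 2, :voice => 'woman'
puts xml
# Expected shape (assumption):
# <Response>
#   <Say voice="woman" language="en" loop="2">Your PIN is 1 2 3 4</Say>
# </Response>
```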
philm/twilio | lib/twilio/verb.rb | Twilio.Verb.gather | def gather(*args, &block)
options = args.shift || {}
output {
if block_given?
@xml.Gather(options) { block.call}
else
@xml.Gather(options)
end
}
end | ruby | def gather(*args, &block)
options = args.shift || {}
output {
if block_given?
@xml.Gather(options) { block.call}
else
@xml.Gather(options)
end
}
end | [
"def",
"gather",
"(",
"*",
"args",
",",
"&",
"block",
")",
"options",
"=",
"args",
".",
"shift",
"||",
"{",
"}",
"output",
"{",
"if",
"block_given?",
"@xml",
".",
"Gather",
"(",
"options",
")",
"{",
"block",
".",
"call",
"}",
"else",
"@xml",
".",
"Gather",
"(",
"options",
")",
"end",
"}",
"end"
] | The Gather verb collects digits entered by a caller into their telephone keypad.
When the caller is done entering data, Twilio submits that data to a provided URL,
as either a HTTP GET or POST request, just like a web browser submits data from an HTML form.
Options (see http://www.twilio.com/docs/api_reference/TwiML/gather) are passed in as a hash
Examples:
Twilio::Verb.gather
Twilio::Verb.gather :action => 'http://foobar.com'
Twilio::Verb.gather :finishOnKey => '*'
Twilio::Verb.gather :action => 'http://foobar.com', :finishOnKey => '*'
Gather also lets you nest the Play, Say, and Pause verbs:
verb = Twilio::Verb.new { |v|
v.gather(:action => '/process_gather', :method => 'GET) {
v.say 'Please enter your account number followed by the pound sign'
}
v.say "We didn't receive any input. Goodbye!"
}
verb.response # represents the final xml output | [
"The",
"Gather",
"verb",
"collects",
"digits",
"entered",
"by",
"a",
"caller",
"into",
"their",
"telephone",
"keypad",
".",
"When",
"the",
"caller",
"is",
"done",
"entering",
"data",
"Twilio",
"submits",
"that",
"data",
"to",
"a",
"provided",
"URL",
"as",
"either",
"a",
"HTTP",
"GET",
"or",
"POST",
"request",
"just",
"like",
"a",
"web",
"browser",
"submits",
"data",
"from",
"an",
"HTML",
"form",
"."
] | 81c05795924bbfa780ea44efd52d7ca5670bcb55 | https://github.com/philm/twilio/blob/81c05795924bbfa780ea44efd52d7ca5670bcb55/lib/twilio/verb.rb#L143-L152 | train |
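The other half of gather is the endpoint that receives the caller's input: Twilio submits the pressed keys back to the action URL as the `Digits` parameter. A hedged Sinatra-style sketch of that receiving side (route name taken from the docstring's example; the response-building call reuses the class shown above):

```ruby
require 'sinatra'
require 'twilio'   # gem name assumed

post '/process_gather' do
  digits = params['Digits']          # keypad input Twilio posts back
  content_type 'text/xml'
  Twilio::Verb.say "You entered #{digits}. Goodbye!"
end
```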
philm/twilio | lib/twilio/verb.rb | Twilio.Verb.dial | def dial(*args, &block)
number_to_dial = ''
options = {}
args.each do |arg|
case arg
when String
number_to_dial = arg
when Hash
options.merge!(arg)
else
raise ArgumentError, 'dial expects String or Hash argument'
end
end
output {
if block_given?
@xml.Dial(options) { block.call }
else
@xml.Dial(number_to_dial, options)
end
}
end | ruby | def dial(*args, &block)
number_to_dial = ''
options = {}
args.each do |arg|
case arg
when String
number_to_dial = arg
when Hash
options.merge!(arg)
else
raise ArgumentError, 'dial expects String or Hash argument'
end
end
output {
if block_given?
@xml.Dial(options) { block.call }
else
@xml.Dial(number_to_dial, options)
end
}
end | [
"def",
"dial",
"(",
"*",
"args",
",",
"&",
"block",
")",
"number_to_dial",
"=",
"''",
"options",
"=",
"{",
"}",
"args",
".",
"each",
"do",
"|",
"arg",
"|",
"case",
"arg",
"when",
"String",
"number_to_dial",
"=",
"arg",
"when",
"Hash",
"options",
".",
"merge!",
"(",
"arg",
")",
"else",
"raise",
"ArgumentError",
",",
"'dial expects String or Hash argument'",
"end",
"end",
"output",
"{",
"if",
"block_given?",
"@xml",
".",
"Dial",
"(",
"options",
")",
"{",
"block",
".",
"call",
"}",
"else",
"@xml",
".",
"Dial",
"(",
"number_to_dial",
",",
"options",
")",
"end",
"}",
"end"
] | The Dial verb connects the current caller to another phone. If the called party picks up,
the two parties are connected and can communicate until one hangs up. If the called party does
not pick up, if a busy signal is received, or the number doesn't exist, the dial verb will finish.
If an action verb is provided, Twilio will submit the outcome of the call attempt to the action URL.
If no action is provided, Dial will fall through to the next verb in the document.
Note: this is different than the behavior of Record and Gather. Dial does not submit back to the
current document URL if no action is provided.
Options (see http://www.twilio.com/docs/api_reference/TwiML/dial) are passed in as a hash
Examples:
Twilio::Verb.dial '415-123-4567'
Twilio::Verb.dial '415-123-4567', :action => 'http://foobar.com'
Twilio::Verb.dial '415-123-4567', :timeout => 10, :callerId => '858-987-6543'
Twilio also supports an alternate form in which a Number object is nested inside Dial:
verb = Twilio::Verb.new { |v|
v.dial { |v|
v.number '415-123-4567'
v.number '415-123-4568'
v.number '415-123-4569'
}
}
verb.response # represents the final xml output | [
"The",
"Dial",
"verb",
"connects",
"the",
"current",
"caller",
"to",
"an",
"another",
"phone",
".",
"If",
"the",
"called",
"party",
"picks",
"up",
"the",
"two",
"parties",
"are",
"connected",
"and",
"can",
"communicate",
"until",
"one",
"hangs",
"up",
".",
"If",
"the",
"called",
"party",
"does",
"not",
"pick",
"up",
"if",
"a",
"busy",
"signal",
"is",
"received",
"or",
"the",
"number",
"doesn",
"t",
"exist",
"the",
"dial",
"verb",
"will",
"finish",
"."
] | 81c05795924bbfa780ea44efd52d7ca5670bcb55 | https://github.com/philm/twilio/blob/81c05795924bbfa780ea44efd52d7ca5670bcb55/lib/twilio/verb.rb#L198-L219 | train |
rjurado01/rapidoc | lib/rapidoc/resource_doc.rb | Rapidoc.ResourceDoc.generate_info | def generate_info( routes_info )
if routes_info
extractor = get_controller_extractor
@description = extractor.get_resource_info['description'] if extractor
@actions_doc = get_actions_doc( routes_info, extractor )
# template need that description will be an array
@description = [ @description ] unless @description.class == Array
end
end | ruby | def generate_info( routes_info )
if routes_info
extractor = get_controller_extractor
@description = extractor.get_resource_info['description'] if extractor
@actions_doc = get_actions_doc( routes_info, extractor )
# template need that description will be an array
@description = [ @description ] unless @description.class == Array
end
end | [
"def",
"generate_info",
"(",
"routes_info",
")",
"if",
"routes_info",
"extractor",
"=",
"get_controller_extractor",
"@description",
"=",
"extractor",
".",
"get_resource_info",
"[",
"'description'",
"]",
"if",
"extractor",
"@actions_doc",
"=",
"get_actions_doc",
"(",
"routes_info",
",",
"extractor",
")",
"@description",
"=",
"[",
"@description",
"]",
"unless",
"@description",
".",
"class",
"==",
"Array",
"end",
"end"
] | Create description and actions_doc | [
"Create",
"description",
"and",
"actions_doc"
] | 03b7a8f29a37dd03f4ed5036697b48551d3b4ae6 | https://github.com/rjurado01/rapidoc/blob/03b7a8f29a37dd03f4ed5036697b48551d3b4ae6/lib/rapidoc/resource_doc.rb#L36-L45 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/filemanager.rb | QuartzTorrent.PieceMapper.findBlock | def findBlock(pieceIndex, offset, length)
leftOffset = @pieceSize*pieceIndex + offset
rightOffset = leftOffset + length-1
findPart(leftOffset, rightOffset)
end | ruby | def findBlock(pieceIndex, offset, length)
leftOffset = @pieceSize*pieceIndex + offset
rightOffset = leftOffset + length-1
findPart(leftOffset, rightOffset)
end | [
"def",
"findBlock",
"(",
"pieceIndex",
",",
"offset",
",",
"length",
")",
"leftOffset",
"=",
"@pieceSize",
"*",
"pieceIndex",
"+",
"offset",
"rightOffset",
"=",
"leftOffset",
"+",
"length",
"-",
"1",
"findPart",
"(",
"leftOffset",
",",
"rightOffset",
")",
"end"
] | Return a list of FileRegion objects. The FileRegion offsets specify
in order which regions of files the piece covers. | [
"Return",
"a",
"list",
"of",
"FileRegion",
"objects",
".",
"The",
"FileRegion",
"offsets",
"specify",
"in",
"order",
"which",
"regions",
"of",
"files",
"the",
"piece",
"covers",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/filemanager.rb#L75-L80 | train |
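A worked example of the offset arithmetic in findBlock, with made-up numbers: the piece index and block offset are collapsed into an absolute byte range, which findPart then splits into per-file regions.

```ruby
# Illustrative numbers only.
piece_size  = 256 * 1024        # 262144 bytes per piece
piece_index = 3
offset      = 16_384
length      = 16_384

left  = piece_size * piece_index + offset   # => 802816
right = left + length - 1                   # => 819199

# [left, right] is the absolute range that gets mapped onto one or more
# FileRegion(path, offset, length) entries, splitting at file boundaries
# when the torrent spans several files.
puts "block covers absolute bytes #{left}..#{right}"
```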
jeffwilliams/quartz-torrent | lib/quartz_torrent/filemanager.rb | QuartzTorrent.PieceIO.writeBlock | def writeBlock(pieceIndex, offset, block)
regions = @pieceMapper.findBlock(pieceIndex, offset, block.length)
indexInBlock = 0
regions.each do |region|
# Get the IO for the file with path 'path'. If we are being used in a reactor, this is the IO facade. If we
# are not then this is a real IO.
io = @ioManager.get(region.path)
if ! io
# No IO for this file.
raise "This process doesn't have write permission for the file #{region.path}" if File.exists?(region.path) && ! File.writable?(region.path)
# Ensure parent directories exist.
dir = File.dirname region.path
FileUtils.mkdir_p dir if ! File.directory?(dir)
begin
io = @ioManager.open(region.path)
rescue
@logger.error "Opening file #{region.path} failed: #{$!}"
raise "Opening file #{region.path} failed"
end
end
io.seek region.offset, IO::SEEK_SET
begin
io.write(block[indexInBlock, region.length])
indexInBlock += region.length
rescue
# Error when writing...
@logger.error "Writing block to file #{region.path} failed: #{$!}"
piece = nil
break
end
break if indexInBlock >= block.length
end
end | ruby | def writeBlock(pieceIndex, offset, block)
regions = @pieceMapper.findBlock(pieceIndex, offset, block.length)
indexInBlock = 0
regions.each do |region|
# Get the IO for the file with path 'path'. If we are being used in a reactor, this is the IO facade. If we
# are not then this is a real IO.
io = @ioManager.get(region.path)
if ! io
# No IO for this file.
raise "This process doesn't have write permission for the file #{region.path}" if File.exists?(region.path) && ! File.writable?(region.path)
# Ensure parent directories exist.
dir = File.dirname region.path
FileUtils.mkdir_p dir if ! File.directory?(dir)
begin
io = @ioManager.open(region.path)
rescue
@logger.error "Opening file #{region.path} failed: #{$!}"
raise "Opening file #{region.path} failed"
end
end
io.seek region.offset, IO::SEEK_SET
begin
io.write(block[indexInBlock, region.length])
indexInBlock += region.length
rescue
# Error when writing...
@logger.error "Writing block to file #{region.path} failed: #{$!}"
piece = nil
break
end
break if indexInBlock >= block.length
end
end | [
"def",
"writeBlock",
"(",
"pieceIndex",
",",
"offset",
",",
"block",
")",
"regions",
"=",
"@pieceMapper",
".",
"findBlock",
"(",
"pieceIndex",
",",
"offset",
",",
"block",
".",
"length",
")",
"indexInBlock",
"=",
"0",
"regions",
".",
"each",
"do",
"|",
"region",
"|",
"io",
"=",
"@ioManager",
".",
"get",
"(",
"region",
".",
"path",
")",
"if",
"!",
"io",
"raise",
"\"This process doesn't have write permission for the file #{region.path}\"",
"if",
"File",
".",
"exists?",
"(",
"region",
".",
"path",
")",
"&&",
"!",
"File",
".",
"writable?",
"(",
"region",
".",
"path",
")",
"dir",
"=",
"File",
".",
"dirname",
"region",
".",
"path",
"FileUtils",
".",
"mkdir_p",
"dir",
"if",
"!",
"File",
".",
"directory?",
"(",
"dir",
")",
"begin",
"io",
"=",
"@ioManager",
".",
"open",
"(",
"region",
".",
"path",
")",
"rescue",
"@logger",
".",
"error",
"\"Opening file #{region.path} failed: #{$!}\"",
"raise",
"\"Opening file #{region.path} failed\"",
"end",
"end",
"io",
".",
"seek",
"region",
".",
"offset",
",",
"IO",
"::",
"SEEK_SET",
"begin",
"io",
".",
"write",
"(",
"block",
"[",
"indexInBlock",
",",
"region",
".",
"length",
"]",
")",
"indexInBlock",
"+=",
"region",
".",
"length",
"rescue",
"@logger",
".",
"error",
"\"Writing block to file #{region.path} failed: #{$!}\"",
"piece",
"=",
"nil",
"break",
"end",
"break",
"if",
"indexInBlock",
">=",
"block",
".",
"length",
"end",
"end"
] | Write a block to an in-progress piece. The block is written to
piece 'pieceIndex', at offset 'offset'. The block data is in block.
Throws exceptions on failure. | [
"Write",
"a",
"block",
"to",
"an",
"in",
"-",
"progress",
"piece",
".",
"The",
"block",
"is",
"written",
"to",
"piece",
"peiceIndex",
"at",
"offset",
"offset",
".",
"The",
"block",
"data",
"is",
"in",
"block",
".",
"Throws",
"exceptions",
"on",
"failure",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/filemanager.rb#L163-L199 | train |
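The slicing loop in writeBlock is what lets one block straddle a file boundary: consecutive slices of the block go to consecutive regions. A self-contained illustration (the Region struct stands in for FileRegion; no real files are written):

```ruby
Region = Struct.new(:path, :offset, :length)

block   = "A" * 10 + "B" * 6               # a 16-byte block
regions = [Region.new("file1", 90, 10),    # last 10 bytes of file1
           Region.new("file2",  0,  6)]    # first 6 bytes of file2

index = 0
regions.each do |r|
  slice = block[index, r.length]
  puts "write #{slice.inspect} to #{r.path} at offset #{r.offset}"
  index += r.length
end
# write "AAAAAAAAAA" to file1 at offset 90
# write "BBBBBB" to file2 at offset 0
```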
jeffwilliams/quartz-torrent | lib/quartz_torrent/filemanager.rb | QuartzTorrent.PieceIO.readRegions | def readRegions(regions)
piece = ""
regions.each do |region|
# Get the IO for the file with path 'path'. If we are being used in a reactor, this is the IO facade. If we
# are not then this is a real IO.
io = @ioManager.get(region.path)
if ! io
# No IO for this file.
if ! File.exists?(region.path)
# This file hasn't been created yet by having blocks written to it.
piece = nil
break
end
raise "This process doesn't have read permission for the file #{region.path}" if ! File.readable?(region.path)
begin
io = @ioManager.open(region.path)
rescue
@logger.error "Opening file #{region.path} failed: #{$!}"
raise "Opening file #{region.path} failed"
end
end
io.seek region.offset, IO::SEEK_SET
begin
piece << io.read(region.length)
rescue
# Error when reading. Likely EOF, meaning this peice isn't all there yet.
piece = nil
break
end
end
piece
end | ruby | def readRegions(regions)
piece = ""
regions.each do |region|
# Get the IO for the file with path 'path'. If we are being used in a reactor, this is the IO facade. If we
# are not then this is a real IO.
io = @ioManager.get(region.path)
if ! io
# No IO for this file.
if ! File.exists?(region.path)
# This file hasn't been created yet by having blocks written to it.
piece = nil
break
end
raise "This process doesn't have read permission for the file #{region.path}" if ! File.readable?(region.path)
begin
io = @ioManager.open(region.path)
rescue
@logger.error "Opening file #{region.path} failed: #{$!}"
raise "Opening file #{region.path} failed"
end
end
io.seek region.offset, IO::SEEK_SET
begin
piece << io.read(region.length)
rescue
# Error when reading. Likely EOF, meaning this peice isn't all there yet.
piece = nil
break
end
end
piece
end | [
"def",
"readRegions",
"(",
"regions",
")",
"piece",
"=",
"\"\"",
"regions",
".",
"each",
"do",
"|",
"region",
"|",
"io",
"=",
"@ioManager",
".",
"get",
"(",
"region",
".",
"path",
")",
"if",
"!",
"io",
"if",
"!",
"File",
".",
"exists?",
"(",
"region",
".",
"path",
")",
"piece",
"=",
"nil",
"break",
"end",
"raise",
"\"This process doesn't have read permission for the file #{region.path}\"",
"if",
"!",
"File",
".",
"readable?",
"(",
"region",
".",
"path",
")",
"begin",
"io",
"=",
"@ioManager",
".",
"open",
"(",
"region",
".",
"path",
")",
"rescue",
"@logger",
".",
"error",
"\"Opening file #{region.path} failed: #{$!}\"",
"raise",
"\"Opening file #{region.path} failed\"",
"end",
"end",
"io",
".",
"seek",
"region",
".",
"offset",
",",
"IO",
"::",
"SEEK_SET",
"begin",
"piece",
"<<",
"io",
".",
"read",
"(",
"region",
".",
"length",
")",
"rescue",
"piece",
"=",
"nil",
"break",
"end",
"end",
"piece",
"end"
] | Pass an ordered list of FileRegions to load. | [
"Pass",
"an",
"ordered",
"list",
"of",
"FileRegions",
"to",
"load",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/filemanager.rb#L220-L253 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/filemanager.rb | QuartzTorrent.PieceManager.readBlock | def readBlock(pieceIndex, offset, length)
id = returnAndIncrRequestId
return id if @state == :after_stop
@requests.push [id, :read_block, pieceIndex, offset, length]
@requestsSemaphore.signal
id
end | ruby | def readBlock(pieceIndex, offset, length)
id = returnAndIncrRequestId
return id if @state == :after_stop
@requests.push [id, :read_block, pieceIndex, offset, length]
@requestsSemaphore.signal
id
end | [
"def",
"readBlock",
"(",
"pieceIndex",
",",
"offset",
",",
"length",
")",
"id",
"=",
"returnAndIncrRequestId",
"return",
"id",
"if",
"@state",
"==",
":after_stop",
"@requests",
".",
"push",
"[",
"id",
",",
":read_block",
",",
"pieceIndex",
",",
"offset",
",",
"length",
"]",
"@requestsSemaphore",
".",
"signal",
"id",
"end"
] | Read a block from the torrent asynchronously. When the operation
is complete the result is stored in the 'results' list.
This method returns an id that can be used to match the response
to the request.
The readBlock and writeBlock methods are not threadsafe with respect to callers;
they shouldn't be called by multiple threads concurrently. | [
"Read",
"a",
"block",
"from",
"the",
"torrent",
"asynchronously",
".",
"When",
"the",
"operation",
"is",
"complete",
"the",
"result",
"is",
"stored",
"in",
"the",
"results",
"list",
".",
"This",
"method",
"returns",
"an",
"id",
"that",
"can",
"be",
"used",
"to",
"match",
"the",
"response",
"to",
"the",
"request",
".",
"The",
"readBlock",
"and",
"writeBlock",
"methods",
"are",
"not",
"threadsafe",
"with",
"respect",
"to",
"callers",
";",
"they",
"shouldn",
"t",
"be",
"called",
"by",
"multiple",
"threads",
"concurrently",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/filemanager.rb#L319-L325 | train |
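A self-contained sketch of the id-matched, asynchronous request cycle the docstring describes: the caller gets an id back immediately, a worker services the request, and the completed result is later paired with that id. The queue-based classes here are illustrative, not the gem's internals.

```ruby
require 'thread'

Result = Struct.new(:request_id, :data)

requests = Queue.new
results  = Queue.new

worker = Thread.new do
  while (req = requests.pop)        # nil once the queue is closed and drained
    id, piece_index = req
    results.push Result.new(id, "block data for piece #{piece_index}")
  end
end

next_id = 0
submit_read = lambda do |piece_index|
  id = (next_id += 1)
  requests.push [id, piece_index]
  id                                 # caller keeps the id to match the result
end

id  = submit_read.call(7)
res = results.pop                    # in the gem this arrives via the results list
puts "request #{id} matched: #{res.request_id == id}"   # => true

requests.close
worker.join
```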
jeffwilliams/quartz-torrent | lib/quartz_torrent/filemanager.rb | QuartzTorrent.PieceManager.writeBlock | def writeBlock(pieceIndex, offset, block)
id = returnAndIncrRequestId
return id if @state == :after_stop
@requests.push [id, :write_block, pieceIndex, offset, block]
@requestsSemaphore.signal
id
end | ruby | def writeBlock(pieceIndex, offset, block)
id = returnAndIncrRequestId
return id if @state == :after_stop
@requests.push [id, :write_block, pieceIndex, offset, block]
@requestsSemaphore.signal
id
end | [
"def",
"writeBlock",
"(",
"pieceIndex",
",",
"offset",
",",
"block",
")",
"id",
"=",
"returnAndIncrRequestId",
"return",
"id",
"if",
"@state",
"==",
":after_stop",
"@requests",
".",
"push",
"[",
"id",
",",
":write_block",
",",
"pieceIndex",
",",
"offset",
",",
"block",
"]",
"@requestsSemaphore",
".",
"signal",
"id",
"end"
] | Write a block to the torrent asynchronously. | [
"Write",
"a",
"block",
"to",
"the",
"torrent",
"asynchronously",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/filemanager.rb#L328-L334 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/filemanager.rb | QuartzTorrent.PieceManager.readPiece | def readPiece(pieceIndex)
id = returnAndIncrRequestId
return id if @state == :after_stop
@requests.push [id, :read_piece, pieceIndex]
@requestsSemaphore.signal
id
end | ruby | def readPiece(pieceIndex)
id = returnAndIncrRequestId
return id if @state == :after_stop
@requests.push [id, :read_piece, pieceIndex]
@requestsSemaphore.signal
id
end | [
"def",
"readPiece",
"(",
"pieceIndex",
")",
"id",
"=",
"returnAndIncrRequestId",
"return",
"id",
"if",
"@state",
"==",
":after_stop",
"@requests",
".",
"push",
"[",
"id",
",",
":read_piece",
",",
"pieceIndex",
"]",
"@requestsSemaphore",
".",
"signal",
"id",
"end"
] | Read a block of the torrent asynchronously. | [
"Read",
"a",
"block",
"of",
"the",
"torrent",
"asynchronously",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/filemanager.rb#L337-L343 | train |
jeffwilliams/quartz-torrent | lib/quartz_torrent/filemanager.rb | QuartzTorrent.PieceManager.checkPieceHash | def checkPieceHash(pieceIndex)
id = returnAndIncrRequestId
return id if @state == :after_stop
@requests.push [id, :hash_piece, pieceIndex]
@requestsSemaphore.signal
id
end | ruby | def checkPieceHash(pieceIndex)
id = returnAndIncrRequestId
return id if @state == :after_stop
@requests.push [id, :hash_piece, pieceIndex]
@requestsSemaphore.signal
id
end | [
"def",
"checkPieceHash",
"(",
"pieceIndex",
")",
"id",
"=",
"returnAndIncrRequestId",
"return",
"id",
"if",
"@state",
"==",
":after_stop",
"@requests",
".",
"push",
"[",
"id",
",",
":hash_piece",
",",
"pieceIndex",
"]",
"@requestsSemaphore",
".",
"signal",
"id",
"end"
] | Validate that the hash of the downloaded piece matches the hash from the metainfo.
The result is successful? if the hash matches, false otherwise. The data of the result is
set to the piece index. | [
"Validate",
"that",
"the",
"hash",
"of",
"the",
"downloaded",
"piece",
"matches",
"the",
"hash",
"from",
"the",
"metainfo",
".",
"The",
"result",
"is",
"successful?",
"if",
"the",
"hash",
"matches",
"false",
"otherwise",
".",
"The",
"data",
"of",
"the",
"result",
"is",
"set",
"to",
"the",
"piece",
"index",
"."
] | 7aeb40125d886dd60d7481deb5129282fc3e3c06 | https://github.com/jeffwilliams/quartz-torrent/blob/7aeb40125d886dd60d7481deb5129282fc3e3c06/lib/quartz_torrent/filemanager.rb#L360-L366 | train |
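Conceptually, the hash check compares the SHA-1 of the downloaded piece with the hash recorded in the metainfo (SHA-1 is what BitTorrent uses for piece hashes). A standalone sketch of that comparison, not the gem's implementation:

```ruby
require 'digest/sha1'

metainfo_hash = Digest::SHA1.digest("downloaded piece payload")  # from the .torrent
piece         = "downloaded piece payload"                       # data read from disk

puts Digest::SHA1.digest(piece) == metainfo_hash   # => true, hash matches
```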
poise/halite | lib/halite/gem.rb | Halite.Gem.license_header | def license_header
IO.readlines(spec_file).take_while { |line| line.strip.empty? || line.strip.start_with?('#') }.join('')
end | ruby | def license_header
IO.readlines(spec_file).take_while { |line| line.strip.empty? || line.strip.start_with?('#') }.join('')
end | [
"def",
"license_header",
"IO",
".",
"readlines",
"(",
"spec_file",
")",
".",
"take_while",
"{",
"|",
"line",
"|",
"line",
".",
"strip",
".",
"empty?",
"||",
"line",
".",
"strip",
".",
"start_with?",
"(",
"'#'",
")",
"}",
".",
"join",
"(",
"''",
")",
"end"
] | License header extracted from the gemspec. Suitable for inclusion in other
Ruby source files.
@return [String] | [
"License",
"header",
"extacted",
"from",
"the",
"gemspec",
".",
"Suitable",
"for",
"inclusion",
"in",
"other",
"Ruby",
"source",
"files",
"."
] | 9ae174e6b7c5d4674f3301394e14567fa89a8b3e | https://github.com/poise/halite/blob/9ae174e6b7c5d4674f3301394e14567fa89a8b3e/lib/halite/gem.rb#L104-L106 | train |
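A standalone illustration of the take_while logic above, using an in-memory "gemspec" instead of a real file: only the leading blank and comment lines survive.

```ruby
lines = [
  "#",
  "# Copyright 2015, Example Author",
  "# Licensed under the Apache License, Version 2.0",
  "#",
  "",
  "Gem::Specification.new do |spec|",
  "  spec.name = 'example'",
  "end",
]

header = lines.take_while { |l| l.strip.empty? || l.strip.start_with?('#') }
              .join("\n")
puts header   # prints only the leading comment block and the blank line after it
```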
poise/halite | lib/halite/gem.rb | Halite.Gem.issues_url | def issues_url
if spec.metadata['issues_url']
spec.metadata['issues_url']
elsif spec.homepage =~ /^http(s)?:\/\/(www\.)?github\.com/
spec.homepage.chomp('/') + '/issues'
end
end | ruby | def issues_url
if spec.metadata['issues_url']
spec.metadata['issues_url']
elsif spec.homepage =~ /^http(s)?:\/\/(www\.)?github\.com/
spec.homepage.chomp('/') + '/issues'
end
end | [
"def",
"issues_url",
"if",
"spec",
".",
"metadata",
"[",
"'issues_url'",
"]",
"spec",
".",
"metadata",
"[",
"'issues_url'",
"]",
"elsif",
"spec",
".",
"homepage",
"=~",
"/",
"\\/",
"\\/",
"\\.",
"\\.",
"/",
"spec",
".",
"homepage",
".",
"chomp",
"(",
"'/'",
")",
"+",
"'/issues'",
"end",
"end"
] | URL to the issue tracker for this project.
@return [String, nil] | [
"URL",
"to",
"the",
"issue",
"tracker",
"for",
"this",
"project",
"."
] | 9ae174e6b7c5d4674f3301394e14567fa89a8b3e | https://github.com/poise/halite/blob/9ae174e6b7c5d4674f3301394e14567fa89a8b3e/lib/halite/gem.rb#L111-L117 | train |
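A standalone sketch of the fallback above: an explicit `issues_url` metadata entry wins, otherwise a GitHub homepage gets "/issues" appended. The helper below mirrors the logic but is not the gem's API.

```ruby
def issues_url_for(metadata, homepage)
  return metadata['issues_url'] if metadata['issues_url']
  homepage.chomp('/') + '/issues' if homepage =~ %r{^https?://(www\.)?github\.com}
end

p issues_url_for({}, 'https://github.com/poise/halite/')
# => "https://github.com/poise/halite/issues"
p issues_url_for({ 'issues_url' => 'https://example.com/bugs' }, 'https://example.com')
# => "https://example.com/bugs"
```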