repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition |
---|---|---|---|---|---|---|---|---|---|---|---|
blambeau/yargi | lib/yargi/digraph.rb | Yargi.Digraph.to_edges | def to_edges(*args)
selected = args.collect do |arg|
case arg
when Integer
[@edges[arg]]
when EdgeSet
arg
when Array
arg.collect{|v| to_edges(v)}.flatten.uniq
when Digraph::Edge
[arg]
else
pred = Predicate.to_predicate(arg)
edges(pred)
end
end.flatten.uniq
EdgeSet.new(selected)
end | ruby | def to_edges(*args)
selected = args.collect do |arg|
case arg
when Integer
[@edges[arg]]
when EdgeSet
arg
when Array
arg.collect{|v| to_edges(v)}.flatten.uniq
when Digraph::Edge
[arg]
else
pred = Predicate.to_predicate(arg)
edges(pred)
end
end.flatten.uniq
EdgeSet.new(selected)
end | [
"def",
"to_edges",
"(",
"*",
"args",
")",
"selected",
"=",
"args",
".",
"collect",
"do",
"|",
"arg",
"|",
"case",
"arg",
"when",
"Integer",
"[",
"@edges",
"[",
"arg",
"]",
"]",
"when",
"EdgeSet",
"arg",
"when",
"Array",
"arg",
".",
"collect",
"{",
"|",
"v",
"|",
"to_edges",
"(",
"v",
")",
"}",
".",
"flatten",
".",
"uniq",
"when",
"Digraph",
"::",
"Edge",
"[",
"arg",
"]",
"else",
"pred",
"=",
"Predicate",
".",
"to_predicate",
"(",
"arg",
")",
"edges",
"(",
"pred",
")",
"end",
"end",
".",
"flatten",
".",
"uniq",
"EdgeSet",
".",
"new",
"(",
"selected",
")",
"end"
] | Applies argument conventions about selection of edges | [
"Applies",
"argument",
"conventions",
"about",
"selection",
"of",
"edges"
] | 100141e96d245a0a8211cd4f7590909be149bc3c | https://github.com/blambeau/yargi/blob/100141e96d245a0a8211cd4f7590909be149bc3c/lib/yargi/digraph.rb#L285-L302 | train |
blambeau/yargi | lib/yargi/digraph.rb | Yargi.Digraph.apply_arg_conventions | def apply_arg_conventions(element, args)
args.each do |arg|
case arg
when Module
element.tag(arg)
when Hash
element.add_marks(arg)
else
raise ArgumentError, "Unable to apply argument conventions on #{arg.inspect}", caller
end
end
element
end | ruby | def apply_arg_conventions(element, args)
args.each do |arg|
case arg
when Module
element.tag(arg)
when Hash
element.add_marks(arg)
else
raise ArgumentError, "Unable to apply argument conventions on #{arg.inspect}", caller
end
end
element
end | [
"def",
"apply_arg_conventions",
"(",
"element",
",",
"args",
")",
"args",
".",
"each",
"do",
"|",
"arg",
"|",
"case",
"arg",
"when",
"Module",
"element",
".",
"tag",
"(",
"arg",
")",
"when",
"Hash",
"element",
".",
"add_marks",
"(",
"arg",
")",
"else",
"raise",
"ArgumentError",
",",
"\"Unable to apply argument conventions on #{arg.inspect}\"",
",",
"caller",
"end",
"end",
"element",
"end"
] | Applies argument conventions on _element_ | [
"Applies",
"argument",
"conventions",
"on",
"_element_"
] | 100141e96d245a0a8211cd4f7590909be149bc3c | https://github.com/blambeau/yargi/blob/100141e96d245a0a8211cd4f7590909be149bc3c/lib/yargi/digraph.rb#L305-L317 | train |
pjb3/curtain | lib/curtain/html_helpers.rb | Curtain.HTMLHelpers.content_tag | def content_tag(name, content=nil, attrs={}, &body)
if content.is_a?(Hash)
attrs = content
content = nil
end
if block_given?
content = capture(&body)
end
tag = tag_opening(name, attrs)
tag << ">".html_safe
tag << content
tag << "</#{name}>".html_safe
end | ruby | def content_tag(name, content=nil, attrs={}, &body)
if content.is_a?(Hash)
attrs = content
content = nil
end
if block_given?
content = capture(&body)
end
tag = tag_opening(name, attrs)
tag << ">".html_safe
tag << content
tag << "</#{name}>".html_safe
end | [
"def",
"content_tag",
"(",
"name",
",",
"content",
"=",
"nil",
",",
"attrs",
"=",
"{",
"}",
",",
"&",
"body",
")",
"if",
"content",
".",
"is_a?",
"(",
"Hash",
")",
"attrs",
"=",
"content",
"content",
"=",
"nil",
"end",
"if",
"block_given?",
"content",
"=",
"capture",
"(",
"&",
"body",
")",
"end",
"tag",
"=",
"tag_opening",
"(",
"name",
",",
"attrs",
")",
"tag",
"<<",
"\">\"",
".",
"html_safe",
"tag",
"<<",
"content",
"tag",
"<<",
"\"</#{name}>\"",
".",
"html_safe",
"end"
] | Generates a with opening and closing tags and potentially content.
@example Tag with no attributes, no content
content_tag(:p) # => "<p></p>"
@example Tag with content
content_tag(:p, "Hello") # => "<p>Hello</p>"
@example Tag with block content
content_tag(:p) { "Hello" } # => "<p>Hello</p>"
@example Tag with content and attributes
content_tag(:a, "Log In", href: "/log_in") # => "<a href="/log_in">Log In</a>"
@example Tag with content block and attributes
content_tag(:a, href: "/log_in") { "Log In" } # => "<a href="/log_in">Log In</a>"
@param name [Symbol, String] The name of the tag
@param attrs [Hash] The attributes of the tag
@return [String] The HTML tag | [
"Generates",
"a",
"with",
"opening",
"and",
"closing",
"tags",
"and",
"potentially",
"content",
"."
] | ab4f3dccea9b887148689084137f1375278f2dcf | https://github.com/pjb3/curtain/blob/ab4f3dccea9b887148689084137f1375278f2dcf/lib/curtain/html_helpers.rb#L50-L64 | train |
barkerest/barkest_core | app/helpers/barkest_core/application_helper.rb | BarkestCore.ApplicationHelper.render_alert | def render_alert(type, message)
if type.to_s.index('safe_')
type = type.to_s[5..-1]
message = message.to_s.html_safe
end
type = type.to_sym
type = :info if type == :notice
type = :danger if type == :alert
return nil unless [:info, :success, :danger, :warning].include?(type)
"<div class=\"alert alert-#{type} alert-dismissible\"><button type=\"button\" class=\"close\" data-dismiss=\"alert\" aria-label=\"Close\"><span aria-hidden=\"true\">×</span></button>#{render_alert_message(message)}</div>".html_safe
end | ruby | def render_alert(type, message)
if type.to_s.index('safe_')
type = type.to_s[5..-1]
message = message.to_s.html_safe
end
type = type.to_sym
type = :info if type == :notice
type = :danger if type == :alert
return nil unless [:info, :success, :danger, :warning].include?(type)
"<div class=\"alert alert-#{type} alert-dismissible\"><button type=\"button\" class=\"close\" data-dismiss=\"alert\" aria-label=\"Close\"><span aria-hidden=\"true\">×</span></button>#{render_alert_message(message)}</div>".html_safe
end | [
"def",
"render_alert",
"(",
"type",
",",
"message",
")",
"if",
"type",
".",
"to_s",
".",
"index",
"(",
"'safe_'",
")",
"type",
"=",
"type",
".",
"to_s",
"[",
"5",
"..",
"-",
"1",
"]",
"message",
"=",
"message",
".",
"to_s",
".",
"html_safe",
"end",
"type",
"=",
"type",
".",
"to_sym",
"type",
"=",
":info",
"if",
"type",
"==",
":notice",
"type",
"=",
":danger",
"if",
"type",
"==",
":alert",
"return",
"nil",
"unless",
"[",
":info",
",",
":success",
",",
":danger",
",",
":warning",
"]",
".",
"include?",
"(",
"type",
")",
"\"<div class=\\\"alert alert-#{type} alert-dismissible\\\"><button type=\\\"button\\\" class=\\\"close\\\" data-dismiss=\\\"alert\\\" aria-label=\\\"Close\\\"><span aria-hidden=\\\"true\\\">×</span></button>#{render_alert_message(message)}</div>\"",
".",
"html_safe",
"end"
] | Renders an alert message.
* +type+ The type of message [info, success, warn, error, danger, etc]
* +message+ The message to display.
To provide messages including HTML, you need to prefix the type with 'safe_'.
render_alert(safe_info, '<strong>This</strong> is a message containing <code>HTML</code> content.')
The message can be a string, hash, or array. When an array is specified, then each array element is enumerated and
joined together. The real power comes in when you specify a hash. A hash will print the key as a label and then
enumerate the value (string, hash, or array) in an unordered list. Hash values are processed recursively, allowing
you to create alerts with lists within lists.
render_alert(info, { 'Section 1' => [ 'Line 1', 'Line 2', 'Line 3' ] })
<label>Section 1</label>
<ul>
<li>Line 1</li>
<li>Line 2</li>
<li>Line 3</li>
</ul>
render_alert(info, { 'Block A' => { 'Block A:1' => [ 'Line 1', 'Line 2' ] }})
<label>Block A</label>
<ul>
<li>
<label>Block A:1</label>
<ul>
<li>Line 1</li>
<li>Line 2</li>
</ul>
</li>
</ul> | [
"Renders",
"an",
"alert",
"message",
"."
] | 3eeb025ec870888cacbc9bae252a39ebf9295f61 | https://github.com/barkerest/barkest_core/blob/3eeb025ec870888cacbc9bae252a39ebf9295f61/app/helpers/barkest_core/application_helper.rb#L74-L88 | train |
Democracy-for-America/ActionKitApi | lib/action_kit_api/event_campaign.rb | ActionKitApi.EventCampaign.create_event | def create_event(*args)
raise "EventCampaign needs to be saved before Event creation" if self.id.nil?
(args[0]).merge!(:campaign_id => self.id)
event = ActionKitApi::Event.new(*args)
end | ruby | def create_event(*args)
raise "EventCampaign needs to be saved before Event creation" if self.id.nil?
(args[0]).merge!(:campaign_id => self.id)
event = ActionKitApi::Event.new(*args)
end | [
"def",
"create_event",
"(",
"*",
"args",
")",
"raise",
"\"EventCampaign needs to be saved before Event creation\"",
"if",
"self",
".",
"id",
".",
"nil?",
"(",
"args",
"[",
"0",
"]",
")",
".",
"merge!",
"(",
":campaign_id",
"=>",
"self",
".",
"id",
")",
"event",
"=",
"ActionKitApi",
"::",
"Event",
".",
"new",
"(",
"*",
"args",
")",
"end"
] | Requires at a minimum the creator_id | [
"Requires",
"at",
"a",
"minimum",
"the",
"creator_id"
] | 81a9e1f84c5e3facbfec0203d453377da7034a26 | https://github.com/Democracy-for-America/ActionKitApi/blob/81a9e1f84c5e3facbfec0203d453377da7034a26/lib/action_kit_api/event_campaign.rb#L30-L36 | train |
Democracy-for-America/ActionKitApi | lib/action_kit_api/event_campaign.rb | ActionKitApi.EventCampaign.public_search | def public_search(*args)
(args[0]).merge!(:campaign_id => self.id)
results = ActionKitApi.connection.call("Event.public_search", *args)
results.map do |r|
Event.new(r)
end
results
end | ruby | def public_search(*args)
(args[0]).merge!(:campaign_id => self.id)
results = ActionKitApi.connection.call("Event.public_search", *args)
results.map do |r|
Event.new(r)
end
results
end | [
"def",
"public_search",
"(",
"*",
"args",
")",
"(",
"args",
"[",
"0",
"]",
")",
".",
"merge!",
"(",
":campaign_id",
"=>",
"self",
".",
"id",
")",
"results",
"=",
"ActionKitApi",
".",
"connection",
".",
"call",
"(",
"\"Event.public_search\"",
",",
"*",
"args",
")",
"results",
".",
"map",
"do",
"|",
"r",
"|",
"Event",
".",
"new",
"(",
"r",
")",
"end",
"results",
"end"
] | Will not return private events, events that are full, deleted, or in the past
and doesn't return extra fields | [
"Will",
"not",
"return",
"private",
"events",
"events",
"that",
"are",
"full",
"deleted",
"or",
"in",
"the",
"past",
"and",
"doesn",
"t",
"return",
"extra",
"fields"
] | 81a9e1f84c5e3facbfec0203d453377da7034a26 | https://github.com/Democracy-for-America/ActionKitApi/blob/81a9e1f84c5e3facbfec0203d453377da7034a26/lib/action_kit_api/event_campaign.rb#L45-L54 | train |
roberthoner/encrypted_store | lib/encrypted_store/crypto_hash.rb | EncryptedStore.CryptoHash.encrypt | def encrypt(dek, salt, iter_mag=10)
return nil if empty?
raise Errors::InvalidSaltSize, 'too long' if salt.bytes.length > 255
key, iv = _keyiv_gen(dek, salt, iter_mag)
encryptor = OpenSSL::Cipher::AES256.new(:CBC).encrypt
encryptor.key = key
encryptor.iv = iv
data_packet = _encrypted_data_header_v2(salt, iter_mag) + encryptor.update(self.to_json) + encryptor.final
_append_crc32(data_packet)
end | ruby | def encrypt(dek, salt, iter_mag=10)
return nil if empty?
raise Errors::InvalidSaltSize, 'too long' if salt.bytes.length > 255
key, iv = _keyiv_gen(dek, salt, iter_mag)
encryptor = OpenSSL::Cipher::AES256.new(:CBC).encrypt
encryptor.key = key
encryptor.iv = iv
data_packet = _encrypted_data_header_v2(salt, iter_mag) + encryptor.update(self.to_json) + encryptor.final
_append_crc32(data_packet)
end | [
"def",
"encrypt",
"(",
"dek",
",",
"salt",
",",
"iter_mag",
"=",
"10",
")",
"return",
"nil",
"if",
"empty?",
"raise",
"Errors",
"::",
"InvalidSaltSize",
",",
"'too long'",
"if",
"salt",
".",
"bytes",
".",
"length",
">",
"255",
"key",
",",
"iv",
"=",
"_keyiv_gen",
"(",
"dek",
",",
"salt",
",",
"iter_mag",
")",
"encryptor",
"=",
"OpenSSL",
"::",
"Cipher",
"::",
"AES256",
".",
"new",
"(",
":CBC",
")",
".",
"encrypt",
"encryptor",
".",
"key",
"=",
"key",
"encryptor",
".",
"iv",
"=",
"iv",
"data_packet",
"=",
"_encrypted_data_header_v2",
"(",
"salt",
",",
"iter_mag",
")",
"+",
"encryptor",
".",
"update",
"(",
"self",
".",
"to_json",
")",
"+",
"encryptor",
".",
"final",
"_append_crc32",
"(",
"data_packet",
")",
"end"
] | Encrypts the hash using the data encryption key and salt.
Returns a blob:
| Byte 0 | Byte 1 | Byte 2 | Bytes 3...S | Bytes S+1...E | Bytes E+1..E+4 |
------------------------------------------------------------------------------------------------
| Version | Salt Length | Iteration Magnitude | Salt | Encrypted Data | CRC32 | | [
"Encrypts",
"the",
"hash",
"using",
"the",
"data",
"encryption",
"key",
"and",
"salt",
"."
] | 89e78eb19e0cb710b08b71209e42eda085dcaa8a | https://github.com/roberthoner/encrypted_store/blob/89e78eb19e0cb710b08b71209e42eda085dcaa8a/lib/encrypted_store/crypto_hash.rb#L19-L31 | train |
barkerest/incline | lib/incline/user_manager.rb | Incline.UserManager.authenticate | def authenticate(email, password, client_ip)
return nil unless Incline::EmailValidator.valid?(email)
email = email.downcase
# If an engine is registered for the email domain, then use it.
engine = get_auth_engine(email)
if engine
return engine.authenticate(email, password, client_ip)
end
# Otherwise we will be using the database.
user = User.find_by(email: email)
if user
# user must be enabled and the password must match.
unless user.enabled?
add_failure_to user, '(DB) account disabled', client_ip
return nil
end
if user.authenticate(password)
add_success_to user, '(DB)', client_ip
return user
else
add_failure_to user, '(DB) invalid password', client_ip
return nil
end
end
add_failure_to email, 'invalid email', client_ip
nil
end | ruby | def authenticate(email, password, client_ip)
return nil unless Incline::EmailValidator.valid?(email)
email = email.downcase
# If an engine is registered for the email domain, then use it.
engine = get_auth_engine(email)
if engine
return engine.authenticate(email, password, client_ip)
end
# Otherwise we will be using the database.
user = User.find_by(email: email)
if user
# user must be enabled and the password must match.
unless user.enabled?
add_failure_to user, '(DB) account disabled', client_ip
return nil
end
if user.authenticate(password)
add_success_to user, '(DB)', client_ip
return user
else
add_failure_to user, '(DB) invalid password', client_ip
return nil
end
end
add_failure_to email, 'invalid email', client_ip
nil
end | [
"def",
"authenticate",
"(",
"email",
",",
"password",
",",
"client_ip",
")",
"return",
"nil",
"unless",
"Incline",
"::",
"EmailValidator",
".",
"valid?",
"(",
"email",
")",
"email",
"=",
"email",
".",
"downcase",
"engine",
"=",
"get_auth_engine",
"(",
"email",
")",
"if",
"engine",
"return",
"engine",
".",
"authenticate",
"(",
"email",
",",
"password",
",",
"client_ip",
")",
"end",
"user",
"=",
"User",
".",
"find_by",
"(",
"email",
":",
"email",
")",
"if",
"user",
"unless",
"user",
".",
"enabled?",
"add_failure_to",
"user",
",",
"'(DB) account disabled'",
",",
"client_ip",
"return",
"nil",
"end",
"if",
"user",
".",
"authenticate",
"(",
"password",
")",
"add_success_to",
"user",
",",
"'(DB)'",
",",
"client_ip",
"return",
"user",
"else",
"add_failure_to",
"user",
",",
"'(DB) invalid password'",
",",
"client_ip",
"return",
"nil",
"end",
"end",
"add_failure_to",
"email",
",",
"'invalid email'",
",",
"client_ip",
"nil",
"end"
] | Creates a new user manager.
The user manager itself takes no options, however options will be passed to
any registered authentication engines when they are instantiated.
The options can be used to pre-register engines and provide configuration for them.
The engines will have specific configurations, but the UserManager class recognizes
the 'engines' key.
{
:engines => {
'example.com' => {
:engine => MySuperAuthEngine.new(...)
},
'example.org' => {
:engine => 'incline_ldap/auth_engine',
:config => {
:host => 'ldap.example.org',
:port => 636,
:base_dn => 'DC=ldap,DC=example,DC=org'
}
}
}
}
When an 'engines' key is processed, the configuration options for the engines are pulled
from the subkeys. Once the processing of the 'engines' key is complete, it will be removed
from the options hash so any engines registered in the future will not receive the extra options.
Attempts to authenticate the user and returns the model on success. | [
"Creates",
"a",
"new",
"user",
"manager",
"."
] | 1ff08db7aa8ab7f86b223268b700bc67d15bb8aa | https://github.com/barkerest/incline/blob/1ff08db7aa8ab7f86b223268b700bc67d15bb8aa/lib/incline/user_manager.rb#L81-L109 | train |
barkerest/incline | lib/incline/user_manager.rb | Incline.UserManager.begin_external_authentication | def begin_external_authentication(request)
# We don't have an email domain to work from.
# Instead, we'll call each engine's authenticate_external method.
# If one of them returns a user, then we return that value and skip further processing.
auth_engines.each do |dom,engine|
unless engine.nil?
url = engine.begin_external_authentication(request)
return url unless url.blank?
end
end
nil
end | ruby | def begin_external_authentication(request)
# We don't have an email domain to work from.
# Instead, we'll call each engine's authenticate_external method.
# If one of them returns a user, then we return that value and skip further processing.
auth_engines.each do |dom,engine|
unless engine.nil?
url = engine.begin_external_authentication(request)
return url unless url.blank?
end
end
nil
end | [
"def",
"begin_external_authentication",
"(",
"request",
")",
"auth_engines",
".",
"each",
"do",
"|",
"dom",
",",
"engine",
"|",
"unless",
"engine",
".",
"nil?",
"url",
"=",
"engine",
".",
"begin_external_authentication",
"(",
"request",
")",
"return",
"url",
"unless",
"url",
".",
"blank?",
"end",
"end",
"nil",
"end"
] | The begin_external_authentication method takes a request object to determine if it should process a login
or return nil. If it decides to process authentication, it should return a URL to redirect to. | [
"The",
"begin_external_authentication",
"method",
"takes",
"a",
"request",
"object",
"to",
"determine",
"if",
"it",
"should",
"process",
"a",
"login",
"or",
"return",
"nil",
".",
"If",
"it",
"decides",
"to",
"process",
"authentication",
"it",
"should",
"return",
"a",
"URL",
"to",
"redirect",
"to",
"."
] | 1ff08db7aa8ab7f86b223268b700bc67d15bb8aa | https://github.com/barkerest/incline/blob/1ff08db7aa8ab7f86b223268b700bc67d15bb8aa/lib/incline/user_manager.rb#L114-L125 | train |
barkerest/incline | lib/incline/user_manager.rb | Incline.UserManager.register_auth_engine | def register_auth_engine(engine, *domains)
unless engine.nil?
unless engine.is_a?(::Incline::AuthEngineBase)
raise ArgumentError, "The 'engine' parameter must be an instance of an auth engine or a class defining an auth engine." unless engine.is_a?(::Class)
engine = engine.new(@options)
raise ArgumentError, "The 'engine' parameter must be an instance of an auth engine or a class defining an auth engine." unless engine.is_a?(::Incline::AuthEngineBase)
end
end
domains.map do |dom|
dom = dom.to_s.downcase.strip
raise ArgumentError, "The domain #{dom.inspect} does not appear to be a valid domain." unless dom =~ /\A[a-z0-9]+(?:[-.][a-z0-9]+)*\.[a-z]+\Z/
dom
end.each do |dom|
auth_engines[dom] = engine
end
end | ruby | def register_auth_engine(engine, *domains)
unless engine.nil?
unless engine.is_a?(::Incline::AuthEngineBase)
raise ArgumentError, "The 'engine' parameter must be an instance of an auth engine or a class defining an auth engine." unless engine.is_a?(::Class)
engine = engine.new(@options)
raise ArgumentError, "The 'engine' parameter must be an instance of an auth engine or a class defining an auth engine." unless engine.is_a?(::Incline::AuthEngineBase)
end
end
domains.map do |dom|
dom = dom.to_s.downcase.strip
raise ArgumentError, "The domain #{dom.inspect} does not appear to be a valid domain." unless dom =~ /\A[a-z0-9]+(?:[-.][a-z0-9]+)*\.[a-z]+\Z/
dom
end.each do |dom|
auth_engines[dom] = engine
end
end | [
"def",
"register_auth_engine",
"(",
"engine",
",",
"*",
"domains",
")",
"unless",
"engine",
".",
"nil?",
"unless",
"engine",
".",
"is_a?",
"(",
"::",
"Incline",
"::",
"AuthEngineBase",
")",
"raise",
"ArgumentError",
",",
"\"The 'engine' parameter must be an instance of an auth engine or a class defining an auth engine.\"",
"unless",
"engine",
".",
"is_a?",
"(",
"::",
"Class",
")",
"engine",
"=",
"engine",
".",
"new",
"(",
"@options",
")",
"raise",
"ArgumentError",
",",
"\"The 'engine' parameter must be an instance of an auth engine or a class defining an auth engine.\"",
"unless",
"engine",
".",
"is_a?",
"(",
"::",
"Incline",
"::",
"AuthEngineBase",
")",
"end",
"end",
"domains",
".",
"map",
"do",
"|",
"dom",
"|",
"dom",
"=",
"dom",
".",
"to_s",
".",
"downcase",
".",
"strip",
"raise",
"ArgumentError",
",",
"\"The domain #{dom.inspect} does not appear to be a valid domain.\"",
"unless",
"dom",
"=~",
"/",
"\\A",
"\\.",
"\\Z",
"/",
"dom",
"end",
".",
"each",
"do",
"|",
"dom",
"|",
"auth_engines",
"[",
"dom",
"]",
"=",
"engine",
"end",
"end"
] | Registers an authentication engine for one or more domains.
The +engine+ passed in should take an options hash as the only argument to +initialize+
and should provide an +authenticate+ method that takes the +email+, +password+, and
+client_ip+. You can optionally define an +authenticate_external+ method that takes the
current +request+ as the only parameter.
The +authenticate+ method of the engine should return an Incline::User object on success or nil on failure.
The +begin_external_authentication+ method of the engine should return a URL to redirect to on success
or nil on failure.
class MyAuthEngine
def initialize(options = {})
...
end
def authenticate(email, password, client_ip)
...
end
def begin_external_authentication(request)
...
end
end
Incline::UserManager.register_auth_engine(MyAuthEngine, 'example.com', 'example.net', 'example.org') | [
"Registers",
"an",
"authentication",
"engine",
"for",
"one",
"or",
"more",
"domains",
"."
] | 1ff08db7aa8ab7f86b223268b700bc67d15bb8aa | https://github.com/barkerest/incline/blob/1ff08db7aa8ab7f86b223268b700bc67d15bb8aa/lib/incline/user_manager.rb#L189-L204 | train |
jomalley2112/controller_scaffolding | lib/generators/controller_generator_base.rb | Generators.ControllerGeneratorBase.copy_view_files | def copy_view_files #do NOT change the name of this method
# it must be overriding an existing one in a parent class
base_path = File.join("app/views", class_path, file_name)
#binding.pry
empty_directory base_path
@actions = actions.nil? || actions.empty? ? %w(index new create edit update destroy) : actions
@attr_cols = GeneratorUtils::attr_cols(table_name)
@col_count = @attr_cols.count
@col_count += 1 if @actions.include?("edit")
@col_count += 1 if @actions.include?("destroy")
@search_sort = options.search_sort?
(@actions - %w(create update destroy)).each do |action|
@action = action
formats.each do |format|
@path = File.join(base_path, filename_with_extensions(action, format))
set_template(@action, @path)
end
end
end | ruby | def copy_view_files #do NOT change the name of this method
# it must be overriding an existing one in a parent class
base_path = File.join("app/views", class_path, file_name)
#binding.pry
empty_directory base_path
@actions = actions.nil? || actions.empty? ? %w(index new create edit update destroy) : actions
@attr_cols = GeneratorUtils::attr_cols(table_name)
@col_count = @attr_cols.count
@col_count += 1 if @actions.include?("edit")
@col_count += 1 if @actions.include?("destroy")
@search_sort = options.search_sort?
(@actions - %w(create update destroy)).each do |action|
@action = action
formats.each do |format|
@path = File.join(base_path, filename_with_extensions(action, format))
set_template(@action, @path)
end
end
end | [
"def",
"copy_view_files",
"base_path",
"=",
"File",
".",
"join",
"(",
"\"app/views\"",
",",
"class_path",
",",
"file_name",
")",
"empty_directory",
"base_path",
"@actions",
"=",
"actions",
".",
"nil?",
"||",
"actions",
".",
"empty?",
"?",
"%w(",
"index",
"new",
"create",
"edit",
"update",
"destroy",
")",
":",
"actions",
"@attr_cols",
"=",
"GeneratorUtils",
"::",
"attr_cols",
"(",
"table_name",
")",
"@col_count",
"=",
"@attr_cols",
".",
"count",
"@col_count",
"+=",
"1",
"if",
"@actions",
".",
"include?",
"(",
"\"edit\"",
")",
"@col_count",
"+=",
"1",
"if",
"@actions",
".",
"include?",
"(",
"\"destroy\"",
")",
"@search_sort",
"=",
"options",
".",
"search_sort?",
"(",
"@actions",
"-",
"%w(",
"create",
"update",
"destroy",
")",
")",
".",
"each",
"do",
"|",
"action",
"|",
"@action",
"=",
"action",
"formats",
".",
"each",
"do",
"|",
"format",
"|",
"@path",
"=",
"File",
".",
"join",
"(",
"base_path",
",",
"filename_with_extensions",
"(",
"action",
",",
"format",
")",
")",
"set_template",
"(",
"@action",
",",
"@path",
")",
"end",
"end",
"end"
] | This method seems to always get run first | [
"This",
"method",
"seems",
"to",
"always",
"get",
"run",
"first"
] | 380d37962fa84d0911e86fe01a8bca158c0b6b10 | https://github.com/jomalley2112/controller_scaffolding/blob/380d37962fa84d0911e86fe01a8bca158c0b6b10/lib/generators/controller_generator_base.rb#L15-L33 | train |
hopsoft/footing | lib/footing/hash.rb | Footing.Hash.to_h | def to_h
copied_object.each_with_object({}) do |pair, memo|
value = pair.last
if value.is_a?(Footing::Hash)
memo[pair.first] = value.to_h
elsif value.is_a?(::Array)
memo[pair.first] = value.map do |val|
if val.is_a?(Footing::Hash)
val.to_h
else
val
end
end
else
memo[pair.first] = value
end
end
end | ruby | def to_h
copied_object.each_with_object({}) do |pair, memo|
value = pair.last
if value.is_a?(Footing::Hash)
memo[pair.first] = value.to_h
elsif value.is_a?(::Array)
memo[pair.first] = value.map do |val|
if val.is_a?(Footing::Hash)
val.to_h
else
val
end
end
else
memo[pair.first] = value
end
end
end | [
"def",
"to_h",
"copied_object",
".",
"each_with_object",
"(",
"{",
"}",
")",
"do",
"|",
"pair",
",",
"memo",
"|",
"value",
"=",
"pair",
".",
"last",
"if",
"value",
".",
"is_a?",
"(",
"Footing",
"::",
"Hash",
")",
"memo",
"[",
"pair",
".",
"first",
"]",
"=",
"value",
".",
"to_h",
"elsif",
"value",
".",
"is_a?",
"(",
"::",
"Array",
")",
"memo",
"[",
"pair",
".",
"first",
"]",
"=",
"value",
".",
"map",
"do",
"|",
"val",
"|",
"if",
"val",
".",
"is_a?",
"(",
"Footing",
"::",
"Hash",
")",
"val",
".",
"to_h",
"else",
"val",
"end",
"end",
"else",
"memo",
"[",
"pair",
".",
"first",
"]",
"=",
"value",
"end",
"end",
"end"
] | Returns a standard ruby Hash representation of the wrapped Hash.
@return [Hash] | [
"Returns",
"a",
"standard",
"ruby",
"Hash",
"representation",
"of",
"the",
"wrapped",
"Hash",
"."
] | fa37cbde4a75b774f65c3367245c41a8607fe67a | https://github.com/hopsoft/footing/blob/fa37cbde4a75b774f65c3367245c41a8607fe67a/lib/footing/hash.rb#L18-L35 | train |
eyecuelab/smarteru | lib/smarteru/client.rb | Smarteru.Client.request | def request(operation, data)
opts = {
method: :post,
url: api_url,
payload: { 'Package' => body(operation, data) },
content_type: :xml,
verify_ssl: verify_ssl,
ssl_ca_file: ssl_ca_file }
response = RestClient::Request.execute(opts)
response = Response.new(response)
if !response.success? && fail_on_error
fail Error.new(response)
end
response
end | ruby | def request(operation, data)
opts = {
method: :post,
url: api_url,
payload: { 'Package' => body(operation, data) },
content_type: :xml,
verify_ssl: verify_ssl,
ssl_ca_file: ssl_ca_file }
response = RestClient::Request.execute(opts)
response = Response.new(response)
if !response.success? && fail_on_error
fail Error.new(response)
end
response
end | [
"def",
"request",
"(",
"operation",
",",
"data",
")",
"opts",
"=",
"{",
"method",
":",
":post",
",",
"url",
":",
"api_url",
",",
"payload",
":",
"{",
"'Package'",
"=>",
"body",
"(",
"operation",
",",
"data",
")",
"}",
",",
"content_type",
":",
":xml",
",",
"verify_ssl",
":",
"verify_ssl",
",",
"ssl_ca_file",
":",
"ssl_ca_file",
"}",
"response",
"=",
"RestClient",
"::",
"Request",
".",
"execute",
"(",
"opts",
")",
"response",
"=",
"Response",
".",
"new",
"(",
"response",
")",
"if",
"!",
"response",
".",
"success?",
"&&",
"fail_on_error",
"fail",
"Error",
".",
"new",
"(",
"response",
")",
"end",
"response",
"end"
] | Create an instance of an API client
==== Attributes
* +options+ - Access credentials and options hash, required keys are: account_api_key, user_api_key
==== Example
client = Smarteru::Client.new({account_api_key: 'abc', user_api_key: 'abc'})
Make an API request
==== Attributes
* +operation+ - Operation method eg getGroup
* +data+ - Data hash
==== Example
client.request("getGroup", {
group: {
name: 'MyGroup'
}
}) | [
"Create",
"an",
"instance",
"of",
"an",
"API",
"client"
] | a5b1c92f2d938d29b032520dcfdb9eb7be5fa020 | https://github.com/eyecuelab/smarteru/blob/a5b1c92f2d938d29b032520dcfdb9eb7be5fa020/lib/smarteru/client.rb#L33-L49 | train |
eyecuelab/smarteru | lib/smarteru/client.rb | Smarteru.Client.body_parameters | def body_parameters(parameters)
parameters_xml = ''
parameters.each_pair do |k, v|
key = parameter_key(k)
val = case v
when Hash
body_parameters(v)
when Array
v.map { |i| body_parameters(i) }.join('')
when nil
''
else
"<![CDATA[#{v}]]>"
end
parameters_xml << "<#{key}>#{val}</#{key}>"
end
parameters_xml
end | ruby | def body_parameters(parameters)
parameters_xml = ''
parameters.each_pair do |k, v|
key = parameter_key(k)
val = case v
when Hash
body_parameters(v)
when Array
v.map { |i| body_parameters(i) }.join('')
when nil
''
else
"<![CDATA[#{v}]]>"
end
parameters_xml << "<#{key}>#{val}</#{key}>"
end
parameters_xml
end | [
"def",
"body_parameters",
"(",
"parameters",
")",
"parameters_xml",
"=",
"''",
"parameters",
".",
"each_pair",
"do",
"|",
"k",
",",
"v",
"|",
"key",
"=",
"parameter_key",
"(",
"k",
")",
"val",
"=",
"case",
"v",
"when",
"Hash",
"body_parameters",
"(",
"v",
")",
"when",
"Array",
"v",
".",
"map",
"{",
"|",
"i",
"|",
"body_parameters",
"(",
"i",
")",
"}",
".",
"join",
"(",
"''",
")",
"when",
"nil",
"''",
"else",
"\"<![CDATA[#{v}]]>\"",
"end",
"parameters_xml",
"<<",
"\"<#{key}>#{val}</#{key}>\"",
"end",
"parameters_xml",
"end"
] | Build body parameteres xml
==== Attributes
* +parameters+ - Parameters hash | [
"Build",
"body",
"parameteres",
"xml"
] | a5b1c92f2d938d29b032520dcfdb9eb7be5fa020 | https://github.com/eyecuelab/smarteru/blob/a5b1c92f2d938d29b032520dcfdb9eb7be5fa020/lib/smarteru/client.rb#L78-L98 | train |
eyecuelab/smarteru | lib/smarteru/client.rb | Smarteru.Client.parameter_key | def parameter_key(term)
string = term.to_s
string = string.sub(/^[a-z\d]*/) { $&.capitalize }
string.gsub!(/(?:_|(\/))([a-z\d]*)/i) { "#{$1}#{$2.capitalize}" }
string
end | ruby | def parameter_key(term)
string = term.to_s
string = string.sub(/^[a-z\d]*/) { $&.capitalize }
string.gsub!(/(?:_|(\/))([a-z\d]*)/i) { "#{$1}#{$2.capitalize}" }
string
end | [
"def",
"parameter_key",
"(",
"term",
")",
"string",
"=",
"term",
".",
"to_s",
"string",
"=",
"string",
".",
"sub",
"(",
"/",
"\\d",
"/",
")",
"{",
"$&",
".",
"capitalize",
"}",
"string",
".",
"gsub!",
"(",
"/",
"\\/",
"\\d",
"/i",
")",
"{",
"\"#{$1}#{$2.capitalize}\"",
"}",
"string",
"end"
] | Prepare parameter key
==== Attributes
* +parameters+ - Parameters hash | [
"Prepare",
"parameter",
"key"
] | a5b1c92f2d938d29b032520dcfdb9eb7be5fa020 | https://github.com/eyecuelab/smarteru/blob/a5b1c92f2d938d29b032520dcfdb9eb7be5fa020/lib/smarteru/client.rb#L104-L109 | train |
jns/Aims | lib/aims/zinc_blende.rb | Aims.ZincBlende.get_bulk | def get_bulk
b = 0.25*self.lattice_const
a1 = Atom.new(0, 0, 0, self.cation)
a2 = Atom.new(b, b, b, self.anion)
v1 = Vector[0.5, 0.5, 0.0]*self.lattice_const
v2 = Vector[0.5, 0.0, 0.5]*self.lattice_const
v3 = Vector[0.0, 0.5, 0.5]*self.lattice_const
zb = Geometry.new([a1, a2], [v1, v2, v3])
millerx = [1, 0, 0]
millery = [0, 1, 0]
millerz = [0, 0, 1]
zb.set_miller_indices(millerx, millery, millerz)
return zb
end | ruby | def get_bulk
b = 0.25*self.lattice_const
a1 = Atom.new(0, 0, 0, self.cation)
a2 = Atom.new(b, b, b, self.anion)
v1 = Vector[0.5, 0.5, 0.0]*self.lattice_const
v2 = Vector[0.5, 0.0, 0.5]*self.lattice_const
v3 = Vector[0.0, 0.5, 0.5]*self.lattice_const
zb = Geometry.new([a1, a2], [v1, v2, v3])
millerx = [1, 0, 0]
millery = [0, 1, 0]
millerz = [0, 0, 1]
zb.set_miller_indices(millerx, millery, millerz)
return zb
end | [
"def",
"get_bulk",
"b",
"=",
"0.25",
"*",
"self",
".",
"lattice_const",
"a1",
"=",
"Atom",
".",
"new",
"(",
"0",
",",
"0",
",",
"0",
",",
"self",
".",
"cation",
")",
"a2",
"=",
"Atom",
".",
"new",
"(",
"b",
",",
"b",
",",
"b",
",",
"self",
".",
"anion",
")",
"v1",
"=",
"Vector",
"[",
"0.5",
",",
"0.5",
",",
"0.0",
"]",
"*",
"self",
".",
"lattice_const",
"v2",
"=",
"Vector",
"[",
"0.5",
",",
"0.0",
",",
"0.5",
"]",
"*",
"self",
".",
"lattice_const",
"v3",
"=",
"Vector",
"[",
"0.0",
",",
"0.5",
",",
"0.5",
"]",
"*",
"self",
".",
"lattice_const",
"zb",
"=",
"Geometry",
".",
"new",
"(",
"[",
"a1",
",",
"a2",
"]",
",",
"[",
"v1",
",",
"v2",
",",
"v3",
"]",
")",
"millerx",
"=",
"[",
"1",
",",
"0",
",",
"0",
"]",
"millery",
"=",
"[",
"0",
",",
"1",
",",
"0",
"]",
"millerz",
"=",
"[",
"0",
",",
"0",
",",
"1",
"]",
"zb",
".",
"set_miller_indices",
"(",
"millerx",
",",
"millery",
",",
"millerz",
")",
"return",
"zb",
"end"
] | Initialize the zinc-blende Geometry
cation and anion are the atomic
species occupying the two different sub-lattices.
lattice_const specifies the lattice constant
Return the traditional unit cell of bulk zinc blende | [
"Initialize",
"the",
"zinc",
"-",
"blende",
"Geometry",
"cation",
"and",
"anion",
"are",
"the",
"atomic",
"species",
"occupying",
"the",
"two",
"different",
"sub",
"-",
"lattices",
".",
"lattice_const",
"specifies",
"the",
"lattice",
"constant",
"Return",
"the",
"traditional",
"unit",
"cell",
"of",
"bulk",
"zinc",
"blende"
] | 2dcb6c02cd05b2d0c8ab72be4e85d60375df296c | https://github.com/jns/Aims/blob/2dcb6c02cd05b2d0c8ab72be4e85d60375df296c/lib/aims/zinc_blende.rb#L29-L46 | train |
jns/Aims | lib/aims/zinc_blende.rb | Aims.ZincBlende.fill_volume | def fill_volume(volume)
# First fill a cube that bounds the volume
max = volume.max_point
min = volume.min_point
dx = max[0] - min[0]
dy = max[1] - min[1]
dz = max[2] - min[2]
bulk = get_bulk
# This inverse matrix gives the number of repetitions
m = Matrix[[dx,0,0], [0,dy,0], [0,0,dz]]
v = Matrix[bulk.lattice_vectors[0].to_a,
bulk.lattice_vectors[1].to_a,
bulk.lattice_vectors[2].to_a]
rep_mat = m*(v.inverse)
# The only way I can figure out how to do this for an
# arbitrary set of lattice vectors is to fill the volume
# out along each edge of the super-cube and then eliminate duplicates
atoms = []
3.times do |i|
# this vector is the number of repetitions in the unit cell
# to fill the volume out along the i-th edge of the super-cube
n_repeat = rep_mat.row(i)
# Give the proper sign to the repeat
nx = (n_repeat[0] < 0) ? n_repeat[0].floor-1 : n_repeat[0].ceil+1
ny = (n_repeat[1] < 0) ? n_repeat[1].floor-1 : n_repeat[1].ceil+1
nz = (n_repeat[2] < 0) ? n_repeat[2].floor-1 : n_repeat[2].ceil+1
atoms += bulk.repeat(nx, ny, nz).atoms.find_all{|a| volume.contains_point(a.x, a.y, a.z)}
end
Geometry.new(atoms.uniq)
end | ruby | def fill_volume(volume)
# First fill a cube that bounds the volume
max = volume.max_point
min = volume.min_point
dx = max[0] - min[0]
dy = max[1] - min[1]
dz = max[2] - min[2]
bulk = get_bulk
# This inverse matrix gives the number of repetitions
m = Matrix[[dx,0,0], [0,dy,0], [0,0,dz]]
v = Matrix[bulk.lattice_vectors[0].to_a,
bulk.lattice_vectors[1].to_a,
bulk.lattice_vectors[2].to_a]
rep_mat = m*(v.inverse)
# The only way I can figure out how to do this for an
# arbitrary set of lattice vectors is to fill the volume
# out along each edge of the super-cube and then eliminate duplicates
atoms = []
3.times do |i|
# this vector is the number of repetitions in the unit cell
# to fill the volume out along the i-th edge of the super-cube
n_repeat = rep_mat.row(i)
# Give the proper sign to the repeat
nx = (n_repeat[0] < 0) ? n_repeat[0].floor-1 : n_repeat[0].ceil+1
ny = (n_repeat[1] < 0) ? n_repeat[1].floor-1 : n_repeat[1].ceil+1
nz = (n_repeat[2] < 0) ? n_repeat[2].floor-1 : n_repeat[2].ceil+1
atoms += bulk.repeat(nx, ny, nz).atoms.find_all{|a| volume.contains_point(a.x, a.y, a.z)}
end
Geometry.new(atoms.uniq)
end | [
"def",
"fill_volume",
"(",
"volume",
")",
"max",
"=",
"volume",
".",
"max_point",
"min",
"=",
"volume",
".",
"min_point",
"dx",
"=",
"max",
"[",
"0",
"]",
"-",
"min",
"[",
"0",
"]",
"dy",
"=",
"max",
"[",
"1",
"]",
"-",
"min",
"[",
"1",
"]",
"dz",
"=",
"max",
"[",
"2",
"]",
"-",
"min",
"[",
"2",
"]",
"bulk",
"=",
"get_bulk",
"m",
"=",
"Matrix",
"[",
"[",
"dx",
",",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"dy",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
",",
"dz",
"]",
"]",
"v",
"=",
"Matrix",
"[",
"bulk",
".",
"lattice_vectors",
"[",
"0",
"]",
".",
"to_a",
",",
"bulk",
".",
"lattice_vectors",
"[",
"1",
"]",
".",
"to_a",
",",
"bulk",
".",
"lattice_vectors",
"[",
"2",
"]",
".",
"to_a",
"]",
"rep_mat",
"=",
"m",
"*",
"(",
"v",
".",
"inverse",
")",
"atoms",
"=",
"[",
"]",
"3",
".",
"times",
"do",
"|",
"i",
"|",
"n_repeat",
"=",
"rep_mat",
".",
"row",
"(",
"i",
")",
"nx",
"=",
"(",
"n_repeat",
"[",
"0",
"]",
"<",
"0",
")",
"?",
"n_repeat",
"[",
"0",
"]",
".",
"floor",
"-",
"1",
":",
"n_repeat",
"[",
"0",
"]",
".",
"ceil",
"+",
"1",
"ny",
"=",
"(",
"n_repeat",
"[",
"1",
"]",
"<",
"0",
")",
"?",
"n_repeat",
"[",
"1",
"]",
".",
"floor",
"-",
"1",
":",
"n_repeat",
"[",
"1",
"]",
".",
"ceil",
"+",
"1",
"nz",
"=",
"(",
"n_repeat",
"[",
"2",
"]",
"<",
"0",
")",
"?",
"n_repeat",
"[",
"2",
"]",
".",
"floor",
"-",
"1",
":",
"n_repeat",
"[",
"2",
"]",
".",
"ceil",
"+",
"1",
"atoms",
"+=",
"bulk",
".",
"repeat",
"(",
"nx",
",",
"ny",
",",
"nz",
")",
".",
"atoms",
".",
"find_all",
"{",
"|",
"a",
"|",
"volume",
".",
"contains_point",
"(",
"a",
".",
"x",
",",
"a",
".",
"y",
",",
"a",
".",
"z",
")",
"}",
"end",
"Geometry",
".",
"new",
"(",
"atoms",
".",
"uniq",
")",
"end"
] | Fill the given volume with atoms | [
"Fill",
"the",
"given",
"volume",
"with",
"atoms"
] | 2dcb6c02cd05b2d0c8ab72be4e85d60375df296c | https://github.com/jns/Aims/blob/2dcb6c02cd05b2d0c8ab72be4e85d60375df296c/lib/aims/zinc_blende.rb#L49-L85 | train |
jns/Aims | lib/aims/zinc_blende.rb | Aims.ZincBlende.get_001_surface | def get_001_surface(monolayers, vacuum, constrain_layers = 0)
anion = Atom.new(0,0,0,self.cation)
cation = Atom.new(0.25*self.lattice_const, 0.25*self.lattice_const, 0.25*self.lattice_const, self.anion)
v1 = Vector[0.5, 0.5, 0]*self.lattice_const
v2 = Vector[-0.5,0.5,0]*self.lattice_const
v3 = Vector[0.5, 0, 0.5]*self.lattice_const
zb = Geometry.new([anion, cation], [v1,v2,v3])
millerX = [1,0,0]
millerY = [0,1,0]
millerZ = [0,0,1]
zb.set_miller_indices(millerX, millerY, millerZ)
# Repeat the unit cell. The unit cell is a bi-layer so divide by 2
zb = zb.repeat(1,1,(monolayers/2).ceil)
if 0 < vacuum
# Add vacuum
monolayerSep = v3[2]/2
zb.lattice_vectors[2] = Vector[0, 0, (monolayers-1)*monolayerSep.abs + vacuum.to_f]
# Move everything into a nice tidy unit cell.
zb = zb.correct
end
minZ = zb.atoms.min{|a,b| a.z <=> b.z}.z
# Reject the top layer of atoms if an odd number of monolayers was requested.
# This is necessary because the primitive cell is a bilayer
zb.atoms.reject! {|a|
a.z >= (minZ + monolayerSep.abs*monolayers)
}
# Constrain the bottom layers
zb.atoms.each{|a|
if (a.z < minZ + monolayerSep.abs*constrain_layers)
a.constrain = ".true."
end
}
# Return the completed unit cell
return zb
end | ruby | def get_001_surface(monolayers, vacuum, constrain_layers = 0)
anion = Atom.new(0,0,0,self.cation)
cation = Atom.new(0.25*self.lattice_const, 0.25*self.lattice_const, 0.25*self.lattice_const, self.anion)
v1 = Vector[0.5, 0.5, 0]*self.lattice_const
v2 = Vector[-0.5,0.5,0]*self.lattice_const
v3 = Vector[0.5, 0, 0.5]*self.lattice_const
zb = Geometry.new([anion, cation], [v1,v2,v3])
millerX = [1,0,0]
millerY = [0,1,0]
millerZ = [0,0,1]
zb.set_miller_indices(millerX, millerY, millerZ)
# Repeat the unit cell. The unit cell is a bi-layer so divide by 2
zb = zb.repeat(1,1,(monolayers/2).ceil)
if 0 < vacuum
# Add vacuum
monolayerSep = v3[2]/2
zb.lattice_vectors[2] = Vector[0, 0, (monolayers-1)*monolayerSep.abs + vacuum.to_f]
# Move everything into a nice tidy unit cell.
zb = zb.correct
end
minZ = zb.atoms.min{|a,b| a.z <=> b.z}.z
# Reject the top layer of atoms if an odd number of monolayers was requested.
# This is necessary because the primitive cell is a bilayer
zb.atoms.reject! {|a|
a.z >= (minZ + monolayerSep.abs*monolayers)
}
# Constrain the bottom layers
zb.atoms.each{|a|
if (a.z < minZ + monolayerSep.abs*constrain_layers)
a.constrain = ".true."
end
}
# Return the completed unit cell
return zb
end | [
"def",
"get_001_surface",
"(",
"monolayers",
",",
"vacuum",
",",
"constrain_layers",
"=",
"0",
")",
"anion",
"=",
"Atom",
".",
"new",
"(",
"0",
",",
"0",
",",
"0",
",",
"self",
".",
"cation",
")",
"cation",
"=",
"Atom",
".",
"new",
"(",
"0.25",
"*",
"self",
".",
"lattice_const",
",",
"0.25",
"*",
"self",
".",
"lattice_const",
",",
"0.25",
"*",
"self",
".",
"lattice_const",
",",
"self",
".",
"anion",
")",
"v1",
"=",
"Vector",
"[",
"0.5",
",",
"0.5",
",",
"0",
"]",
"*",
"self",
".",
"lattice_const",
"v2",
"=",
"Vector",
"[",
"-",
"0.5",
",",
"0.5",
",",
"0",
"]",
"*",
"self",
".",
"lattice_const",
"v3",
"=",
"Vector",
"[",
"0.5",
",",
"0",
",",
"0.5",
"]",
"*",
"self",
".",
"lattice_const",
"zb",
"=",
"Geometry",
".",
"new",
"(",
"[",
"anion",
",",
"cation",
"]",
",",
"[",
"v1",
",",
"v2",
",",
"v3",
"]",
")",
"millerX",
"=",
"[",
"1",
",",
"0",
",",
"0",
"]",
"millerY",
"=",
"[",
"0",
",",
"1",
",",
"0",
"]",
"millerZ",
"=",
"[",
"0",
",",
"0",
",",
"1",
"]",
"zb",
".",
"set_miller_indices",
"(",
"millerX",
",",
"millerY",
",",
"millerZ",
")",
"zb",
"=",
"zb",
".",
"repeat",
"(",
"1",
",",
"1",
",",
"(",
"monolayers",
"/",
"2",
")",
".",
"ceil",
")",
"if",
"0",
"<",
"vacuum",
"monolayerSep",
"=",
"v3",
"[",
"2",
"]",
"/",
"2",
"zb",
".",
"lattice_vectors",
"[",
"2",
"]",
"=",
"Vector",
"[",
"0",
",",
"0",
",",
"(",
"monolayers",
"-",
"1",
")",
"*",
"monolayerSep",
".",
"abs",
"+",
"vacuum",
".",
"to_f",
"]",
"zb",
"=",
"zb",
".",
"correct",
"end",
"minZ",
"=",
"zb",
".",
"atoms",
".",
"min",
"{",
"|",
"a",
",",
"b",
"|",
"a",
".",
"z",
"<=>",
"b",
".",
"z",
"}",
".",
"z",
"zb",
".",
"atoms",
".",
"reject!",
"{",
"|",
"a",
"|",
"a",
".",
"z",
">=",
"(",
"minZ",
"+",
"monolayerSep",
".",
"abs",
"*",
"monolayers",
")",
"}",
"zb",
".",
"atoms",
".",
"each",
"{",
"|",
"a",
"|",
"if",
"(",
"a",
".",
"z",
"<",
"minZ",
"+",
"monolayerSep",
".",
"abs",
"*",
"constrain_layers",
")",
"a",
".",
"constrain",
"=",
"\".true.\"",
"end",
"}",
"return",
"zb",
"end"
] | Return a unit cell for a slab of 001
Specify the number of atomic monolayers,
the vacuum thickness in angstrom,
and the number of layers to constrain at the base of the slab | [
"Return",
"a",
"unit",
"cell",
"for",
"a",
"slab",
"of",
"001",
"Specify",
"the",
"number",
"of",
"atomic",
"monolayers",
"the",
"vacuum",
"thickness",
"in",
"angstrom",
"and",
"the",
"number",
"of",
"layers",
"to",
"constrain",
"at",
"the",
"base",
"of",
"the",
"slab"
] | 2dcb6c02cd05b2d0c8ab72be4e85d60375df296c | https://github.com/jns/Aims/blob/2dcb6c02cd05b2d0c8ab72be4e85d60375df296c/lib/aims/zinc_blende.rb#L91-L132 | train |
jns/Aims | lib/aims/zinc_blende.rb | Aims.ZincBlende.get_111_surface | def get_111_surface(dir, monolayers, vacuum, constrain_layers = 0)
if dir == "A"
top_atom = self.anion
bot_atom = self.cation
elsif dir == "B"
top_atom = self.cation
bot_atom = self.anion
else
raise "Direction must be either A or B"
end
# The atoms on a FCC
as1 = Atom.new(0.0, 0.0, 0.0, top_atom)
ga1 = Atom.new(0.0, 0.0, -sqrt(3)/4*self.lattice_const, bot_atom)
# The lattice Vectors
v1 = Vector[0.5*sqrt(2), 0.0, 0.0]*self.lattice_const
v2 = Vector[sqrt(2)*0.25, sqrt(6)*0.25, 0.0]*self.lattice_const
v3 = Vector[sqrt(2)*0.25, sqrt(2.0/3.0)*0.25, -1*sqrt(4.0/3.0)*0.5]*self.lattice_const
# The unit cell
zb = Geometry.new([as1, ga1], [v1, v2, v3])
# The Miller Indices
millerX = [-1, 1, 0] # Orientation of the crystal pointing in the cartesian +x axis
millerY = [1, 1, -2] # Orientation of the crystal pointing in the cartesian +y axis
millerZ = [-1, -1, -1] # Orientation of the crystal pointing in the cartesian +z axis
zb.set_miller_indices(millerX, millerY, millerZ)
# Repeat the unit cell and add vacuum
if 0 < vacuum
# We actually repeat the unit cell monolayers+1 times because
# I will strip off the top and bottom atoms to make the proper surface
zb = zb.repeat(1,1,monolayers+1)
bilayerSep = v3[2]
zb.lattice_vectors[2] = Vector[0, 0, (monolayers-1)*(bilayerSep.abs) + vacuum]
# Strip off the top and bottom atom
minZ = zb.atoms.min{|a,b| a.z <=> b.z}.z
maxZ = zb.atoms.max{|a,b| a.z <=> b.z}.z
zb.atoms.reject!{|a| a.z == maxZ}
zb.atoms.reject!{|a| a.z == minZ}
# Constrain the bottom layers if requested
if 0 < constrain_layers
# get the min again because we removed the atoms at minZ above
minZ = zb.atoms.min{|a,b| a.z <=> b.z}.z
constrain_below = minZ + bilayerSep.abs*constrain_layers
zb.atoms.each{|a|
if (a.z < constrain_below)
a.constrain = ".true."
end
}
end
end
zb
end | ruby | def get_111_surface(dir, monolayers, vacuum, constrain_layers = 0)
if dir == "A"
top_atom = self.anion
bot_atom = self.cation
elsif dir == "B"
top_atom = self.cation
bot_atom = self.anion
else
raise "Direction must be either A or B"
end
# The atoms on a FCC
as1 = Atom.new(0.0, 0.0, 0.0, top_atom)
ga1 = Atom.new(0.0, 0.0, -sqrt(3)/4*self.lattice_const, bot_atom)
# The lattice Vectors
v1 = Vector[0.5*sqrt(2), 0.0, 0.0]*self.lattice_const
v2 = Vector[sqrt(2)*0.25, sqrt(6)*0.25, 0.0]*self.lattice_const
v3 = Vector[sqrt(2)*0.25, sqrt(2.0/3.0)*0.25, -1*sqrt(4.0/3.0)*0.5]*self.lattice_const
# The unit cell
zb = Geometry.new([as1, ga1], [v1, v2, v3])
# The Miller Indices
millerX = [-1, 1, 0] # Orientation of the crystal pointing in the cartesian +x axis
millerY = [1, 1, -2] # Orientation of the crystal pointing in the cartesian +y axis
millerZ = [-1, -1, -1] # Orientation of the crystal pointing in the cartesian +z axis
zb.set_miller_indices(millerX, millerY, millerZ)
# Repeat the unit cell and add vacuum
if 0 < vacuum
# We actually repeat the unit cell monolayers+1 times because
# I will strip off the top and bottom atoms to make the proper surface
zb = zb.repeat(1,1,monolayers+1)
bilayerSep = v3[2]
zb.lattice_vectors[2] = Vector[0, 0, (monolayers-1)*(bilayerSep.abs) + vacuum]
# Strip off the top and bottom atom
minZ = zb.atoms.min{|a,b| a.z <=> b.z}.z
maxZ = zb.atoms.max{|a,b| a.z <=> b.z}.z
zb.atoms.reject!{|a| a.z == maxZ}
zb.atoms.reject!{|a| a.z == minZ}
# Constrain the bottom layers if requested
if 0 < constrain_layers
# get the min again because we removed the atoms at minZ above
minZ = zb.atoms.min{|a,b| a.z <=> b.z}.z
constrain_below = minZ + bilayerSep.abs*constrain_layers
zb.atoms.each{|a|
if (a.z < constrain_below)
a.constrain = ".true."
end
}
end
end
zb
end | [
"def",
"get_111_surface",
"(",
"dir",
",",
"monolayers",
",",
"vacuum",
",",
"constrain_layers",
"=",
"0",
")",
"if",
"dir",
"==",
"\"A\"",
"top_atom",
"=",
"self",
".",
"anion",
"bot_atom",
"=",
"self",
".",
"cation",
"elsif",
"dir",
"==",
"\"B\"",
"top_atom",
"=",
"self",
".",
"cation",
"bot_atom",
"=",
"self",
".",
"anion",
"else",
"raise",
"\"Direction must be either A or B\"",
"end",
"as1",
"=",
"Atom",
".",
"new",
"(",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"top_atom",
")",
"ga1",
"=",
"Atom",
".",
"new",
"(",
"0.0",
",",
"0.0",
",",
"-",
"sqrt",
"(",
"3",
")",
"/",
"4",
"*",
"self",
".",
"lattice_const",
",",
"bot_atom",
")",
"v1",
"=",
"Vector",
"[",
"0.5",
"*",
"sqrt",
"(",
"2",
")",
",",
"0.0",
",",
"0.0",
"]",
"*",
"self",
".",
"lattice_const",
"v2",
"=",
"Vector",
"[",
"sqrt",
"(",
"2",
")",
"*",
"0.25",
",",
"sqrt",
"(",
"6",
")",
"*",
"0.25",
",",
"0.0",
"]",
"*",
"self",
".",
"lattice_const",
"v3",
"=",
"Vector",
"[",
"sqrt",
"(",
"2",
")",
"*",
"0.25",
",",
"sqrt",
"(",
"2.0",
"/",
"3.0",
")",
"*",
"0.25",
",",
"-",
"1",
"*",
"sqrt",
"(",
"4.0",
"/",
"3.0",
")",
"*",
"0.5",
"]",
"*",
"self",
".",
"lattice_const",
"zb",
"=",
"Geometry",
".",
"new",
"(",
"[",
"as1",
",",
"ga1",
"]",
",",
"[",
"v1",
",",
"v2",
",",
"v3",
"]",
")",
"millerX",
"=",
"[",
"-",
"1",
",",
"1",
",",
"0",
"]",
"millerY",
"=",
"[",
"1",
",",
"1",
",",
"-",
"2",
"]",
"millerZ",
"=",
"[",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
"]",
"zb",
".",
"set_miller_indices",
"(",
"millerX",
",",
"millerY",
",",
"millerZ",
")",
"if",
"0",
"<",
"vacuum",
"zb",
"=",
"zb",
".",
"repeat",
"(",
"1",
",",
"1",
",",
"monolayers",
"+",
"1",
")",
"bilayerSep",
"=",
"v3",
"[",
"2",
"]",
"zb",
".",
"lattice_vectors",
"[",
"2",
"]",
"=",
"Vector",
"[",
"0",
",",
"0",
",",
"(",
"monolayers",
"-",
"1",
")",
"*",
"(",
"bilayerSep",
".",
"abs",
")",
"+",
"vacuum",
"]",
"minZ",
"=",
"zb",
".",
"atoms",
".",
"min",
"{",
"|",
"a",
",",
"b",
"|",
"a",
".",
"z",
"<=>",
"b",
".",
"z",
"}",
".",
"z",
"maxZ",
"=",
"zb",
".",
"atoms",
".",
"max",
"{",
"|",
"a",
",",
"b",
"|",
"a",
".",
"z",
"<=>",
"b",
".",
"z",
"}",
".",
"z",
"zb",
".",
"atoms",
".",
"reject!",
"{",
"|",
"a",
"|",
"a",
".",
"z",
"==",
"maxZ",
"}",
"zb",
".",
"atoms",
".",
"reject!",
"{",
"|",
"a",
"|",
"a",
".",
"z",
"==",
"minZ",
"}",
"if",
"0",
"<",
"constrain_layers",
"minZ",
"=",
"zb",
".",
"atoms",
".",
"min",
"{",
"|",
"a",
",",
"b",
"|",
"a",
".",
"z",
"<=>",
"b",
".",
"z",
"}",
".",
"z",
"constrain_below",
"=",
"minZ",
"+",
"bilayerSep",
".",
"abs",
"*",
"constrain_layers",
"zb",
".",
"atoms",
".",
"each",
"{",
"|",
"a",
"|",
"if",
"(",
"a",
".",
"z",
"<",
"constrain_below",
")",
"a",
".",
"constrain",
"=",
"\".true.\"",
"end",
"}",
"end",
"end",
"zb",
"end"
] | Return a unit cell for a slab of 111
dir is either "A" or "B" for the cation or anion terminated slab
specify the number of atomic monolayers
and the vacuum thickness in angstrom | [
"Return",
"a",
"unit",
"cell",
"for",
"a",
"slab",
"of",
"111",
"dir",
"is",
"either",
"A",
"or",
"B",
"for",
"the",
"cation",
"or",
"anion",
"terminated",
"slab",
"specify",
"the",
"number",
"of",
"atomic",
"monolayers",
"and",
"the",
"vacuum",
"thickness",
"in",
"angstrom"
] | 2dcb6c02cd05b2d0c8ab72be4e85d60375df296c | https://github.com/jns/Aims/blob/2dcb6c02cd05b2d0c8ab72be4e85d60375df296c/lib/aims/zinc_blende.rb#L157-L218 | train |
jns/Aims | lib/aims/zinc_blende.rb | Aims.ZincBlende.get_112_surface | def get_112_surface(monolayers, vacuum=0, constrain_layers = 0)
atom1 = Atom.new(0,0,0,self.cation)
atom2 = Atom.new(self.lattice_const*sqrt(3)/2, 0, 0, self.anion)
v1 = Vector[sqrt(3), 0, 0]*self.lattice_const
v2 = Vector[0, sqrt(2)/2, 0]*self.lattice_const
v3 = Vector[1/sqrt(3), 1/(sqrt(3)*2), -1/(sqrt(3)*2)]*self.lattice_const
millerX = Vector[1, 1, -2];
millerY = Vector[-1, 1, 0];
millerZ = Vector[-1, -1, -1]
# The unit cell
zb = Geometry.new([atom1, atom2], [v1, v2, v3])
zb.set_miller_indices(millerX, millerY, millerZ)
# Repeat the unit cell
zb = zb.repeat(1,1,monolayers)
if 0 < vacuum
# Add vacuum
monolayerSep = v3[2]
zb.lattice_vectors[2] = Vector[0, 0, (monolayers*monolayerSep).abs + vacuum.to_f]
# Move everything into a nice tidy unit cell.
zb = zb.correct
end
# # Constrain the bottom 2 layers
# zb.atoms.each{|a|
# if (a.z < monolayerSep*2)
# a.constrain = ".true."
# end
# }
# Return the completed unit cell
return zb
end | ruby | def get_112_surface(monolayers, vacuum=0, constrain_layers = 0)
atom1 = Atom.new(0,0,0,self.cation)
atom2 = Atom.new(self.lattice_const*sqrt(3)/2, 0, 0, self.anion)
v1 = Vector[sqrt(3), 0, 0]*self.lattice_const
v2 = Vector[0, sqrt(2)/2, 0]*self.lattice_const
v3 = Vector[1/sqrt(3), 1/(sqrt(3)*2), -1/(sqrt(3)*2)]*self.lattice_const
millerX = Vector[1, 1, -2];
millerY = Vector[-1, 1, 0];
millerZ = Vector[-1, -1, -1]
# The unit cell
zb = Geometry.new([atom1, atom2], [v1, v2, v3])
zb.set_miller_indices(millerX, millerY, millerZ)
# Repeat the unit cell
zb = zb.repeat(1,1,monolayers)
if 0 < vacuum
# Add vacuum
monolayerSep = v3[2]
zb.lattice_vectors[2] = Vector[0, 0, (monolayers*monolayerSep).abs + vacuum.to_f]
# Move everything into a nice tidy unit cell.
zb = zb.correct
end
# # Constrain the bottom 2 layers
# zb.atoms.each{|a|
# if (a.z < monolayerSep*2)
# a.constrain = ".true."
# end
# }
# Return the completed unit cell
return zb
end | [
"def",
"get_112_surface",
"(",
"monolayers",
",",
"vacuum",
"=",
"0",
",",
"constrain_layers",
"=",
"0",
")",
"atom1",
"=",
"Atom",
".",
"new",
"(",
"0",
",",
"0",
",",
"0",
",",
"self",
".",
"cation",
")",
"atom2",
"=",
"Atom",
".",
"new",
"(",
"self",
".",
"lattice_const",
"*",
"sqrt",
"(",
"3",
")",
"/",
"2",
",",
"0",
",",
"0",
",",
"self",
".",
"anion",
")",
"v1",
"=",
"Vector",
"[",
"sqrt",
"(",
"3",
")",
",",
"0",
",",
"0",
"]",
"*",
"self",
".",
"lattice_const",
"v2",
"=",
"Vector",
"[",
"0",
",",
"sqrt",
"(",
"2",
")",
"/",
"2",
",",
"0",
"]",
"*",
"self",
".",
"lattice_const",
"v3",
"=",
"Vector",
"[",
"1",
"/",
"sqrt",
"(",
"3",
")",
",",
"1",
"/",
"(",
"sqrt",
"(",
"3",
")",
"*",
"2",
")",
",",
"-",
"1",
"/",
"(",
"sqrt",
"(",
"3",
")",
"*",
"2",
")",
"]",
"*",
"self",
".",
"lattice_const",
"millerX",
"=",
"Vector",
"[",
"1",
",",
"1",
",",
"-",
"2",
"]",
";",
"millerY",
"=",
"Vector",
"[",
"-",
"1",
",",
"1",
",",
"0",
"]",
";",
"millerZ",
"=",
"Vector",
"[",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
"]",
"zb",
"=",
"Geometry",
".",
"new",
"(",
"[",
"atom1",
",",
"atom2",
"]",
",",
"[",
"v1",
",",
"v2",
",",
"v3",
"]",
")",
"zb",
".",
"set_miller_indices",
"(",
"millerX",
",",
"millerY",
",",
"millerZ",
")",
"zb",
"=",
"zb",
".",
"repeat",
"(",
"1",
",",
"1",
",",
"monolayers",
")",
"if",
"0",
"<",
"vacuum",
"monolayerSep",
"=",
"v3",
"[",
"2",
"]",
"zb",
".",
"lattice_vectors",
"[",
"2",
"]",
"=",
"Vector",
"[",
"0",
",",
"0",
",",
"(",
"monolayers",
"*",
"monolayerSep",
")",
".",
"abs",
"+",
"vacuum",
".",
"to_f",
"]",
"zb",
"=",
"zb",
".",
"correct",
"end",
"return",
"zb",
"end"
] | return a unit cell for a slab of 112
specify the number of atomic monolayers and the vacuum thickness in angstrom | [
"return",
"a",
"unit",
"cell",
"for",
"a",
"slab",
"of",
"112",
"specify",
"the",
"number",
"of",
"atomic",
"monolayers",
"and",
"the",
"vacuum",
"thickness",
"in",
"angstrom"
] | 2dcb6c02cd05b2d0c8ab72be4e85d60375df296c | https://github.com/jns/Aims/blob/2dcb6c02cd05b2d0c8ab72be4e85d60375df296c/lib/aims/zinc_blende.rb#L222-L260 | train |
jns/Aims | lib/aims/zinc_blende.rb | Aims.ZincBlende.get_110_surface | def get_110_surface(monolayers, vacuum=0, constrain_layers = 0)
# The atoms on a FCC
atom1 = Atom.new(0,0,0,self.cation)
atom2 = Atom.new(self.lattice_const*1/(2*sqrt(2)), self.lattice_const*0.25, 0.0, self.anion)
# The lattice Vectors
v1 = Vector[1/sqrt(2), 0.0, 0.0]*self.lattice_const
v2 = Vector[0.0, 1.0, 0.0]*self.lattice_const
v3 = Vector[1/(2*sqrt(2)), -0.5, 1/(2*sqrt(2))]*self.lattice_const
# The miller indices for each primitive cartesian direction
millerX = Vector[1, -1, 0]
millerY = Vector[0, 0, 1]
millerZ = Vector[1, 1, 0]
# The unit cell
zb = Geometry.new([atom1, atom2], [v1, v2, v3])
zb.set_miller_indices(millerX, millerY, millerZ)
# Repeat the unit cell
zb = zb.repeat(1,1,monolayers)
monolayerSep = v3[2]
if 0 < vacuum
# Add vacuum
zb.lattice_vectors[2] = Vector[0, 0, (monolayers-1)*monolayerSep.abs + vacuum.to_f]
# Move everything into a nice tidy unit cell.
zb = zb.correct
end
# # Constrain the bottom layers
zb.atoms.each{|a|
if (a.z < monolayerSep*constrain_layers)
a.constrain = ".true."
end
}
# Return the completed unit cell
return zb
end | ruby | def get_110_surface(monolayers, vacuum=0, constrain_layers = 0)
# The atoms on a FCC
atom1 = Atom.new(0,0,0,self.cation)
atom2 = Atom.new(self.lattice_const*1/(2*sqrt(2)), self.lattice_const*0.25, 0.0, self.anion)
# The lattice Vectors
v1 = Vector[1/sqrt(2), 0.0, 0.0]*self.lattice_const
v2 = Vector[0.0, 1.0, 0.0]*self.lattice_const
v3 = Vector[1/(2*sqrt(2)), -0.5, 1/(2*sqrt(2))]*self.lattice_const
# The miller indices for each primitive cartesian direction
millerX = Vector[1, -1, 0]
millerY = Vector[0, 0, 1]
millerZ = Vector[1, 1, 0]
# The unit cell
zb = Geometry.new([atom1, atom2], [v1, v2, v3])
zb.set_miller_indices(millerX, millerY, millerZ)
# Repeat the unit cell
zb = zb.repeat(1,1,monolayers)
monolayerSep = v3[2]
if 0 < vacuum
# Add vacuum
zb.lattice_vectors[2] = Vector[0, 0, (monolayers-1)*monolayerSep.abs + vacuum.to_f]
# Move everything into a nice tidy unit cell.
zb = zb.correct
end
# # Constrain the bottom layers
zb.atoms.each{|a|
if (a.z < monolayerSep*constrain_layers)
a.constrain = ".true."
end
}
# Return the completed unit cell
return zb
end | [
"def",
"get_110_surface",
"(",
"monolayers",
",",
"vacuum",
"=",
"0",
",",
"constrain_layers",
"=",
"0",
")",
"atom1",
"=",
"Atom",
".",
"new",
"(",
"0",
",",
"0",
",",
"0",
",",
"self",
".",
"cation",
")",
"atom2",
"=",
"Atom",
".",
"new",
"(",
"self",
".",
"lattice_const",
"*",
"1",
"/",
"(",
"2",
"*",
"sqrt",
"(",
"2",
")",
")",
",",
"self",
".",
"lattice_const",
"*",
"0.25",
",",
"0.0",
",",
"self",
".",
"anion",
")",
"v1",
"=",
"Vector",
"[",
"1",
"/",
"sqrt",
"(",
"2",
")",
",",
"0.0",
",",
"0.0",
"]",
"*",
"self",
".",
"lattice_const",
"v2",
"=",
"Vector",
"[",
"0.0",
",",
"1.0",
",",
"0.0",
"]",
"*",
"self",
".",
"lattice_const",
"v3",
"=",
"Vector",
"[",
"1",
"/",
"(",
"2",
"*",
"sqrt",
"(",
"2",
")",
")",
",",
"-",
"0.5",
",",
"1",
"/",
"(",
"2",
"*",
"sqrt",
"(",
"2",
")",
")",
"]",
"*",
"self",
".",
"lattice_const",
"millerX",
"=",
"Vector",
"[",
"1",
",",
"-",
"1",
",",
"0",
"]",
"millerY",
"=",
"Vector",
"[",
"0",
",",
"0",
",",
"1",
"]",
"millerZ",
"=",
"Vector",
"[",
"1",
",",
"1",
",",
"0",
"]",
"zb",
"=",
"Geometry",
".",
"new",
"(",
"[",
"atom1",
",",
"atom2",
"]",
",",
"[",
"v1",
",",
"v2",
",",
"v3",
"]",
")",
"zb",
".",
"set_miller_indices",
"(",
"millerX",
",",
"millerY",
",",
"millerZ",
")",
"zb",
"=",
"zb",
".",
"repeat",
"(",
"1",
",",
"1",
",",
"monolayers",
")",
"monolayerSep",
"=",
"v3",
"[",
"2",
"]",
"if",
"0",
"<",
"vacuum",
"zb",
".",
"lattice_vectors",
"[",
"2",
"]",
"=",
"Vector",
"[",
"0",
",",
"0",
",",
"(",
"monolayers",
"-",
"1",
")",
"*",
"monolayerSep",
".",
"abs",
"+",
"vacuum",
".",
"to_f",
"]",
"zb",
"=",
"zb",
".",
"correct",
"end",
"zb",
".",
"atoms",
".",
"each",
"{",
"|",
"a",
"|",
"if",
"(",
"a",
".",
"z",
"<",
"monolayerSep",
"*",
"constrain_layers",
")",
"a",
".",
"constrain",
"=",
"\".true.\"",
"end",
"}",
"return",
"zb",
"end"
] | Return a unit cell for a slab of 110
specify the number of atomic monolayers
and the vacuum thickness in angstrom | [
"Return",
"a",
"unit",
"cell",
"for",
"a",
"slab",
"of",
"110",
"specify",
"the",
"number",
"of",
"atomic",
"monolayers",
"and",
"the",
"vacuum",
"thickness",
"in",
"angstrom"
] | 2dcb6c02cd05b2d0c8ab72be4e85d60375df296c | https://github.com/jns/Aims/blob/2dcb6c02cd05b2d0c8ab72be4e85d60375df296c/lib/aims/zinc_blende.rb#L266-L308 | train |
jinx/core | lib/jinx/helpers/collection.rb | Jinx.Collection.to_compact_hash_with_index | def to_compact_hash_with_index
hash = {}
self.each_with_index do |item, index|
next if item.nil?
value = yield(item, index)
next if value.nil_or_empty?
hash[item] = value
end
hash
end | ruby | def to_compact_hash_with_index
hash = {}
self.each_with_index do |item, index|
next if item.nil?
value = yield(item, index)
next if value.nil_or_empty?
hash[item] = value
end
hash
end | [
"def",
"to_compact_hash_with_index",
"hash",
"=",
"{",
"}",
"self",
".",
"each_with_index",
"do",
"|",
"item",
",",
"index",
"|",
"next",
"if",
"item",
".",
"nil?",
"value",
"=",
"yield",
"(",
"item",
",",
"index",
")",
"next",
"if",
"value",
".",
"nil_or_empty?",
"hash",
"[",
"item",
"]",
"=",
"value",
"end",
"hash",
"end"
] | Returns a new Hash generated from this Collection with a block whose arguments include the enumerated item
and its index. Every value which is nil or empty is excluded.
@example
[1, 2, 3].to_compact_hash_with_index { |item, index| item + index } #=> { 1 => 1, 2 => 3, 3 => 5 }
@yield [item, index] the hash value
@yieldparam item the enumerated value
@yieldparam index the enumeration index
@return [Hash] this {Enumerable} converted to a hash by the given block | [
"Returns",
"a",
"new",
"Hash",
"generated",
"from",
"this",
"Collection",
"with",
"a",
"block",
"whose",
"arguments",
"include",
"the",
"enumerated",
"item",
"and",
"its",
"index",
".",
"Every",
"value",
"which",
"is",
"nil",
"or",
"empty",
"is",
"excluded",
"."
] | 964a274cc9d7ab74613910e8375e12ed210a434d | https://github.com/jinx/core/blob/964a274cc9d7ab74613910e8375e12ed210a434d/lib/jinx/helpers/collection.rb#L46-L55 | train |
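Since the docstring above already gives the expected input and output, the method can be checked in isolation with a small stand-in module; the only change is that the library's nil_or_empty? helper is spelled out as a plain nil/empty test, and mixing it into Array is done purely for this sketch.

module CompactHashWithIndex
  # Body lifted from the record above, with nil_or_empty? written out.
  def to_compact_hash_with_index
    hash = {}
    each_with_index do |item, index|
      next if item.nil?
      value = yield(item, index)
      next if value.nil? || (value.respond_to?(:empty?) && value.empty?)
      hash[item] = value
    end
    hash
  end
end

class Array
  include CompactHashWithIndex
end

# Mirrors the docstring example: the value for each item is item + index.
p [1, 2, 3].to_compact_hash_with_index { |item, index| item + index }
#=> {1=>1, 2=>3, 3=>5}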
jinx/core | lib/jinx/helpers/collection.rb | Jinx.Collection.partial_sort! | def partial_sort!
unless block_given? then return partial_sort! { |item1, item2| item1 <=> item2 } end
# The comparison hash
h = Hash.new { |h, k| h[k] = Hash.new }
sort! do |a, b|
# * If a and b are comparable, then use the comparison result.
# * Otherwise, if there is a member c such that (a <=> c) == (c <=> b),
# then a <=> b has the transitive comparison result.
# * Otherwise, a <=> b is arbitrarily set to 1.
yield(a, b) || h[a][b] ||= -h[b][a] ||= h[a].detect_value { |c, v| v if v == yield(c, b) } || 1
end
end | ruby | def partial_sort!
unless block_given? then return partial_sort! { |item1, item2| item1 <=> item2 } end
# The comparison hash
h = Hash.new { |h, k| h[k] = Hash.new }
sort! do |a, b|
# * If a and b are comparable, then use the comparison result.
# * Otherwise, if there is a member c such that (a <=> c) == (c <=> b),
# then a <=> b has the transitive comparison result.
# * Otherwise, a <=> b is arbitrarily set to 1.
yield(a, b) || h[a][b] ||= -h[b][a] ||= h[a].detect_value { |c, v| v if v == yield(c, b) } || 1
end
end | [
"def",
"partial_sort!",
"unless",
"block_given?",
"then",
"return",
"partial_sort!",
"{",
"|",
"item1",
",",
"item2",
"|",
"item1",
"<=>",
"item2",
"}",
"end",
"h",
"=",
"Hash",
".",
"new",
"{",
"|",
"h",
",",
"k",
"|",
"h",
"[",
"k",
"]",
"=",
"Hash",
".",
"new",
"}",
"sort!",
"do",
"|",
"a",
",",
"b",
"|",
"yield",
"(",
"a",
",",
"b",
")",
"||",
"h",
"[",
"a",
"]",
"[",
"b",
"]",
"||=",
"-",
"h",
"[",
"b",
"]",
"[",
"a",
"]",
"||=",
"h",
"[",
"a",
"]",
".",
"detect_value",
"{",
"|",
"c",
",",
"v",
"|",
"v",
"if",
"v",
"==",
"yield",
"(",
"c",
",",
"b",
")",
"}",
"||",
"1",
"end",
"end"
] | Sorts this collection in-place with a partial sort operator block
@see #partial_sort
@yield (see #partial_sort)
@yieldparam (see #partial_sort)
@raise [NoMethodError] if this Collection does not support the +sort!+ sort in-place method | [
"Sorts",
"this",
"collection",
"in",
"-",
"place",
"with",
"a",
"partial",
"sort",
"operator",
"block"
] | 964a274cc9d7ab74613910e8375e12ed210a434d | https://github.com/jinx/core/blob/964a274cc9d7ab74613910e8375e12ed210a434d/lib/jinx/helpers/collection.rb#L252-L263 | train |
jinx/core | lib/jinx/resource/inversible.rb | Jinx.Inversible.set_inverse | def set_inverse(other, writer, inv_writer)
other.send(inv_writer, self) if other
send(writer, other)
end | ruby | def set_inverse(other, writer, inv_writer)
other.send(inv_writer, self) if other
send(writer, other)
end | [
"def",
"set_inverse",
"(",
"other",
",",
"writer",
",",
"inv_writer",
")",
"other",
".",
"send",
"(",
"inv_writer",
",",
"self",
")",
"if",
"other",
"send",
"(",
"writer",
",",
"other",
")",
"end"
] | Sets an attribute inverse by calling the attribute writer method with the other argument.
If other is non-nil, then the inverse writer method is called on self.
@param other [Resource] the attribute value to set
@param [Symbol] writer the attribute writer method
@param [Symbol] inv_writer the attribute inverse writer method defined for the other object
@private | [
"Sets",
"an",
"attribute",
"inverse",
"by",
"calling",
"the",
"attribute",
"writer",
"method",
"with",
"the",
"other",
"argument",
".",
"If",
"other",
"is",
"non",
"-",
"nil",
"then",
"the",
"inverse",
"writer",
"method",
"is",
"called",
"on",
"self",
"."
] | 964a274cc9d7ab74613910e8375e12ed210a434d | https://github.com/jinx/core/blob/964a274cc9d7ab74613910e8375e12ed210a434d/lib/jinx/resource/inversible.rb#L12-L15 | train |
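The inverse-integrity move described above (write the other side first, then your own reference) can be shown with two plain Ruby objects; Manager and Department and their accessors are invented for this sketch and are not part of the library.

class Department
  attr_accessor :manager
end

class Manager
  attr_reader :department

  # Mirrors set_inverse(other, writer, inv_writer): call the inverse writer
  # on the other object, then set this object's own reference.
  def department=(dept)
    dept.manager = self if dept
    @department = dept
  end
end

m = Manager.new
d = Department.new
m.department = d
p d.manager.equal?(m) #=> true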
jinx/core | lib/jinx/resource/inversible.rb | Jinx.Inversible.set_inversible_noncollection_attribute | def set_inversible_noncollection_attribute(newval, accessors, inverse_writer)
rdr, wtr = accessors
# the previous value
oldval = send(rdr)
# bail if no change
return newval if newval.equal?(oldval)
# clear the previous inverse
logger.debug { "Moving #{qp} from #{oldval.qp} to #{newval.qp}..." } if oldval and newval
if oldval then
clr_wtr = self.class === oldval && oldval.send(rdr).equal?(self) ? wtr : inverse_writer
oldval.send(clr_wtr, nil)
end
# call the writer
send(wtr, newval)
# call the inverse writer on self
if newval then
newval.send(inverse_writer, self)
logger.debug { "Moved #{qp} from #{oldval.qp} to #{newval.qp}." } if oldval
end
newval
end | ruby | def set_inversible_noncollection_attribute(newval, accessors, inverse_writer)
rdr, wtr = accessors
# the previous value
oldval = send(rdr)
# bail if no change
return newval if newval.equal?(oldval)
# clear the previous inverse
logger.debug { "Moving #{qp} from #{oldval.qp} to #{newval.qp}..." } if oldval and newval
if oldval then
clr_wtr = self.class === oldval && oldval.send(rdr).equal?(self) ? wtr : inverse_writer
oldval.send(clr_wtr, nil)
end
# call the writer
send(wtr, newval)
# call the inverse writer on self
if newval then
newval.send(inverse_writer, self)
logger.debug { "Moved #{qp} from #{oldval.qp} to #{newval.qp}." } if oldval
end
newval
end | [
"def",
"set_inversible_noncollection_attribute",
"(",
"newval",
",",
"accessors",
",",
"inverse_writer",
")",
"rdr",
",",
"wtr",
"=",
"accessors",
"oldval",
"=",
"send",
"(",
"rdr",
")",
"return",
"newval",
"if",
"newval",
".",
"equal?",
"(",
"oldval",
")",
"logger",
".",
"debug",
"{",
"\"Moving #{qp} from #{oldval.qp} to #{newval.qp}...\"",
"}",
"if",
"oldval",
"and",
"newval",
"if",
"oldval",
"then",
"clr_wtr",
"=",
"self",
".",
"class",
"===",
"oldval",
"&&",
"oldval",
".",
"send",
"(",
"rdr",
")",
".",
"equal?",
"(",
"self",
")",
"?",
"wtr",
":",
"inverse_writer",
"oldval",
".",
"send",
"(",
"clr_wtr",
",",
"nil",
")",
"end",
"send",
"(",
"wtr",
",",
"newval",
")",
"if",
"newval",
"then",
"newval",
".",
"send",
"(",
"inverse_writer",
",",
"self",
")",
"logger",
".",
"debug",
"{",
"\"Moved #{qp} from #{oldval.qp} to #{newval.qp}.\"",
"}",
"if",
"oldval",
"end",
"newval",
"end"
] | Sets a non-collection attribute value in a way which enforces inverse integrity.
@param [Object] newval the value to set
@param [(Symbol, Symbol)] accessors the reader and writer methods to use in setting the
attribute
@param [Symbol] inverse_writer the inverse attribute writer method
@private | [
"Sets",
"a",
"non",
"-",
"collection",
"attribute",
"value",
"in",
"a",
"way",
"which",
"enforces",
"inverse",
"integrity",
"."
] | 964a274cc9d7ab74613910e8375e12ed210a434d | https://github.com/jinx/core/blob/964a274cc9d7ab74613910e8375e12ed210a434d/lib/jinx/resource/inversible.rb#L24-L48 | train |
jinx/core | lib/jinx/resource/inversible.rb | Jinx.Inversible.add_to_inverse_collection | def add_to_inverse_collection(newval, accessors, inverse)
rdr, wtr = accessors
# the current inverse
oldval = send(rdr)
# no-op if no change
return newval if newval == oldval
# delete self from the current inverse reference collection
if oldval then
coll = oldval.send(inverse)
coll.delete(self) if coll
end
# call the writer on this object
send(wtr, newval)
# add self to the inverse collection
if newval then
coll = newval.send(inverse)
if coll.nil? then
coll = block_given? ? yield : Array.new
newval.set_property_value(inverse, coll)
end
coll << self
if oldval then
logger.debug { "Moved #{qp} from #{rdr} #{oldval.qp} #{inverse} to #{newval.qp}." }
else
logger.debug { "Added #{qp} to #{rdr} #{newval.qp} #{inverse}." }
end
end
newval
end | ruby | def add_to_inverse_collection(newval, accessors, inverse)
rdr, wtr = accessors
# the current inverse
oldval = send(rdr)
# no-op if no change
return newval if newval == oldval
# delete self from the current inverse reference collection
if oldval then
coll = oldval.send(inverse)
coll.delete(self) if coll
end
# call the writer on this object
send(wtr, newval)
# add self to the inverse collection
if newval then
coll = newval.send(inverse)
if coll.nil? then
coll = block_given? ? yield : Array.new
newval.set_property_value(inverse, coll)
end
coll << self
if oldval then
logger.debug { "Moved #{qp} from #{rdr} #{oldval.qp} #{inverse} to #{newval.qp}." }
else
logger.debug { "Added #{qp} to #{rdr} #{newval.qp} #{inverse}." }
end
end
newval
end | [
"def",
"add_to_inverse_collection",
"(",
"newval",
",",
"accessors",
",",
"inverse",
")",
"rdr",
",",
"wtr",
"=",
"accessors",
"oldval",
"=",
"send",
"(",
"rdr",
")",
"return",
"newval",
"if",
"newval",
"==",
"oldval",
"if",
"oldval",
"then",
"coll",
"=",
"oldval",
".",
"send",
"(",
"inverse",
")",
"coll",
".",
"delete",
"(",
"self",
")",
"if",
"coll",
"end",
"send",
"(",
"wtr",
",",
"newval",
")",
"if",
"newval",
"then",
"coll",
"=",
"newval",
".",
"send",
"(",
"inverse",
")",
"if",
"coll",
".",
"nil?",
"then",
"coll",
"=",
"block_given?",
"?",
"yield",
":",
"Array",
".",
"new",
"newval",
".",
"set_property_value",
"(",
"inverse",
",",
"coll",
")",
"end",
"coll",
"<<",
"self",
"if",
"oldval",
"then",
"logger",
".",
"debug",
"{",
"\"Moved #{qp} from #{rdr} #{oldval.qp} #{inverse} to #{newval.qp}.\"",
"}",
"else",
"logger",
".",
"debug",
"{",
"\"Added #{qp} to #{rdr} #{newval.qp} #{inverse}.\"",
"}",
"end",
"end",
"newval",
"end"
] | Sets a collection attribute value in a way which enforces inverse integrity.
The inverse of the attribute is a collection accessed by calling inverse on newval.
@param [Resource] newval the new attribute reference value
@param [(Symbol, Symbol)] accessors the reader and writer to use in setting
the attribute
@param [Symbol] inverse the inverse collection attribute to which
this domain object will be added
@yield a factory to create a new collection on demand (default is an Array)
@private | [
"Sets",
"a",
"collection",
"attribute",
"value",
"in",
"a",
"way",
"which",
"enforces",
"inverse",
"integrity",
".",
"The",
"inverse",
"of",
"the",
"attribute",
"is",
"a",
"collection",
"accessed",
"by",
"calling",
"inverse",
"on",
"newval",
"."
] | 964a274cc9d7ab74613910e8375e12ed210a434d | https://github.com/jinx/core/blob/964a274cc9d7ab74613910e8375e12ed210a434d/lib/jinx/resource/inversible.rb#L60-L92 | train |
riddopic/garcun | lib/garcon/core_ext/pathname.rb | Garcon.Pathref.expand_pathseg | def expand_pathseg(handle)
return handle unless handle.is_a?(Symbol)
pathsegs = ROOT_PATHS[handle] or raise ArgumentError,
"Don't know how to expand path reference '#{handle.inspect}'."
pathsegs.map { |ps| expand_pathseg(ps) }.flatten
end | ruby | def expand_pathseg(handle)
return handle unless handle.is_a?(Symbol)
pathsegs = ROOT_PATHS[handle] or raise ArgumentError,
"Don't know how to expand path reference '#{handle.inspect}'."
pathsegs.map { |ps| expand_pathseg(ps) }.flatten
end | [
"def",
"expand_pathseg",
"(",
"handle",
")",
"return",
"handle",
"unless",
"handle",
".",
"is_a?",
"(",
"Symbol",
")",
"pathsegs",
"=",
"ROOT_PATHS",
"[",
"handle",
"]",
"or",
"raise",
"ArgumentError",
",",
"\"Don't know how to expand path reference '#{handle.inspect}'.\"",
"pathsegs",
".",
"map",
"{",
"|",
"ps",
"|",
"expand_pathseg",
"(",
"ps",
")",
"}",
".",
"flatten",
"end"
] | A T T E N Z I O N E A R E A P R O T E T T A
Recursively expand a path handle.
@return [Array<String>]
An array of path segments, suitable for .join
@api public | [
"A",
"T",
"T",
"E",
"N",
"Z",
"I",
"O",
"N",
"E",
"A",
"R",
"E",
"A",
"P",
"R",
"O",
"T",
"E",
"T",
"T",
"A",
"Recursively",
"expand",
"a",
"path",
"handle",
"."
] | c2409bd8cf9c14b967a719810dab5269d69b42de | https://github.com/riddopic/garcun/blob/c2409bd8cf9c14b967a719810dab5269d69b42de/lib/garcon/core_ext/pathname.rb#L112-L117 | train |
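The recursive expansion above bottoms out on strings and flattens everything else; a stand-alone version with a made-up ROOT_PATHS table behaves like this.

# Hypothetical handle table: a symbol maps to path segments, which may
# themselves contain further symbols to expand.
ROOT_PATHS = {
  home: ['/home', 'deploy'],
  app:  [:home, 'apps', 'myapp'],
  logs: [:app, 'log']
}

def expand_pathseg(handle)
  return handle unless handle.is_a?(Symbol)
  pathsegs = ROOT_PATHS[handle] or
    raise ArgumentError, "Don't know how to expand path reference '#{handle.inspect}'."
  pathsegs.map { |ps| expand_pathseg(ps) }.flatten
end

p File.join(expand_pathseg(:logs))
#=> "/home/deploy/apps/myapp/log"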
riddopic/garcun | lib/garcon/task/count_down_latch.rb | Garcon.MutexCountDownLatch.wait | def wait(timeout = nil)
@mutex.synchronize do
remaining = Condition::Result.new(timeout)
while @count > 0 && remaining.can_wait?
remaining = @condition.wait(@mutex, remaining.remaining_time)
end
@count == 0
end
end | ruby | def wait(timeout = nil)
@mutex.synchronize do
remaining = Condition::Result.new(timeout)
while @count > 0 && remaining.can_wait?
remaining = @condition.wait(@mutex, remaining.remaining_time)
end
@count == 0
end
end | [
"def",
"wait",
"(",
"timeout",
"=",
"nil",
")",
"@mutex",
".",
"synchronize",
"do",
"remaining",
"=",
"Condition",
"::",
"Result",
".",
"new",
"(",
"timeout",
")",
"while",
"@count",
">",
"0",
"&&",
"remaining",
".",
"can_wait?",
"remaining",
"=",
"@condition",
".",
"wait",
"(",
"@mutex",
",",
"remaining",
".",
"remaining_time",
")",
"end",
"@count",
"==",
"0",
"end",
"end"
] | Create a new `CountDownLatch` with the initial `count`.
@param [Fixnum] count
The initial count
@raise [ArgumentError]
If `count` is not an integer or is less than zero.
Block on the latch until the counter reaches zero or until `timeout` is
reached.
@param [Fixnum] timeout
The number of seconds to wait for the counter or `nil` to block
indefinitely.
@return [Boolean]
True if the count reaches zero else false on timeout. | [
"Create",
"a",
"new",
"CountDownLatch",
"with",
"the",
"initial",
"count",
"."
] | c2409bd8cf9c14b967a719810dab5269d69b42de | https://github.com/riddopic/garcun/blob/c2409bd8cf9c14b967a719810dab5269d69b42de/lib/garcon/task/count_down_latch.rb#L61-L69 | train |
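The wait contract above (true when the count reaches zero, false when the timeout runs out first) can be sketched with only stdlib Mutex and ConditionVariable; this MiniLatch is an illustration of the same idea, not the gem's class.

require 'thread'

# Minimal countdown latch built from stdlib pieces only.
class MiniLatch
  def initialize(count)
    @count = count
    @mutex = Mutex.new
    @cond  = ConditionVariable.new
  end

  def count_down
    @mutex.synchronize do
      @count -= 1 if @count > 0
      @cond.broadcast if @count.zero?
    end
  end

  # Returns true if the count hit zero, false if the timeout elapsed first.
  def wait(timeout = nil)
    deadline = timeout && Time.now + timeout
    @mutex.synchronize do
      while @count > 0
        remaining = deadline && deadline - Time.now
        return false if remaining && remaining <= 0
        @cond.wait(@mutex, remaining)
      end
      true
    end
  end
end

latch = MiniLatch.new(3)
3.times { |i| Thread.new { sleep 0.1 * i; latch.count_down } }
p latch.wait(5) #=> true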
karthikv/model_schema | lib/model_schema/schema_error.rb | ModelSchema.SchemaError.dump_extra_diffs | def dump_extra_diffs(field)
extra_diffs = diffs_by_field_type(field, TYPE_EXTRA)
if extra_diffs.length > 0
header = "Table #{@table_name} has extra #{field}:\n"
diff_str = extra_diffs.map do |diff|
dump_single(field, diff[:generator], diff[:elem])
end.join("\n\t")
"#{header}\n\t#{diff_str}\n"
end
end | ruby | def dump_extra_diffs(field)
extra_diffs = diffs_by_field_type(field, TYPE_EXTRA)
if extra_diffs.length > 0
header = "Table #{@table_name} has extra #{field}:\n"
diff_str = extra_diffs.map do |diff|
dump_single(field, diff[:generator], diff[:elem])
end.join("\n\t")
"#{header}\n\t#{diff_str}\n"
end
end | [
"def",
"dump_extra_diffs",
"(",
"field",
")",
"extra_diffs",
"=",
"diffs_by_field_type",
"(",
"field",
",",
"TYPE_EXTRA",
")",
"if",
"extra_diffs",
".",
"length",
">",
"0",
"header",
"=",
"\"Table #{@table_name} has extra #{field}:\\n\"",
"diff_str",
"=",
"extra_diffs",
".",
"map",
"do",
"|",
"diff",
"|",
"dump_single",
"(",
"field",
",",
"diff",
"[",
":generator",
"]",
",",
"diff",
"[",
":elem",
"]",
")",
"end",
".",
"join",
"(",
"\"\\n\\t\"",
")",
"\"#{header}\\n\\t#{diff_str}\\n\"",
"end",
"end"
] | Dumps all diffs that have the given field and are of TYPE_EXTRA. | [
"Dumps",
"all",
"diffs",
"that",
"have",
"the",
"given",
"field",
"and",
"are",
"of",
"TYPE_EXTRA",
"."
] | d73f7d9f8b5240ad878a01d1fe7a0e01f66cf979 | https://github.com/karthikv/model_schema/blob/d73f7d9f8b5240ad878a01d1fe7a0e01f66cf979/lib/model_schema/schema_error.rb#L54-L65 | train |
karthikv/model_schema | lib/model_schema/schema_error.rb | ModelSchema.SchemaError.dump_missing_diffs | def dump_missing_diffs(field)
missing_diffs = diffs_by_field_type(field, TYPE_MISSING)
if missing_diffs.length > 0
header = "Table #{@table_name} is missing #{field}:\n"
diff_str = missing_diffs.map do |diff|
dump_single(field, diff[:generator], diff[:elem])
end.join("\n\t")
"#{header}\n\t#{diff_str}\n"
end
end | ruby | def dump_missing_diffs(field)
missing_diffs = diffs_by_field_type(field, TYPE_MISSING)
if missing_diffs.length > 0
header = "Table #{@table_name} is missing #{field}:\n"
diff_str = missing_diffs.map do |diff|
dump_single(field, diff[:generator], diff[:elem])
end.join("\n\t")
"#{header}\n\t#{diff_str}\n"
end
end | [
"def",
"dump_missing_diffs",
"(",
"field",
")",
"missing_diffs",
"=",
"diffs_by_field_type",
"(",
"field",
",",
"TYPE_MISSING",
")",
"if",
"missing_diffs",
".",
"length",
">",
"0",
"header",
"=",
"\"Table #{@table_name} is missing #{field}:\\n\"",
"diff_str",
"=",
"missing_diffs",
".",
"map",
"do",
"|",
"diff",
"|",
"dump_single",
"(",
"field",
",",
"diff",
"[",
":generator",
"]",
",",
"diff",
"[",
":elem",
"]",
")",
"end",
".",
"join",
"(",
"\"\\n\\t\"",
")",
"\"#{header}\\n\\t#{diff_str}\\n\"",
"end",
"end"
] | Dumps all diffs that have the given field and are of TYPE_MISSING. | [
"Dumps",
"all",
"diffs",
"that",
"have",
"the",
"given",
"field",
"and",
"are",
"of",
"TYPE_MISSING",
"."
] | d73f7d9f8b5240ad878a01d1fe7a0e01f66cf979 | https://github.com/karthikv/model_schema/blob/d73f7d9f8b5240ad878a01d1fe7a0e01f66cf979/lib/model_schema/schema_error.rb#L68-L79 | train |
karthikv/model_schema | lib/model_schema/schema_error.rb | ModelSchema.SchemaError.dump_mismatch_diffs | def dump_mismatch_diffs(field)
mismatch_diffs = diffs_by_field_type(field, TYPE_MISMATCH)
if mismatch_diffs.length > 0
header = "Table #{@table_name} has mismatched #{field}:\n"
diff_str = mismatch_diffs.map do |diff|
"actual: #{dump_single(field, diff[:db_generator], diff[:db_elem])}\n\t" +
"expected: #{dump_single(field, diff[:exp_generator], diff[:exp_elem])}"
end.join("\n\n\t")
"#{header}\n\t#{diff_str}\n"
end
end | ruby | def dump_mismatch_diffs(field)
mismatch_diffs = diffs_by_field_type(field, TYPE_MISMATCH)
if mismatch_diffs.length > 0
header = "Table #{@table_name} has mismatched #{field}:\n"
diff_str = mismatch_diffs.map do |diff|
"actual: #{dump_single(field, diff[:db_generator], diff[:db_elem])}\n\t" +
"expected: #{dump_single(field, diff[:exp_generator], diff[:exp_elem])}"
end.join("\n\n\t")
"#{header}\n\t#{diff_str}\n"
end
end | [
"def",
"dump_mismatch_diffs",
"(",
"field",
")",
"mismatch_diffs",
"=",
"diffs_by_field_type",
"(",
"field",
",",
"TYPE_MISMATCH",
")",
"if",
"mismatch_diffs",
".",
"length",
">",
"0",
"header",
"=",
"\"Table #{@table_name} has mismatched #{field}:\\n\"",
"diff_str",
"=",
"mismatch_diffs",
".",
"map",
"do",
"|",
"diff",
"|",
"\"actual: #{dump_single(field, diff[:db_generator], diff[:db_elem])}\\n\\t\"",
"+",
"\"expected: #{dump_single(field, diff[:exp_generator], diff[:exp_elem])}\"",
"end",
".",
"join",
"(",
"\"\\n\\n\\t\"",
")",
"\"#{header}\\n\\t#{diff_str}\\n\"",
"end",
"end"
] | Dumps all diffs that have the given field and are of TYPE_MISMATCH. | [
"Dumps",
"all",
"diffs",
"that",
"have",
"the",
"given",
"field",
"and",
"are",
"of",
"TYPE_MISMATCH",
"."
] | d73f7d9f8b5240ad878a01d1fe7a0e01f66cf979 | https://github.com/karthikv/model_schema/blob/d73f7d9f8b5240ad878a01d1fe7a0e01f66cf979/lib/model_schema/schema_error.rb#L82-L94 | train |
karthikv/model_schema | lib/model_schema/schema_error.rb | ModelSchema.SchemaError.to_s | def to_s
parts = FIELDS.flat_map do |field|
[dump_extra_diffs(field),
dump_missing_diffs(field),
dump_mismatch_diffs(field)]
end
[
"Table #{@table_name} does not match the expected schema.\n\n",
parts.compact.join("\n"),
"\nYou may disable schema checks by passing :disable => true to model_",
"schema or by setting the ENV variable #{DISABLE_MODEL_SCHEMA_KEY}=1.\n"
].join
end | ruby | def to_s
parts = FIELDS.flat_map do |field|
[dump_extra_diffs(field),
dump_missing_diffs(field),
dump_mismatch_diffs(field)]
end
[
"Table #{@table_name} does not match the expected schema.\n\n",
parts.compact.join("\n"),
"\nYou may disable schema checks by passing :disable => true to model_",
"schema or by setting the ENV variable #{DISABLE_MODEL_SCHEMA_KEY}=1.\n"
].join
end | [
"def",
"to_s",
"parts",
"=",
"FIELDS",
".",
"flat_map",
"do",
"|",
"field",
"|",
"[",
"dump_extra_diffs",
"(",
"field",
")",
",",
"dump_missing_diffs",
"(",
"field",
")",
",",
"dump_mismatch_diffs",
"(",
"field",
")",
"]",
"end",
"[",
"\"Table #{@table_name} does not match the expected schema.\\n\\n\"",
",",
"parts",
".",
"compact",
".",
"join",
"(",
"\"\\n\"",
")",
",",
"\"\\nYou may disable schema checks by passing :disable => true to model_\"",
",",
"\"schema or by setting the ENV variable #{DISABLE_MODEL_SCHEMA_KEY}=1.\\n\"",
"]",
".",
"join",
"end"
] | Combines all dumps into one cohesive error message. | [
"Combines",
"all",
"dumps",
"into",
"one",
"cohesive",
"error",
"message",
"."
] | d73f7d9f8b5240ad878a01d1fe7a0e01f66cf979 | https://github.com/karthikv/model_schema/blob/d73f7d9f8b5240ad878a01d1fe7a0e01f66cf979/lib/model_schema/schema_error.rb#L97-L110 | train |
jinx/core | lib/jinx/helpers/visitor.rb | Jinx.Visitor.filter | def filter
raise ArgumentError.new("A filter block is not given to the visitor filter method") unless block_given?
self.class.new(@options) { |node| yield(node, node_children(node)) }
end | ruby | def filter
raise ArgumentError.new("A filter block is not given to the visitor filter method") unless block_given?
self.class.new(@options) { |node| yield(node, node_children(node)) }
end | [
"def",
"filter",
"raise",
"ArgumentError",
".",
"new",
"(",
"\"A filter block is not given to the visitor filter method\"",
")",
"unless",
"block_given?",
"self",
".",
"class",
".",
"new",
"(",
"@options",
")",
"{",
"|",
"node",
"|",
"yield",
"(",
"node",
",",
"node_children",
"(",
"node",
")",
")",
"}",
"end"
] | Returns a new Visitor which determines which nodes to visit by applying the given block
to this visitor. The filter block arguments consist of a parent node and an array of
children nodes for the parent. The block can return nil, a single node to visit or a
collection of nodes to visit.
@example
visitor = Jinx::Visitor.new { |person| person.children }
# Joe has age 55 and children aged 17 and 24, who have children aged [1] and [6, 3], resp.
visitor.to_enum(joe) { |person| person.age } #=> [55, 20, 1, 24, 6, 3]
# The filter navigates to the children sorted by age of parents 21 or older.
filter = visitor.filter { |parent, children| children.sort { |c1, c2| c1.age <=> c2.age } if parent.age >= 21 }
filter.to_enum(joe) { |person| person.age } #=> [55, 24, 3, 6]
@return [Visitor] the filter visitor
@yield [parent, children] the filter to select which of the children to visit next
@yieldparam parent the currently visited node
@yieldparam [Array] children the nodes slated by this visitor to visit next
@raise [ArgumentError] if a block is not given to this method | [
"Returns",
"a",
"new",
"Visitor",
"which",
"determines",
"which",
"nodes",
"to",
"visit",
"by",
"applying",
"the",
"given",
"block",
"to",
"this",
"visitor",
".",
"The",
"filter",
"block",
"arguments",
"consist",
"of",
"a",
"parent",
"node",
"and",
"an",
"array",
"of",
"children",
"nodes",
"for",
"the",
"parent",
".",
"The",
"block",
"can",
"return",
"nil",
"a",
"single",
"node",
"to",
"visit",
"or",
"a",
"collection",
"of",
"nodes",
"to",
"visit",
"."
] | 964a274cc9d7ab74613910e8375e12ed210a434d | https://github.com/jinx/core/blob/964a274cc9d7ab74613910e8375e12ed210a434d/lib/jinx/helpers/visitor.rb#L186-L189 | train |
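The Person example from the docstring can be assembled into a runnable shape roughly as below; the Struct, the ages, and the require path are all assumptions, so treat it as a sketch of the call pattern rather than verified output.

require 'jinx'   # assumed require path for the library above

Person = Struct.new(:age, :children)

# Joe, aged 55, with children aged 17 and 24, as in the docstring example.
joe = Person.new(55, [
  Person.new(17, [Person.new(1, [])]),
  Person.new(24, [Person.new(6, []), Person.new(3, [])])
])

visitor = Jinx::Visitor.new { |person| person.children }

# Only descend into the children of parents aged 21 or older,
# youngest first, following the docstring's filter example.
filter = visitor.filter do |parent, children|
  children.sort { |c1, c2| c1.age <=> c2.age } if parent.age >= 21
end

filter.to_enum(joe) { |person| person.age }   # docstring reports [55, 24, 3, 6]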
jinx/core | lib/jinx/helpers/visitor.rb | Jinx.Visitor.node_children | def node_children(node)
children = @navigator.call(node)
return Array::EMPTY_ARRAY if children.nil?
Enumerable === children ? children.to_a.compact : [children]
end | ruby | def node_children(node)
children = @navigator.call(node)
return Array::EMPTY_ARRAY if children.nil?
Enumerable === children ? children.to_a.compact : [children]
end | [
"def",
"node_children",
"(",
"node",
")",
"children",
"=",
"@navigator",
".",
"call",
"(",
"node",
")",
"return",
"Array",
"::",
"EMPTY_ARRAY",
"if",
"children",
".",
"nil?",
"Enumerable",
"===",
"children",
"?",
"children",
".",
"to_a",
".",
"compact",
":",
"[",
"children",
"]",
"end"
] | Returns the children to visit for the given node. | [
"Returns",
"the",
"children",
"to",
"visit",
"for",
"the",
"given",
"node",
"."
] | 964a274cc9d7ab74613910e8375e12ed210a434d | https://github.com/jinx/core/blob/964a274cc9d7ab74613910e8375e12ed210a434d/lib/jinx/helpers/visitor.rb#L202-L206 | train |
jinx/core | lib/jinx/helpers/visitor.rb | Jinx.Visitor.visit_root | def visit_root(node, &operator)
clear
# Exclude cycles if the prune cycles flag is set.
@exclude.merge!(cyclic_nodes(node)) if @prune_cycle_flag
# Visit the root node.
result = visit_recursive(node, &operator)
# Reset the exclusions if the prune cycles flag is set.
@exclude.clear if @prune_cycle_flag
result
end | ruby | def visit_root(node, &operator)
clear
# Exclude cycles if the prune cycles flag is set.
@exclude.merge!(cyclic_nodes(node)) if @prune_cycle_flag
# Visit the root node.
result = visit_recursive(node, &operator)
# Reset the exclusions if the prune cycles flag is set.
@exclude.clear if @prune_cycle_flag
result
end | [
"def",
"visit_root",
"(",
"node",
",",
"&",
"operator",
")",
"clear",
"@exclude",
".",
"merge!",
"(",
"cyclic_nodes",
"(",
"node",
")",
")",
"if",
"@prune_cycle_flag",
"result",
"=",
"visit_recursive",
"(",
"node",
",",
"&",
"operator",
")",
"@exclude",
".",
"clear",
"if",
"@prune_cycle_flag",
"result",
"end"
] | Visits the root node and all descendants. | [
"Visits",
"the",
"root",
"node",
"and",
"all",
"descendants",
"."
] | 964a274cc9d7ab74613910e8375e12ed210a434d | https://github.com/jinx/core/blob/964a274cc9d7ab74613910e8375e12ed210a434d/lib/jinx/helpers/visitor.rb#L216-L225 | train |
jinx/core | lib/jinx/helpers/visitor.rb | Jinx.Visitor.cyclic_nodes | def cyclic_nodes(root)
copts = @options.reject { |k, v| k == :prune_cycle }
cyclic = Set.new
cycler = Visitor.new(copts) do |parent|
children = @navigator.call(parent)
# Look for a cycle back to the child.
children.each do |child|
index = cycler.lineage.index(child)
if index then
# The child is also a parent: add the nodes between
# the two occurrences of the child in the lineage.
cyclic.merge!(cycler.lineage[(index + 1)..-1])
end
end
children
end
cycler.visit(root)
cyclic
end | ruby | def cyclic_nodes(root)
copts = @options.reject { |k, v| k == :prune_cycle }
cyclic = Set.new
cycler = Visitor.new(copts) do |parent|
children = @navigator.call(parent)
# Look for a cycle back to the child.
children.each do |child|
index = cycler.lineage.index(child)
if index then
# The child is also a parent: add the nodes between
# the two occurrences of the child in the lineage.
cyclic.merge!(cycler.lineage[(index + 1)..-1])
end
end
children
end
cycler.visit(root)
cyclic
end | [
"def",
"cyclic_nodes",
"(",
"root",
")",
"copts",
"=",
"@options",
".",
"reject",
"{",
"|",
"k",
",",
"v",
"|",
"k",
"==",
":prune_cycle",
"}",
"cyclic",
"=",
"Set",
".",
"new",
"cycler",
"=",
"Visitor",
".",
"new",
"(",
"copts",
")",
"do",
"|",
"parent",
"|",
"children",
"=",
"@navigator",
".",
"call",
"(",
"parent",
")",
"children",
".",
"each",
"do",
"|",
"child",
"|",
"index",
"=",
"cycler",
".",
"lineage",
".",
"index",
"(",
"child",
")",
"if",
"index",
"then",
"cyclic",
".",
"merge!",
"(",
"cycler",
".",
"lineage",
"[",
"(",
"index",
"+",
"1",
")",
"..",
"-",
"1",
"]",
")",
"end",
"end",
"children",
"end",
"cycler",
".",
"visit",
"(",
"root",
")",
"cyclic",
"end"
] | Returns the nodes which occur within a cycle, excluding the cycle entry point.
@example
graph.paths #=> a -> b -> a, a -> c -> d -> c
Visitor.new(graph, &navigator).cyclic_nodes(a) #=> [b, d]
@param root the node to visit
@return [Array] the nodes within visit cycles | [
"Returns",
"the",
"nodes",
"which",
"occur",
"within",
"a",
"cycle",
"excluding",
"the",
"cycle",
"entry",
"point",
"."
] | 964a274cc9d7ab74613910e8375e12ed210a434d | https://github.com/jinx/core/blob/964a274cc9d7ab74613910e8375e12ed210a434d/lib/jinx/helpers/visitor.rb#L234-L252 | train |
rubyworks/richunits | work/deprecated/duration.rb | RichUnits.Numeric.duration | def duration(part = nil, klass = Duration)
if [:years, :months, :weeks, :days, :hours, :minutes, :seconds].include? part
klass.new(part => self)
else
klass.new(self)
end
end | ruby | def duration(part = nil, klass = Duration)
if [:years, :months, :weeks, :days, :hours, :minutes, :seconds].include? part
klass.new(part => self)
else
klass.new(self)
end
end | [
"def",
"duration",
"(",
"part",
"=",
"nil",
",",
"klass",
"=",
"Duration",
")",
"if",
"[",
":years",
",",
":months",
",",
":weeks",
",",
":days",
",",
":hours",
",",
":minutes",
",",
":seconds",
"]",
".",
"include?",
"part",
"klass",
".",
"new",
"(",
"part",
"=>",
"self",
")",
"else",
"klass",
".",
"new",
"(",
"self",
")",
"end",
"end"
] | Create a Duration object using self where self could represent weeks, days,
hours, minutes, and seconds.
*Example*
10.duration(:weeks)
=> #<Duration: 10 weeks>
10.duration
=> #<Duration: 10 seconds> | [
"Create",
"a",
"Duration",
"object",
"using",
"self",
"where",
"self",
"could",
"represent",
"weeks",
"days",
"hours",
"minutes",
"and",
"seconds",
"."
] | c92bec173fc63798013defdd9a1727b0d1d65d46 | https://github.com/rubyworks/richunits/blob/c92bec173fc63798013defdd9a1727b0d1d65d46/work/deprecated/duration.rb#L466-L472 | train |
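The docstring's own example, repeated as a call sketch; the require line is a guess (the file sits under work/deprecated in the repo), so loading may differ in practice.

require 'richunits'   # assumed entry point; adjust if the gem exposes a different path

p 10.duration(:weeks)   #=> #<Duration: 10 weeks>, per the docstring above
p 10.duration           #=> #<Duration: 10 seconds>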
rubyworks/richunits | work/deprecated/duration.rb | RichUnits.Duration.seconds | def seconds(part = nil)
# Table mapping
h = {:weeks => WEEK, :days => DAY, :hours => HOUR, :minutes => MINUTE}
if [:weeks, :days, :hours, :minutes].include? part
__send__(part) * h[part]
else
@seconds
end
end | ruby | def seconds(part = nil)
# Table mapping
h = {:weeks => WEEK, :days => DAY, :hours => HOUR, :minutes => MINUTE}
if [:weeks, :days, :hours, :minutes].include? part
__send__(part) * h[part]
else
@seconds
end
end | [
"def",
"seconds",
"(",
"part",
"=",
"nil",
")",
"h",
"=",
"{",
":weeks",
"=>",
"WEEK",
",",
":days",
"=>",
"DAY",
",",
":hours",
"=>",
"HOUR",
",",
":minutes",
"=>",
"MINUTE",
"}",
"if",
"[",
":weeks",
",",
":days",
",",
":hours",
",",
":minutes",
"]",
".",
"include?",
"part",
"__send__",
"(",
"part",
")",
"*",
"h",
"[",
"part",
"]",
"else",
"@seconds",
"end",
"end"
] | Get the number of seconds of a given part, or simply just get the number of
seconds.
*Example*
d = Duration.new(:weeks => 1, :days => 1, :hours => 1, :seconds => 30)
=> #<Duration: 1 week, 1 day, 1 hour and 30 seconds>
d.seconds(:weeks)
=> 604800
d.seconds(:days)
=> 86400
d.seconds(:hours)
=> 3600
d.seconds
=> 30 | [
"Get",
"the",
"number",
"of",
"seconds",
"of",
"a",
"given",
"part",
"or",
"simply",
"just",
"get",
"the",
"number",
"of",
"seconds",
"."
] | c92bec173fc63798013defdd9a1727b0d1d65d46 | https://github.com/rubyworks/richunits/blob/c92bec173fc63798013defdd9a1727b0d1d65d46/work/deprecated/duration.rb#L134-L143 | train |
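The constants behind the part lookup above can be reproduced and checked against the numbers quoted in the docstring with nothing but integer arithmetic.

MINUTE = 60
HOUR   = 60 * MINUTE
DAY    = 24 * HOUR
WEEK   = 7  * DAY

# Matches the docstring: weeks => 604800, days => 86400, hours => 3600.
{ weeks: WEEK, days: DAY, hours: HOUR, minutes: MINUTE }.each do |part, secs|
  puts "1 #{part.to_s.chomp('s')} = #{secs} seconds"
end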
rubyworks/richunits | work/deprecated/duration.rb | RichUnits.Duration.to_s | def to_s
str = ''
each do |part, time|
# Skip any zero times.
next if time.zero?
# Concatenate the part of the time and the time itself.
str << "#{time} #{time == 1 ? part[0..-2] : part}, "
end
str.chomp(', ').sub(/(.+), (.+)/, '\1 and \2')
end | ruby | def to_s
str = ''
each do |part, time|
# Skip any zero times.
next if time.zero?
# Concatenate the part of the time and the time itself.
str << "#{time} #{time == 1 ? part[0..-2] : part}, "
end
str.chomp(', ').sub(/(.+), (.+)/, '\1 and \2')
end | [
"def",
"to_s",
"str",
"=",
"''",
"each",
"do",
"|",
"part",
",",
"time",
"|",
"next",
"if",
"time",
".",
"zero?",
"str",
"<<",
"\"#{time} #{time == 1 ? part[0..-2] : part}, \"",
"end",
"str",
".",
"chomp",
"(",
"', '",
")",
".",
"sub",
"(",
"/",
"/",
",",
"'\\1 and \\2'",
")",
"end"
] | Friendly, human-readable string representation of the duration.
*Example*
d = Duration.new(:seconds => 140)
=> #<Duration: 2 minutes and 20 seconds>
d.to_s
=> "2 minutes and 20 seconds" | [
"Friendly",
"human",
"-",
"readable",
"string",
"representation",
"of",
"the",
"duration",
"."
] | c92bec173fc63798013defdd9a1727b0d1d65d46 | https://github.com/rubyworks/richunits/blob/c92bec173fc63798013defdd9a1727b0d1d65d46/work/deprecated/duration.rb#L258-L270 | train |
syborg/mme_tools | lib/mme_tools/config.rb | MMETools.Config.dump | def dump(filename)
File.open(filename,'w') do |f|
YAML.dump(self.to_hash,f)
end
end | ruby | def dump(filename)
File.open(filename,'w') do |f|
YAML.dump(self.to_hash,f)
end
end | [
"def",
"dump",
"(",
"filename",
")",
"File",
".",
"open",
"(",
"filename",
",",
"'w'",
")",
"do",
"|",
"f",
"|",
"YAML",
".",
"dump",
"(",
"self",
".",
"to_hash",
",",
"f",
")",
"end",
"end"
] | saves configuration into a _yaml_ file named +filename+ | [
"saves",
"configuration",
"into",
"a",
"_yaml_",
"file",
"named",
"+",
"filename",
"+"
] | e93919f7fcfb408b941d6144290991a7feabaa7d | https://github.com/syborg/mme_tools/blob/e93919f7fcfb408b941d6144290991a7feabaa7d/lib/mme_tools/config.rb#L101-L105 | train |
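The dump above is a thin wrapper around YAML.dump; the same round trip can be seen with a plain hash and a tempfile (the settings here are invented).

require 'yaml'
require 'tempfile'

settings = { 'host' => 'localhost', 'port' => 8080, 'verbose' => true }

file = Tempfile.new(['config', '.yml'])
# Same move as the method above: open the target and hand it to YAML.dump.
File.open(file.path, 'w') { |f| YAML.dump(settings, f) }

p YAML.load_file(file.path)
#=> {"host"=>"localhost", "port"=>8080, "verbose"=>true}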
OHSU-FM/reindeer-etl | lib/reindeer-etl/sources/multi_source.rb | ReindeerETL::Sources.MultiSource.each | def each
rows = []
all_keys = Set.new
@sources.each_with_index do |source, source_idx|
first_row = false
source.each do |row|
unless row.keys.include? @key
raise ReindeerETL::Errors::RecordInvalid.new("Path#1 missing key: #{@key}")
end
if source_idx == 0 # first source
rows << row
else
source_targets = @target_cols[source_idx - 1] unless @target_cols.nil?
rindex = rows.index{|r| r[@key] == row[@key] }
if rindex.nil?
if @expect_full_match
raise ReindeerETL::Errors::RecordInvalid.new("Expected full match")
else
next
end
end
if source_targets.nil? or source_targets.empty?
rows[rindex] = rows[rindex].merge(row)
else
source_targets.each_with_index do |tar, sidx|
underscored_tar = h_underscore_string tar
if row.keys.map {|k| k[h_regex, 1] }.include? underscored_tar
k = row.keys.select{|k| k[h_regex, 1] == underscored_tar }.first
hash = h_hash_maker tar, row[k]
rows[rindex].merge!(hash)
else
val = Object
.const_get("ReindeerETL::Mods::#{@namespace}::#{tar}")
.get(row)
rows[rindex].merge!(h_hash_maker(tar, val))
end
end
end
end
end
end
rows.each {|r| yield r}
end | ruby | def each
rows = []
all_keys = Set.new
@sources.each_with_index do |source, source_idx|
first_row = false
source.each do |row|
unless row.keys.include? @key
raise ReindeerETL::Errors::RecordInvalid.new("Path#1 missing key: #{@key}")
end
if source_idx == 0 # first source
rows << row
else
source_targets = @target_cols[source_idx - 1] unless @target_cols.nil?
rindex = rows.index{|r| r[@key] == row[@key] }
if rindex.nil?
if @expect_full_match
raise ReindeerETL::Errors::RecordInvalid.new("Expected full match")
else
next
end
end
if source_targets.nil? or source_targets.empty?
rows[rindex] = rows[rindex].merge(row)
else
source_targets.each_with_index do |tar, sidx|
underscored_tar = h_underscore_string tar
if row.keys.map {|k| k[h_regex, 1] }.include? underscored_tar
k = row.keys.select{|k| k[h_regex, 1] == underscored_tar }.first
hash = h_hash_maker tar, row[k]
rows[rindex].merge!(hash)
else
val = Object
.const_get("ReindeerETL::Mods::#{@namespace}::#{tar}")
.get(row)
rows[rindex].merge!(h_hash_maker(tar, val))
end
end
end
end
end
end
rows.each {|r| yield r}
end | [
"def",
"each",
"rows",
"=",
"[",
"]",
"all_keys",
"=",
"Set",
".",
"new",
"@sources",
".",
"each_with_index",
"do",
"|",
"source",
",",
"source_idx",
"|",
"first_row",
"=",
"false",
"source",
".",
"each",
"do",
"|",
"row",
"|",
"unless",
"row",
".",
"keys",
".",
"include?",
"@key",
"raise",
"ReindeerETL",
"::",
"Errors",
"::",
"RecordInvalid",
".",
"new",
"(",
"\"Path#1 missing key: #{@key}\"",
")",
"end",
"if",
"source_idx",
"==",
"0",
"rows",
"<<",
"row",
"else",
"source_targets",
"=",
"@target_cols",
"[",
"source_idx",
"-",
"1",
"]",
"unless",
"@target_cols",
".",
"nil?",
"rindex",
"=",
"rows",
".",
"index",
"{",
"|",
"r",
"|",
"r",
"[",
"@key",
"]",
"==",
"row",
"[",
"@key",
"]",
"}",
"if",
"rindex",
".",
"nil?",
"if",
"@expect_full_match",
"raise",
"ReindeerETL",
"::",
"Errors",
"::",
"RecordInvalid",
".",
"new",
"(",
"\"Expected full match\"",
")",
"else",
"next",
"end",
"end",
"if",
"source_targets",
".",
"nil?",
"or",
"source_targets",
".",
"empty?",
"rows",
"[",
"rindex",
"]",
"=",
"rows",
"[",
"rindex",
"]",
".",
"merge",
"(",
"row",
")",
"else",
"source_targets",
".",
"each_with_index",
"do",
"|",
"tar",
",",
"sidx",
"|",
"underscored_tar",
"=",
"h_underscore_string",
"tar",
"if",
"row",
".",
"keys",
".",
"map",
"{",
"|",
"k",
"|",
"k",
"[",
"h_regex",
",",
"1",
"]",
"}",
".",
"include?",
"underscored_tar",
"k",
"=",
"row",
".",
"keys",
".",
"select",
"{",
"|",
"k",
"|",
"k",
"[",
"h_regex",
",",
"1",
"]",
"==",
"underscored_tar",
"}",
".",
"first",
"hash",
"=",
"h_hash_maker",
"tar",
",",
"row",
"[",
"k",
"]",
"rows",
"[",
"rindex",
"]",
".",
"merge!",
"(",
"hash",
")",
"else",
"val",
"=",
"Object",
".",
"const_get",
"(",
"\"ReindeerETL::Mods::#{@namespace}::#{tar}\"",
")",
".",
"get",
"(",
"row",
")",
"rows",
"[",
"rindex",
"]",
".",
"merge!",
"(",
"h_hash_maker",
"(",
"tar",
",",
"val",
")",
")",
"end",
"end",
"end",
"end",
"end",
"end",
"rows",
".",
"each",
"{",
"|",
"r",
"|",
"yield",
"r",
"}",
"end"
] | helper methods have h_ prefix
@param key [String] col name (present in all sources) to join on
@param paths [Array[String]] list of file paths. note: order is important
@param klass [String] namespaced class name of ReindeerETL source
@param path_opts [Array[Hash]] list of hashes (count equal to the
number of sources) containing opts for each source. opt format is
determined by the expectations of the source class. order is the
same as the @paths list
@param expect_full_match [Boolean] true if every row in first
source is expected to be matched in every other source
@param target_cols [Array[Array[String]]] Optional list of lists of
column string names to be appended to initial source. order of
outer list designates which source the internal cols come from.
all cols are always added from first source, so
target_cols.count == paths.count - 1
@param namespace [String] module where methods to get target_cols reside | [
"helper",
"methods",
"have",
"h_",
"prefix"
] | bff48c999b17850681346d500f2a05900252e21f | https://github.com/OHSU-FM/reindeer-etl/blob/bff48c999b17850681346d500f2a05900252e21f/lib/reindeer-etl/sources/multi_source.rb#L36-L81 | train |
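Stripped of the target-column and namespace handling, the join above reduces to merging later sources into the first source's rows on a shared key; the rows below are invented to show just that step.

key    = :id
first  = [{ id: 1, name: 'Ada' },  { id: 2, name: 'Lin' }]
second = [{ id: 1, dept: 'Math' }, { id: 3, dept: 'Ops' }]

rows = first.map(&:dup)
second.each do |row|
  match = rows.find { |r| r[key] == row[key] }
  # Unmatched rows are skipped, as when expect_full_match is off.
  match.merge!(row) if match
end

p rows
#=> [{:id=>1, :name=>"Ada", :dept=>"Math"}, {:id=>2, :name=>"Lin"}]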
akerl/logcabin | lib/logcabin/setcollection.rb | LogCabin.SetCollection.find | def find(name)
cache(name) { @children.find { |x| safe_find(x, name) } || failure }
end | ruby | def find(name)
cache(name) { @children.find { |x| safe_find(x, name) } || failure }
end | [
"def",
"find",
"(",
"name",
")",
"cache",
"(",
"name",
")",
"{",
"@children",
".",
"find",
"{",
"|",
"x",
"|",
"safe_find",
"(",
"x",
",",
"name",
")",
"}",
"||",
"failure",
"}",
"end"
] | Method for finding modules to load | [
"Method",
"for",
"finding",
"modules",
"to",
"load"
] | a0c793f4047f3a80fd232c582ecce55139092b8e | https://github.com/akerl/logcabin/blob/a0c793f4047f3a80fd232c582ecce55139092b8e/lib/logcabin/setcollection.rb#L13-L15 | train |
knuedge/off_the_grid | lib/off_the_grid/user.rb | OffTheGrid.User.add | def add
Tempfile.open do |tmpfile|
tmpfile.puts render(Templates::User::ERB)
tmpfile.flush
system("qconf -Auser #{tmpfile.path}")
sleep 5
end
end | ruby | def add
Tempfile.open do |tmpfile|
tmpfile.puts render(Templates::User::ERB)
tmpfile.flush
system("qconf -Auser #{tmpfile.path}")
sleep 5
end
end | [
"def",
"add",
"Tempfile",
".",
"open",
"do",
"|",
"tmpfile",
"|",
"tmpfile",
".",
"puts",
"render",
"(",
"Templates",
"::",
"User",
"::",
"ERB",
")",
"tmpfile",
".",
"flush",
"system",
"(",
"\"qconf -Auser #{tmpfile.path}\"",
")",
"sleep",
"5",
"end",
"end"
] | Add an SGE user | [
"Add",
"an",
"SGE",
"user"
] | cf367b6d22de5c73da2e2550e1f45e103a219a51 | https://github.com/knuedge/off_the_grid/blob/cf367b6d22de5c73da2e2550e1f45e103a219a51/lib/off_the_grid/user.rb#L49-L56 | train |
JotaSe/undecided | lib/undecided/decider.rb | Undecided.Decider.decide | def decide(rule, values, strict = true)
rule = rule.clone
values = values.clone
error unless Undecided::Evaluator.valid?(rule, values, strict)
# Sanitize data
# Eval rules and values after process it, with safe data
final_expression = Converter.replacing_variables(rule, values)
eval final_expression
rescue => e
puts e.message
error
end | ruby | def decide(rule, values, strict = true)
rule = rule.clone
values = values.clone
error unless Undecided::Evaluator.valid?(rule, values, strict)
# Sanitize data
# Eval rules and values after process it, with safe data
final_expression = Converter.replacing_variables(rule, values)
eval final_expression
rescue => e
puts e.message
error
end | [
"def",
"decide",
"(",
"rule",
",",
"values",
",",
"strict",
"=",
"true",
")",
"rule",
"=",
"rule",
".",
"clone",
"values",
"=",
"values",
".",
"clone",
"error",
"unless",
"Undecided",
"::",
"Evaluator",
".",
"valid?",
"(",
"rule",
",",
"values",
",",
"strict",
")",
"final_expression",
"=",
"Converter",
".",
"replacing_variables",
"(",
"rule",
",",
"values",
")",
"eval",
"final_expression",
"rescue",
"=>",
"e",
"puts",
"e",
".",
"message",
"error",
"end"
] | Given a boolean expression and data to replace, return result | [
"Given",
"a",
"boolean",
"expression",
"and",
"data",
"to",
"replace",
"return",
"result"
] | 80255277d0aadb74e98835af01a3427e11c73649 | https://github.com/JotaSe/undecided/blob/80255277d0aadb74e98835af01a3427e11c73649/lib/undecided/decider.rb#L7-L18 | train |
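The heart of decide is substitute-then-eval; a standalone version of that step, with a made-up rule, values and substitution helper, looks like this.

# Illustrative stand-in for Converter.replacing_variables: swap each
# variable name in the rule for its inspected value.
def replacing_variables(rule, values)
  values.reduce(rule) { |expr, (name, value)| expr.gsub(name.to_s, value.inspect) }
end

rule   = 'age >= 18 && country == "AR"'
values = { age: 20, country: 'AR' }

expression = replacing_variables(rule, values)
p expression       #=> "20 >= 18 && \"AR\" == \"AR\""
p eval(expression) #=> true, the decision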
xmatters/sensu-plugins-xmatters | lib/xmatters-sensu.rb | XMSensu.XMClient.get_default_properties | def get_default_properties(event)
client = event['client']
check = event['check']
{
server_name: client['name'],
server_ip: client['address'],
subscriptions: client['subscriptions'].join(';'),
environment: client['environment'],
check_name: check['name'],
check_command: check['command'],
check_output: check['output'],
timestamp: event['timestamp'].inspect
}
end | ruby | def get_default_properties(event)
client = event['client']
check = event['check']
{
server_name: client['name'],
server_ip: client['address'],
subscriptions: client['subscriptions'].join(';'),
environment: client['environment'],
check_name: check['name'],
check_command: check['command'],
check_output: check['output'],
timestamp: event['timestamp'].inspect
}
end | [
"def",
"get_default_properties",
"(",
"event",
")",
"client",
"=",
"event",
"[",
"'client'",
"]",
"check",
"=",
"event",
"[",
"'check'",
"]",
"{",
"server_name",
":",
"client",
"[",
"'name'",
"]",
",",
"server_ip",
":",
"client",
"[",
"'address'",
"]",
",",
"subscriptions",
":",
"client",
"[",
"'subscriptions'",
"]",
".",
"join",
"(",
"';'",
")",
",",
"environment",
":",
"client",
"[",
"'environment'",
"]",
",",
"check_name",
":",
"check",
"[",
"'name'",
"]",
",",
"check_command",
":",
"check",
"[",
"'command'",
"]",
",",
"check_output",
":",
"check",
"[",
"'output'",
"]",
",",
"timestamp",
":",
"event",
"[",
"'timestamp'",
"]",
".",
"inspect",
"}",
"end"
] | Gets a default set of properties from the event | [
"Gets",
"a",
"default",
"set",
"of",
"properties",
"from",
"the",
"event"
] | eb21b1aa6c9c5b31142dd596b01ebeade4f6638f | https://github.com/xmatters/sensu-plugins-xmatters/blob/eb21b1aa6c9c5b31142dd596b01ebeade4f6638f/lib/xmatters-sensu.rb#L46-L59 | train |
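A made-up Sensu-style event run through the same field mapping as the method above; every literal value is illustrative.

event = {
  'client' => { 'name' => 'web01', 'address' => '10.0.0.5',
                'subscriptions' => %w[web linux], 'environment' => 'prod' },
  'check'  => { 'name' => 'check_http', 'command' => 'check-http.rb -u /',
                'output' => 'CheckHttp OK: 200' },
  'timestamp' => 1_454_028_800
}

client, check = event['client'], event['check']
properties = {
  server_name:   client['name'],
  server_ip:     client['address'],
  subscriptions: client['subscriptions'].join(';'),
  environment:   client['environment'],
  check_name:    check['name'],
  check_command: check['command'],
  check_output:  check['output'],
  timestamp:     event['timestamp'].inspect
}

p properties[:subscriptions] #=> "web;linux"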
riddopic/garcun | lib/garcon/chef/secret_bag.rb | Garcon.SecretBag.data_bag_config_for | def data_bag_config_for(environment, source)
data_bag_item = encrypted_data_bag_for(environment, DATA_BAG)
if data_bag_item.has_key?(source)
data_bag_item[source]
elsif DATA_BAG == source
data_bag_item
else
{}
end
end | ruby | def data_bag_config_for(environment, source)
data_bag_item = encrypted_data_bag_for(environment, DATA_BAG)
if data_bag_item.has_key?(source)
data_bag_item[source]
elsif DATA_BAG == source
data_bag_item
else
{}
end
end | [
"def",
"data_bag_config_for",
"(",
"environment",
",",
"source",
")",
"data_bag_item",
"=",
"encrypted_data_bag_for",
"(",
"environment",
",",
"DATA_BAG",
")",
"if",
"data_bag_item",
".",
"has_key?",
"(",
"source",
")",
"data_bag_item",
"[",
"source",
"]",
"elsif",
"DATA_BAG",
"==",
"source",
"data_bag_item",
"else",
"{",
"}",
"end",
"end"
] | Loads the encrypted data bag item and returns credentials for the
environment or for a default key.
@param [String] environment
The environment
@param [String] source
The deployment source to load configuration for
@return [Chef::DataBagItem]
The data bag item | [
"Loads",
"the",
"encrypted",
"data",
"bag",
"item",
"and",
"returns",
"credentials",
"for",
"the",
"environment",
"or",
"for",
"a",
"default",
"key",
"."
] | c2409bd8cf9c14b967a719810dab5269d69b42de | https://github.com/riddopic/garcun/blob/c2409bd8cf9c14b967a719810dab5269d69b42de/lib/garcon/chef/secret_bag.rb#L129-L139 | train |
riddopic/garcun | lib/garcon/chef/secret_bag.rb | Garcon.SecretBag.encrypted_data_bag_for | def encrypted_data_bag_for(environment, data_bag)
@encrypted_data_bags = {} unless @encrypted_data_bags
if encrypted_data_bags[data_bag]
return get_from_data_bags_cache(data_bag)
else
data_bag_item = encrypted_data_bag_item(data_bag, environment)
data_bag_item ||= encrypted_data_bag_item(data_bag, WILDCARD)
data_bag_item ||= {}
@encrypted_data_bags[data_bag] = data_bag_item
return data_bag_item
end
end | ruby | def encrypted_data_bag_for(environment, data_bag)
@encrypted_data_bags = {} unless @encrypted_data_bags
if encrypted_data_bags[data_bag]
return get_from_data_bags_cache(data_bag)
else
data_bag_item = encrypted_data_bag_item(data_bag, environment)
data_bag_item ||= encrypted_data_bag_item(data_bag, WILDCARD)
data_bag_item ||= {}
@encrypted_data_bags[data_bag] = data_bag_item
return data_bag_item
end
end | [
"def",
"encrypted_data_bag_for",
"(",
"environment",
",",
"data_bag",
")",
"@encrypted_data_bags",
"=",
"{",
"}",
"unless",
"@encrypted_data_bags",
"if",
"encrypted_data_bags",
"[",
"data_bag",
"]",
"return",
"get_from_data_bags_cache",
"(",
"data_bag",
")",
"else",
"data_bag_item",
"=",
"encrypted_data_bag_item",
"(",
"data_bag",
",",
"environment",
")",
"data_bag_item",
"||=",
"encrypted_data_bag_item",
"(",
"data_bag",
",",
"WILDCARD",
")",
"data_bag_item",
"||=",
"{",
"}",
"@encrypted_data_bags",
"[",
"data_bag",
"]",
"=",
"data_bag_item",
"return",
"data_bag_item",
"end",
"end"
] | Looks for the given data bag in the cache and if not found, will load a
data bag item named for the chef_environment, or '_wildcard' value.
@param [String] environment
The environment.
@param [String] data_bag
The data bag to load.
@return [Chef::Mash]
The data bag item in Mash form. | [
"Looks",
"for",
"the",
"given",
"data",
"bag",
"in",
"the",
"cache",
"and",
"if",
"not",
"found",
"will",
"load",
"a",
"data",
"bag",
"item",
"named",
"for",
"the",
"chef_environment",
"or",
"_wildcard",
"value",
"."
] | c2409bd8cf9c14b967a719810dab5269d69b42de | https://github.com/riddopic/garcun/blob/c2409bd8cf9c14b967a719810dab5269d69b42de/lib/garcon/chef/secret_bag.rb#L153-L165 | train |
magiclabs/attachment_magic | lib/attachment_magic.rb | AttachmentMagic.ClassMethods.copy_to_temp_file | def copy_to_temp_file(file, temp_base_name)
Tempfile.new(temp_base_name, AttachmentMagic.tempfile_path).tap do |tmp|
tmp.close
FileUtils.cp file, tmp.path
end
end | ruby | def copy_to_temp_file(file, temp_base_name)
Tempfile.new(temp_base_name, AttachmentMagic.tempfile_path).tap do |tmp|
tmp.close
FileUtils.cp file, tmp.path
end
end | [
"def",
"copy_to_temp_file",
"(",
"file",
",",
"temp_base_name",
")",
"Tempfile",
".",
"new",
"(",
"temp_base_name",
",",
"AttachmentMagic",
".",
"tempfile_path",
")",
".",
"tap",
"do",
"|",
"tmp",
"|",
"tmp",
".",
"close",
"FileUtils",
".",
"cp",
"file",
",",
"tmp",
".",
"path",
"end",
"end"
] | Copies the given file path to a new tempfile, returning the closed tempfile. | [
"Copies",
"the",
"given",
"file",
"path",
"to",
"a",
"new",
"tempfile",
"returning",
"the",
"closed",
"tempfile",
"."
] | 98f2d897f108352e53a7b8d05a475111d1a6f2a1 | https://github.com/magiclabs/attachment_magic/blob/98f2d897f108352e53a7b8d05a475111d1a6f2a1/lib/attachment_magic.rb#L130-L135 | train |
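The same Tempfile-copy pattern as above, outside the class; the source file is created on the fly so the sketch runs on its own without the plugin's tempfile_path setting.

require 'tempfile'
require 'fileutils'

source = Tempfile.new('source')
source.write('hello')
source.close

# As in copy_to_temp_file: close the new tempfile, then copy the data in.
copy = Tempfile.new('attachment').tap do |tmp|
  tmp.close
  FileUtils.cp(source.path, tmp.path)
end

p File.read(copy.path) #=> "hello"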
magiclabs/attachment_magic | lib/attachment_magic.rb | AttachmentMagic.ClassMethods.write_to_temp_file | def write_to_temp_file(data, temp_base_name)
Tempfile.new(temp_base_name, AttachmentMagic.tempfile_path).tap do |tmp|
tmp.binmode
tmp.write data
tmp.close
end
end | ruby | def write_to_temp_file(data, temp_base_name)
Tempfile.new(temp_base_name, AttachmentMagic.tempfile_path).tap do |tmp|
tmp.binmode
tmp.write data
tmp.close
end
end | [
"def",
"write_to_temp_file",
"(",
"data",
",",
"temp_base_name",
")",
"Tempfile",
".",
"new",
"(",
"temp_base_name",
",",
"AttachmentMagic",
".",
"tempfile_path",
")",
".",
"tap",
"do",
"|",
"tmp",
"|",
"tmp",
".",
"binmode",
"tmp",
".",
"write",
"data",
"tmp",
".",
"close",
"end",
"end"
] | Writes the given data to a new tempfile, returning the closed tempfile. | [
"Writes",
"the",
"given",
"data",
"to",
"a",
"new",
"tempfile",
"returning",
"the",
"closed",
"tempfile",
"."
] | 98f2d897f108352e53a7b8d05a475111d1a6f2a1 | https://github.com/magiclabs/attachment_magic/blob/98f2d897f108352e53a7b8d05a475111d1a6f2a1/lib/attachment_magic.rb#L138-L144 | train |
magiclabs/attachment_magic | lib/attachment_magic.rb | AttachmentMagic.InstanceMethods.uploaded_data= | def uploaded_data=(file_data)
if file_data.respond_to?(:content_type)
return nil if file_data.size == 0
self.content_type = detect_mimetype(file_data)
self.filename = file_data.original_filename if respond_to?(:filename)
else
return nil if file_data.blank? || file_data['size'] == 0
self.content_type = file_data['content_type']
self.filename = file_data['filename']
file_data = file_data['tempfile']
end
if file_data.is_a?(StringIO)
file_data.rewind
set_temp_data file_data.read
else
self.temp_paths.unshift file_data.tempfile.path
end
end | ruby | def uploaded_data=(file_data)
if file_data.respond_to?(:content_type)
return nil if file_data.size == 0
self.content_type = detect_mimetype(file_data)
self.filename = file_data.original_filename if respond_to?(:filename)
else
return nil if file_data.blank? || file_data['size'] == 0
self.content_type = file_data['content_type']
self.filename = file_data['filename']
file_data = file_data['tempfile']
end
if file_data.is_a?(StringIO)
file_data.rewind
set_temp_data file_data.read
else
self.temp_paths.unshift file_data.tempfile.path
end
end | [
"def",
"uploaded_data",
"=",
"(",
"file_data",
")",
"if",
"file_data",
".",
"respond_to?",
"(",
":content_type",
")",
"return",
"nil",
"if",
"file_data",
".",
"size",
"==",
"0",
"self",
".",
"content_type",
"=",
"detect_mimetype",
"(",
"file_data",
")",
"self",
".",
"filename",
"=",
"file_data",
".",
"original_filename",
"if",
"respond_to?",
"(",
":filename",
")",
"else",
"return",
"nil",
"if",
"file_data",
".",
"blank?",
"||",
"file_data",
"[",
"'size'",
"]",
"==",
"0",
"self",
".",
"content_type",
"=",
"file_data",
"[",
"'content_type'",
"]",
"self",
".",
"filename",
"=",
"file_data",
"[",
"'filename'",
"]",
"file_data",
"=",
"file_data",
"[",
"'tempfile'",
"]",
"end",
"if",
"file_data",
".",
"is_a?",
"(",
"StringIO",
")",
"file_data",
".",
"rewind",
"set_temp_data",
"file_data",
".",
"read",
"else",
"self",
".",
"temp_paths",
".",
"unshift",
"file_data",
".",
"tempfile",
".",
"path",
"end",
"end"
] | This method handles the uploaded file object. If you set the field name to uploaded_data, you don't need
any special code in your controller.
<% form_for :attachment, :html => { :multipart => true } do |f| -%>
<p><%= f.file_field :uploaded_data %></p>
<p><%= submit_tag :Save %>
<% end -%>
@attachment = Attachment.create! params[:attachment] | [
"This",
"method",
"handles",
"the",
"uploaded",
"file",
"object",
".",
"If",
"you",
"set",
"the",
"field",
"name",
"to",
"uploaded_data",
"you",
"don",
"t",
"need",
"any",
"special",
"code",
"in",
"your",
"controller",
"."
] | 98f2d897f108352e53a7b8d05a475111d1a6f2a1 | https://github.com/magiclabs/attachment_magic/blob/98f2d897f108352e53a7b8d05a475111d1a6f2a1/lib/attachment_magic.rb#L189-L206 | train |
magiclabs/attachment_magic | lib/attachment_magic.rb | AttachmentMagic.InstanceMethods.attachment_attributes_valid? | def attachment_attributes_valid?
[:size, :content_type].each do |attr_name|
enum = attachment_options[attr_name]
errors.add attr_name, I18n.translate("activerecord.errors.messages.inclusion", attr_name => enum) unless enum.nil? || enum.include?(send(attr_name))
end
end | ruby | def attachment_attributes_valid?
[:size, :content_type].each do |attr_name|
enum = attachment_options[attr_name]
errors.add attr_name, I18n.translate("activerecord.errors.messages.inclusion", attr_name => enum) unless enum.nil? || enum.include?(send(attr_name))
end
end | [
"def",
"attachment_attributes_valid?",
"[",
":size",
",",
":content_type",
"]",
".",
"each",
"do",
"|",
"attr_name",
"|",
"enum",
"=",
"attachment_options",
"[",
"attr_name",
"]",
"errors",
".",
"add",
"attr_name",
",",
"I18n",
".",
"translate",
"(",
"\"activerecord.errors.messages.inclusion\"",
",",
"attr_name",
"=>",
"enum",
")",
"unless",
"enum",
".",
"nil?",
"||",
"enum",
".",
"include?",
"(",
"send",
"(",
"attr_name",
")",
")",
"end",
"end"
] | validates the size and content_type attributes according to the current model's options | [
"validates",
"the",
"size",
"and",
"content_type",
"attributes",
"according",
"to",
"the",
"current",
"model",
"s",
"options"
] | 98f2d897f108352e53a7b8d05a475111d1a6f2a1 | https://github.com/magiclabs/attachment_magic/blob/98f2d897f108352e53a7b8d05a475111d1a6f2a1/lib/attachment_magic.rb#L270-L275 | train |
chrisjones-tripletri/action_command | lib/action_command/pretty_print_log_action.rb | ActionCommand.PrettyPrintLogAction.execute_internal | def execute_internal(_result)
item = LogMessage.new
parser = LogParser.new(@source, @sequence)
sequences = {}
# keep track of sequences, and when you complete one, then print out the
# entire thing at once.
while parser.next(item)
if item.kind?(ActionCommand::LOG_KIND_COMMAND_OUTPUT) && item.root?
process_output(sequences, item)
else
process_other(sequences, item)
end
item = LogMessage.new
end
# print out any incomplete sequences
print_sequences(sequences)
end | ruby | def execute_internal(_result)
item = LogMessage.new
parser = LogParser.new(@source, @sequence)
sequences = {}
# keep track of sequences, and when you complete one, then print out the
# entire thing at once.
while parser.next(item)
if item.kind?(ActionCommand::LOG_KIND_COMMAND_OUTPUT) && item.root?
process_output(sequences, item)
else
process_other(sequences, item)
end
item = LogMessage.new
end
# print out any incomplete sequences
print_sequences(sequences)
end | [
"def",
"execute_internal",
"(",
"_result",
")",
"item",
"=",
"LogMessage",
".",
"new",
"parser",
"=",
"LogParser",
".",
"new",
"(",
"@source",
",",
"@sequence",
")",
"sequences",
"=",
"{",
"}",
"while",
"parser",
".",
"next",
"(",
"item",
")",
"if",
"item",
".",
"kind?",
"(",
"ActionCommand",
"::",
"LOG_KIND_COMMAND_OUTPUT",
")",
"&&",
"item",
".",
"root?",
"process_output",
"(",
"sequences",
",",
"item",
")",
"else",
"process_other",
"(",
"sequences",
",",
"item",
")",
"end",
"item",
"=",
"LogMessage",
".",
"new",
"end",
"print_sequences",
"(",
"sequences",
")",
"end"
] | Say hello to the specified person. | [
"Say",
"hello",
"to",
"the",
"specified",
"person",
"."
] | 9b9a8ba30e407ca6d88a62a164d1dc22ba149874 | https://github.com/chrisjones-tripletri/action_command/blob/9b9a8ba30e407ca6d88a62a164d1dc22ba149874/lib/action_command/pretty_print_log_action.rb#L28-L45 | train |
cloudhead/mutter | lib/mutter/mutterer.rb | Mutter.Mutterer.load | def load styles
styles += '.yml' unless styles =~ /\.ya?ml$/
styles = File.join(File.dirname(__FILE__), "styles", styles) unless File.exist? styles
YAML.load_file(styles).inject({}) do |h, (key, value)|
value = { :match => value['match'], :style => value['style'] }
h.merge key.to_sym => value
end
end | ruby | def load styles
styles += '.yml' unless styles =~ /\.ya?ml$/
styles = File.join(File.dirname(__FILE__), "styles", styles) unless File.exist? styles
YAML.load_file(styles).inject({}) do |h, (key, value)|
value = { :match => value['match'], :style => value['style'] }
h.merge key.to_sym => value
end
end | [
"def",
"load",
"styles",
"styles",
"+=",
"'.yml'",
"unless",
"styles",
"=~",
"/",
"\\.",
"/",
"styles",
"=",
"File",
".",
"join",
"(",
"File",
".",
"dirname",
"(",
"__FILE__",
")",
",",
"\"styles\"",
",",
"styles",
")",
"unless",
"File",
".",
"exist?",
"styles",
"YAML",
".",
"load_file",
"(",
"styles",
")",
".",
"inject",
"(",
"{",
"}",
")",
"do",
"|",
"h",
",",
"(",
"key",
",",
"value",
")",
"|",
"value",
"=",
"{",
":match",
"=>",
"value",
"[",
"'match'",
"]",
",",
":style",
"=>",
"value",
"[",
"'style'",
"]",
"}",
"h",
".",
"merge",
"key",
".",
"to_sym",
"=>",
"value",
"end",
"end"
] | Loads styles from a YAML style-sheet,
and converts the keys to symbols | [
"Loads",
"styles",
"from",
"a",
"YAML",
"style",
"-",
"sheet",
"and",
"converts",
"the",
"keys",
"to",
"symbols"
] | 08a422552027d5a7b30b60206384c11698cf903d | https://github.com/cloudhead/mutter/blob/08a422552027d5a7b30b60206384c11698cf903d/lib/mutter/mutterer.rb#L64-L71 | train |
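For illustration, a standalone sketch of the YAML-to-hash conversion performed by the load method above, using an inline YAML string in place of a style-sheet file; the style name and values are invented, only the inject/merge shape comes from the method itself.

require 'yaml'

style_sheet = <<~YAML
  error:
    match: ["!!"]
    style: ["red", "bold"]
YAML

styles = YAML.load(style_sheet).inject({}) do |h, (key, value)|
  value = { :match => value['match'], :style => value['style'] }
  h.merge key.to_sym => value            # symbolize the style names
end

p styles  # => {:error=>{:match=>["!!"], :style=>["red", "bold"]}}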
cloudhead/mutter | lib/mutter/mutterer.rb | Mutter.Mutterer.unstyle | def unstyle msg
styles.map do |_,v|
v[:match]
end.flatten.inject(msg) do |m, tag|
m.gsub(tag, '')
end
end | ruby | def unstyle msg
styles.map do |_,v|
v[:match]
end.flatten.inject(msg) do |m, tag|
m.gsub(tag, '')
end
end | [
"def",
"unstyle",
"msg",
"styles",
".",
"map",
"do",
"|",
"_",
",",
"v",
"|",
"v",
"[",
":match",
"]",
"end",
".",
"flatten",
".",
"inject",
"(",
"msg",
")",
"do",
"|",
"m",
",",
"tag",
"|",
"m",
".",
"gsub",
"(",
"tag",
",",
"''",
")",
"end",
"end"
] | Remove all tags from string | [
"Remove",
"all",
"tags",
"from",
"string"
] | 08a422552027d5a7b30b60206384c11698cf903d | https://github.com/cloudhead/mutter/blob/08a422552027d5a7b30b60206384c11698cf903d/lib/mutter/mutterer.rb#L88-L94 | train |
cloudhead/mutter | lib/mutter/mutterer.rb | Mutter.Mutterer.write | def write str
self.class.stream.tap do |stream|
stream.write str
stream.flush
end ; nil
end | ruby | def write str
self.class.stream.tap do |stream|
stream.write str
stream.flush
end ; nil
end | [
"def",
"write",
"str",
"self",
".",
"class",
".",
"stream",
".",
"tap",
"do",
"|",
"stream",
"|",
"stream",
".",
"write",
"str",
"stream",
".",
"flush",
"end",
";",
"nil",
"end"
] | Write to the out stream, and flush it | [
"Write",
"to",
"the",
"out",
"stream",
"and",
"flush",
"it"
] | 08a422552027d5a7b30b60206384c11698cf903d | https://github.com/cloudhead/mutter/blob/08a422552027d5a7b30b60206384c11698cf903d/lib/mutter/mutterer.rb#L107-L112 | train |
cloudhead/mutter | lib/mutter/mutterer.rb | Mutter.Mutterer.stylize | def stylize string, styles = []
[styles].flatten.inject(string) do |str, style|
style = style.to_sym
if ANSI[:transforms].include? style
esc str, *ANSI[:transforms][style]
elsif ANSI[:colors].include? style
esc str, ANSI[:colors][style], ANSI[:colors][:reset]
else
stylize(str, @styles[style][:style])
end
end
end | ruby | def stylize string, styles = []
[styles].flatten.inject(string) do |str, style|
style = style.to_sym
if ANSI[:transforms].include? style
esc str, *ANSI[:transforms][style]
elsif ANSI[:colors].include? style
esc str, ANSI[:colors][style], ANSI[:colors][:reset]
else
stylize(str, @styles[style][:style])
end
end
end | [
"def",
"stylize",
"string",
",",
"styles",
"=",
"[",
"]",
"[",
"styles",
"]",
".",
"flatten",
".",
"inject",
"(",
"string",
")",
"do",
"|",
"str",
",",
"style",
"|",
"style",
"=",
"style",
".",
"to_sym",
"if",
"ANSI",
"[",
":transforms",
"]",
".",
"include?",
"style",
"esc",
"str",
",",
"*",
"ANSI",
"[",
":transforms",
"]",
"[",
"style",
"]",
"elsif",
"ANSI",
"[",
":colors",
"]",
".",
"include?",
"style",
"esc",
"str",
",",
"ANSI",
"[",
":colors",
"]",
"[",
"style",
"]",
",",
"ANSI",
"[",
":colors",
"]",
"[",
":reset",
"]",
"else",
"stylize",
"(",
"str",
",",
"@styles",
"[",
"style",
"]",
"[",
":style",
"]",
")",
"end",
"end",
"end"
] | Apply styles to a string
if the style is a default ANSI style, we add the start
and end sequence to the string.
if the style is a custom style, we recurse, sending
the list of ANSI styles contained in the custom style.
TODO: use ';' delimited codes instead of multiple \e sequences | [
"Apply",
"styles",
"to",
"a",
"string"
] | 08a422552027d5a7b30b60206384c11698cf903d | https://github.com/cloudhead/mutter/blob/08a422552027d5a7b30b60206384c11698cf903d/lib/mutter/mutterer.rb#L170-L181 | train |
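For illustration, a minimal self-contained sketch of the ANSI bracketing that stylize relies on; the esc helper name mirrors the one used above, while the codes 31/39 (red/default foreground) and 1/22 (bold/normal intensity) are standard ANSI values rather than constants taken from the gem.

# Wrap a string in an opening and a closing ANSI escape sequence.
def esc(str, open_code, close_code)
  "\e[#{open_code}m#{str}\e[#{close_code}m"
end

puts esc("warning", 31, 39)              # red text on an ANSI terminal
puts esc(esc("warning", 31, 39), 1, 22)  # nested styles: bold + red, as chained styles produce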
danielpuglisi/seiten | lib/seiten/page.rb | Seiten.Page.parent_of? | def parent_of?(child)
page = self
if child
if page.id == child.parent_id
true
else
child.parent.nil? ? false : page.parent_of?(child.parent)
end
end
end | ruby | def parent_of?(child)
page = self
if child
if page.id == child.parent_id
true
else
child.parent.nil? ? false : page.parent_of?(child.parent)
end
end
end | [
"def",
"parent_of?",
"(",
"child",
")",
"page",
"=",
"self",
"if",
"child",
"if",
"page",
".",
"id",
"==",
"child",
".",
"parent_id",
"true",
"else",
"child",
".",
"parent",
".",
"nil?",
"?",
"false",
":",
"page",
".",
"parent_of?",
"(",
"child",
".",
"parent",
")",
"end",
"end",
"end"
] | true if child is children of page | [
"true",
"if",
"child",
"is",
"children",
"of",
"page"
] | fa23d9ec616a23c615b0bf4b358bb979ab849104 | https://github.com/danielpuglisi/seiten/blob/fa23d9ec616a23c615b0bf4b358bb979ab849104/lib/seiten/page.rb#L83-L92 | train |
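For illustration, a standalone sketch of the recursive ancestor walk above, with a plain Struct standing in for Seiten::Page; the ids are invented and the comparison is expressed against the parent object rather than parent_id.

PageNode = Struct.new(:id, :parent) do
  # True if self appears anywhere in child's chain of parents.
  def parent_of?(child)
    return false if child.nil? || child.parent.nil?
    child.parent.id == id || parent_of?(child.parent)
  end
end

root    = PageNode.new(1, nil)
section = PageNode.new(2, root)
leaf    = PageNode.new(3, section)

p root.parent_of?(leaf)     # => true, grandparents count
p section.parent_of?(root)  # => false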
danielpuglisi/seiten | lib/seiten/page.rb | Seiten.Page.active? | def active?(current_page)
if current_page
if id == current_page.id
true
elsif parent_of?(current_page)
true
else
false
end
end
end | ruby | def active?(current_page)
if current_page
if id == current_page.id
true
elsif parent_of?(current_page)
true
else
false
end
end
end | [
"def",
"active?",
"(",
"current_page",
")",
"if",
"current_page",
"if",
"id",
"==",
"current_page",
".",
"id",
"true",
"elsif",
"parent_of?",
"(",
"current_page",
")",
"true",
"else",
"false",
"end",
"end",
"end"
] | true if page is equal current_page or parent of current_page | [
"true",
"if",
"page",
"is",
"equal",
"current_page",
"or",
"parent",
"of",
"current_page"
] | fa23d9ec616a23c615b0bf4b358bb979ab849104 | https://github.com/danielpuglisi/seiten/blob/fa23d9ec616a23c615b0bf4b358bb979ab849104/lib/seiten/page.rb#L95-L105 | train |
knaveofdiamonds/sequel_load_data_infile | lib/sequel/load_data_infile.rb | Sequel.LoadDataInfile.load_infile_sql | def load_infile_sql(path, columns, options={})
replacement = opts[:insert_ignore] ? :ignore : :replace
options = {:update => replacement}.merge(options)
LoadDataInfileExpression.new(path,
opts[:from].first,
columns,
options).
to_sql(db)
end | ruby | def load_infile_sql(path, columns, options={})
replacement = opts[:insert_ignore] ? :ignore : :replace
options = {:update => replacement}.merge(options)
LoadDataInfileExpression.new(path,
opts[:from].first,
columns,
options).
to_sql(db)
end | [
"def",
"load_infile_sql",
"(",
"path",
",",
"columns",
",",
"options",
"=",
"{",
"}",
")",
"replacement",
"=",
"opts",
"[",
":insert_ignore",
"]",
"?",
":ignore",
":",
":replace",
"options",
"=",
"{",
":update",
"=>",
"replacement",
"}",
".",
"merge",
"(",
"options",
")",
"LoadDataInfileExpression",
".",
"new",
"(",
"path",
",",
"opts",
"[",
":from",
"]",
".",
"first",
",",
"columns",
",",
"options",
")",
".",
"to_sql",
"(",
"db",
")",
"end"
] | Returns the SQL for a LOAD DATA INFILE statement. | [
"Returns",
"the",
"SQL",
"for",
"a",
"LOAD",
"DATA",
"INFILE",
"statement",
"."
] | a9198d727b44289ae99d2eaaf4bd7ec032ef737a | https://github.com/knaveofdiamonds/sequel_load_data_infile/blob/a9198d727b44289ae99d2eaaf4bd7ec032ef737a/lib/sequel/load_data_infile.rb#L136-L144 | train |
dabassett/shibbolite | spec/support/features/session_helpers.rb | Features.SessionHelpers.sign_in_as | def sign_in_as(group)
FactoryGirl.create(:user, umbcusername: 'test_user', group: group)
page.driver.browser.process_and_follow_redirects(:get, '/shibbolite/login', {}, {'umbcusername' => 'test_user'})
end | ruby | def sign_in_as(group)
FactoryGirl.create(:user, umbcusername: 'test_user', group: group)
page.driver.browser.process_and_follow_redirects(:get, '/shibbolite/login', {}, {'umbcusername' => 'test_user'})
end | [
"def",
"sign_in_as",
"(",
"group",
")",
"FactoryGirl",
".",
"create",
"(",
":user",
",",
"umbcusername",
":",
"'test_user'",
",",
"group",
":",
"group",
")",
"page",
".",
"driver",
".",
"browser",
".",
"process_and_follow_redirects",
"(",
":get",
",",
"'/shibbolite/login'",
",",
"{",
"}",
",",
"{",
"'umbcusername'",
"=>",
"'test_user'",
"}",
")",
"end"
] | hacked login, but the alternative is
not having integration tests when
using a Shibboleth based auth | [
"hacked",
"login",
"but",
"the",
"alternative",
"is",
"not",
"having",
"integration",
"tests",
"when",
"using",
"a",
"Shibboleth",
"based",
"auth"
] | cbd679c88de4ab238c40029447715f6ff22f3f50 | https://github.com/dabassett/shibbolite/blob/cbd679c88de4ab238c40029447715f6ff22f3f50/spec/support/features/session_helpers.rb#L7-L10 | train |
arvicco/poster | lib/poster/site.rb | Poster.Site.connect | def connect uri
Faraday.new(:url => "#{uri.scheme}://#{uri.host}") do |faraday|
faraday.request :multipart
faraday.request :url_encoded
# faraday.use FaradayMiddleware::FollowRedirects, limit: 3
faraday.use :cookie_jar
faraday.response :logger # log requests to STDOUT
faraday.adapter Faraday.default_adapter # make requests with Net::HTTP
end
end | ruby | def connect uri
Faraday.new(:url => "#{uri.scheme}://#{uri.host}") do |faraday|
faraday.request :multipart
faraday.request :url_encoded
# faraday.use FaradayMiddleware::FollowRedirects, limit: 3
faraday.use :cookie_jar
faraday.response :logger # log requests to STDOUT
faraday.adapter Faraday.default_adapter # make requests with Net::HTTP
end
end | [
"def",
"connect",
"uri",
"Faraday",
".",
"new",
"(",
":url",
"=>",
"\"#{uri.scheme}://#{uri.host}\"",
")",
"do",
"|",
"faraday",
"|",
"faraday",
".",
"request",
":multipart",
"faraday",
".",
"request",
":url_encoded",
"faraday",
".",
"use",
":cookie_jar",
"faraday",
".",
"response",
":logger",
"faraday",
".",
"adapter",
"Faraday",
".",
"default_adapter",
"end",
"end"
] | Establish Faraday connection | [
"Establish",
"Faraday",
"connection"
] | a5f22f7cb02116ab4dc5b7f2bdb672306b3dac63 | https://github.com/arvicco/poster/blob/a5f22f7cb02116ab4dc5b7f2bdb672306b3dac63/lib/poster/site.rb#L20-L29 | train |
tnorthb/coinstack | lib/coinstack/printer.rb | Coinstack.Printer.pretty_print_user_list | def pretty_print_user_list(list)
total = 0
data = []
# Header row
data.push('Asset', 'Total Value', 'Change % (Week)')
list.user_pairs.each do |user_pair|
data.push(user_pair.symbol)
data.push(user_pair.valuation.format)
data.push(user_pair.perchant_change_week.to_s)
total += user_pair.valuation
end
data.push('', '', '')
data.push('TOTAL:', total.format, '')
data.push('', '', '')
print_arrays(data, 3)
end | ruby | def pretty_print_user_list(list)
total = 0
data = []
# Header row
data.push('Asset', 'Total Value', 'Change % (Week)')
list.user_pairs.each do |user_pair|
data.push(user_pair.symbol)
data.push(user_pair.valuation.format)
data.push(user_pair.perchant_change_week.to_s)
total += user_pair.valuation
end
data.push('', '', '')
data.push('TOTAL:', total.format, '')
data.push('', '', '')
print_arrays(data, 3)
end | [
"def",
"pretty_print_user_list",
"(",
"list",
")",
"total",
"=",
"0",
"data",
"=",
"[",
"]",
"data",
".",
"push",
"(",
"'Asset'",
",",
"'Total Value'",
",",
"'Change % (Week)'",
")",
"list",
".",
"user_pairs",
".",
"each",
"do",
"|",
"user_pair",
"|",
"data",
".",
"push",
"(",
"user_pair",
".",
"symbol",
")",
"data",
".",
"push",
"(",
"user_pair",
".",
"valuation",
".",
"format",
")",
"data",
".",
"push",
"(",
"user_pair",
".",
"perchant_change_week",
".",
"to_s",
")",
"total",
"+=",
"user_pair",
".",
"valuation",
"end",
"data",
".",
"push",
"(",
"''",
",",
"''",
",",
"''",
")",
"data",
".",
"push",
"(",
"'TOTAL:'",
",",
"total",
".",
"format",
",",
"''",
")",
"data",
".",
"push",
"(",
"''",
",",
"''",
",",
"''",
")",
"print_arrays",
"(",
"data",
",",
"3",
")",
"end"
] | Prints out a summary of the user's hodlings formatted nicely | [
"Prints",
"out",
"a",
"summary",
"of",
"the",
"user",
"s",
"hodlings",
"formatted",
"nicely"
] | 1316fd069f502fa04fe15bc6ab7e63302aa29fd8 | https://github.com/tnorthb/coinstack/blob/1316fd069f502fa04fe15bc6ab7e63302aa29fd8/lib/coinstack/printer.rb#L13-L29 | train |
tnorthb/coinstack | lib/coinstack/printer.rb | Coinstack.Printer.print_arrays | def print_arrays(data, cols)
formatted_list = cli.list(data, :uneven_columns_across, cols)
cli.say(formatted_list)
end | ruby | def print_arrays(data, cols)
formatted_list = cli.list(data, :uneven_columns_across, cols)
cli.say(formatted_list)
end | [
"def",
"print_arrays",
"(",
"data",
",",
"cols",
")",
"formatted_list",
"=",
"cli",
".",
"list",
"(",
"data",
",",
":uneven_columns_across",
",",
"cols",
")",
"cli",
".",
"say",
"(",
"formatted_list",
")",
"end"
] | Data should be an array of arrays, cols is the number of columns it has
Prints the data to screen with equal spacing between them | [
"Data",
"should",
"be",
"an",
"array",
"of",
"arays",
"cols",
"is",
"the",
"number",
"of",
"columns",
"it",
"has",
"Prints",
"the",
"data",
"to",
"screen",
"with",
"equal",
"spacing",
"between",
"them"
] | 1316fd069f502fa04fe15bc6ab7e63302aa29fd8 | https://github.com/tnorthb/coinstack/blob/1316fd069f502fa04fe15bc6ab7e63302aa29fd8/lib/coinstack/printer.rb#L33-L36 | train |
tnorthb/coinstack | lib/coinstack/printer.rb | Coinstack.Printer.array_char_length | def array_char_length(input_array)
length = 0
input_array.each do |a|
length += a.to_s.length
end
length
end | ruby | def array_char_length(input_array)
length = 0
input_array.each do |a|
length += a.to_s.length
end
length
end | [
"def",
"array_char_length",
"(",
"input_array",
")",
"length",
"=",
"0",
"input_array",
".",
"each",
"do",
"|",
"a",
"|",
"length",
"+=",
"a",
".",
"to_s",
".",
"length",
"end",
"length",
"end"
] | Returns the combined length of characters in an array | [
"Returns",
"the",
"combined",
"length",
"of",
"charaters",
"in",
"an",
"array"
] | 1316fd069f502fa04fe15bc6ab7e63302aa29fd8 | https://github.com/tnorthb/coinstack/blob/1316fd069f502fa04fe15bc6ab7e63302aa29fd8/lib/coinstack/printer.rb#L43-L49 | train |
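For illustration, a standalone rendering of the length-summing behaviour documented above; the sample values are invented.

def array_char_length(input_array)
  length = 0
  input_array.each { |a| length += a.to_s.length }
  length
end

p array_char_length(["BTC", 1250, nil])  # => 7, i.e. 3 + 4 + 0 (nil.to_s is "")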
remote-exec/context-filters | lib/context-filters/filters/filters.rb | ContextFilters::Filters.Filters.select_filters | def select_filters(target, options)
found = filters_store.fetch(options, [])
if
Hash === options || options.nil?
then
options ||={}
options.merge!(:target => target)
found +=
# can not @filters.fetch(options, []) to allow filters provide custom ==()
filters_store.select do |filter_options, filters|
options == filter_options
end.map(&:last).flatten
end
found
end | ruby | def select_filters(target, options)
found = filters_store.fetch(options, [])
if
Hash === options || options.nil?
then
options ||={}
options.merge!(:target => target)
found +=
# can not @filters.fetch(options, []) to allow filters provide custom ==()
filters_store.select do |filter_options, filters|
options == filter_options
end.map(&:last).flatten
end
found
end | [
"def",
"select_filters",
"(",
"target",
",",
"options",
")",
"found",
"=",
"filters_store",
".",
"fetch",
"(",
"options",
",",
"[",
"]",
")",
"if",
"Hash",
"===",
"options",
"||",
"options",
".",
"nil?",
"then",
"options",
"||=",
"{",
"}",
"options",
".",
"merge!",
"(",
":target",
"=>",
"target",
")",
"found",
"+=",
"filters_store",
".",
"select",
"do",
"|",
"filter_options",
",",
"filters",
"|",
"options",
"==",
"filter_options",
"end",
".",
"map",
"(",
"&",
":last",
")",
".",
"flatten",
"end",
"found",
"end"
] | Select matching filters and filters including targets when
options is a +Hash+
@param target [Object] an object to run the method on
@param options [Object] a filter for selecting matching blocks | [
"Select",
"matching",
"filters",
"and",
"filters",
"including",
"targets",
"when",
"options",
"is",
"a",
"+",
"Hash",
"+"
] | 66b54fc6c46b224321713b608d70bba3afde9902 | https://github.com/remote-exec/context-filters/blob/66b54fc6c46b224321713b608d70bba3afde9902/lib/context-filters/filters/filters.rb#L54-L68 | train |
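For illustration, a framework-free sketch of the selection rule above: entries stored under the exact options key are always returned, and when options is a Hash (or nil) it is additionally merged with :target and compared against every stored key. The store contents and filter names below are invented.

filters_store = {}
filters_store[:global]               = [:upcase_filter]
filters_store[{ :target => :title }] = [:trim_filter]

def select_filters(filters_store, target, options)
  found = filters_store.fetch(options, [])
  if options.is_a?(Hash) || options.nil?
    options = (options || {}).merge(:target => target)
    found += filters_store.select { |filter_options, _| options == filter_options }
                          .map(&:last).flatten
  end
  found
end

p select_filters(filters_store, :title, nil)      # => [:trim_filter]
p select_filters(filters_store, :title, :global)  # => [:upcase_filter]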
modernistik/parse-stack-async | lib/parse/stack/async.rb | Parse.Object.save_eventually | def save_eventually
block = block_given? ? Proc.new : nil
_self = self
Parse::Stack::Async.run do
begin
result = true
_self.save!
rescue => e
result = false
puts "[SaveEventually] Failed for object #{_self.parse_class}##{_self.id}: #{e}"
ensure
block.call(result) if block
block = nil
_self = nil
end # begin
end # do
end | ruby | def save_eventually
block = block_given? ? Proc.new : nil
_self = self
Parse::Stack::Async.run do
begin
result = true
_self.save!
rescue => e
result = false
puts "[SaveEventually] Failed for object #{_self.parse_class}##{_self.id}: #{e}"
ensure
block.call(result) if block
block = nil
_self = nil
end # begin
end # do
end | [
"def",
"save_eventually",
"block",
"=",
"block_given?",
"?",
"Proc",
".",
"new",
":",
"nil",
"_self",
"=",
"self",
"Parse",
"::",
"Stack",
"::",
"Async",
".",
"run",
"do",
"begin",
"result",
"=",
"true",
"_self",
".",
"save!",
"rescue",
"=>",
"e",
"result",
"=",
"false",
"puts",
"\"[SaveEventually] Failed for object #{_self.parse_class}##{_self.id}: #{e}\"",
"ensure",
"block",
".",
"call",
"(",
"result",
")",
"if",
"block",
"block",
"=",
"nil",
"_self",
"=",
"nil",
"end",
"end",
"end"
] | Adds support for saving a Parse object in the background.
@example
object.save_eventually do |success|
puts "Saved successfully" if success
end
@yield A block to call after the save has completed.
@yieldparam [Boolean] success whether the save was successful.
@return [Boolean] whether the job was enqueued. | [
"Adds",
"support",
"for",
"saving",
"a",
"Parse",
"object",
"in",
"the",
"background",
"."
] | 24f79f0d79c1f2d3f8c561242c4528ac878143a8 | https://github.com/modernistik/parse-stack-async/blob/24f79f0d79c1f2d3f8c561242c4528ac878143a8/lib/parse/stack/async.rb#L66-L82 | train |
modernistik/parse-stack-async | lib/parse/stack/async.rb | Parse.Object.destroy_eventually | def destroy_eventually
block = block_given? ? Proc.new : nil
_self = self
Parse::Stack::Async.run do
begin
result = true
_self.destroy
rescue => e
result = false
puts "[DestroyEventually] Failed for object #{_self.parse_class}##{_self.id}: #{e}"
ensure
block.call(result) if block
block = nil
_self = nil
end # begin
end # do
end | ruby | def destroy_eventually
block = block_given? ? Proc.new : nil
_self = self
Parse::Stack::Async.run do
begin
result = true
_self.destroy
rescue => e
result = false
puts "[DestroyEventually] Failed for object #{_self.parse_class}##{_self.id}: #{e}"
ensure
block.call(result) if block
block = nil
_self = nil
end # begin
end # do
end | [
"def",
"destroy_eventually",
"block",
"=",
"block_given?",
"?",
"Proc",
".",
"new",
":",
"nil",
"_self",
"=",
"self",
"Parse",
"::",
"Stack",
"::",
"Async",
".",
"run",
"do",
"begin",
"result",
"=",
"true",
"_self",
".",
"destroy",
"rescue",
"=>",
"e",
"result",
"=",
"false",
"puts",
"\"[DestroyEventually] Failed for object #{_self.parse_class}##{_self.id}: #{e}\"",
"ensure",
"block",
".",
"call",
"(",
"result",
")",
"if",
"block",
"block",
"=",
"nil",
"_self",
"=",
"nil",
"end",
"end",
"end"
] | save_eventually
Adds support for deleting a Parse object in the background.
@example
object.destroy_eventually do |success|
puts 'Deleted successfully' if success
end
@yield A block to call after the deletion has completed.
@yieldparam [Boolean] success whether the save was successful.'
@return [Boolean] whether the job was enqueued. | [
"save_eventually",
"Adds",
"support",
"for",
"deleting",
"a",
"Parse",
"object",
"in",
"the",
"background",
"."
] | 24f79f0d79c1f2d3f8c561242c4528ac878143a8 | https://github.com/modernistik/parse-stack-async/blob/24f79f0d79c1f2d3f8c561242c4528ac878143a8/lib/parse/stack/async.rb#L92-L108 | train |
Raybeam/myreplicator | lib/exporter/mysql_exporter.rb | Myreplicator.MysqlExporter.export_table | def export_table export_obj
@export_obj = export_obj
ExportMetadata.record(:table => @export_obj.table_name,
:database => @export_obj.source_schema,
:export_to => load_to,
:export_id => @export_obj.id,
:filepath => filepath,
:store_in => @export_obj.s3_path,
:incremental_col => @export_obj.incremental_column) do |metadata|
prepare metadata
if (@export_obj.export_type? == :new && load_to == "mysql") || load_to == "mysql"
on_failure_state_trans(metadata, "new") # If failed, go back to new
on_export_success(metadata)
initial_export metadata
elsif @export_obj.export_type? == :incremental || load_to == "vertica"
on_failure_state_trans(metadata, "failed") # Set state trans on failure
on_export_success(metadata)
incremental_export_into_outfile metadata
end
end # metadata
end | ruby | def export_table export_obj
@export_obj = export_obj
ExportMetadata.record(:table => @export_obj.table_name,
:database => @export_obj.source_schema,
:export_to => load_to,
:export_id => @export_obj.id,
:filepath => filepath,
:store_in => @export_obj.s3_path,
:incremental_col => @export_obj.incremental_column) do |metadata|
prepare metadata
if (@export_obj.export_type? == :new && load_to == "mysql") || load_to == "mysql"
on_failure_state_trans(metadata, "new") # If failed, go back to new
on_export_success(metadata)
initial_export metadata
elsif @export_obj.export_type? == :incremental || load_to == "vertica"
on_failure_state_trans(metadata, "failed") # Set state trans on failure
on_export_success(metadata)
incremental_export_into_outfile metadata
end
end # metadata
end | [
"def",
"export_table",
"export_obj",
"@export_obj",
"=",
"export_obj",
"ExportMetadata",
".",
"record",
"(",
":table",
"=>",
"@export_obj",
".",
"table_name",
",",
":database",
"=>",
"@export_obj",
".",
"source_schema",
",",
":export_to",
"=>",
"load_to",
",",
":export_id",
"=>",
"@export_obj",
".",
"id",
",",
":filepath",
"=>",
"filepath",
",",
":store_in",
"=>",
"@export_obj",
".",
"s3_path",
",",
":incremental_col",
"=>",
"@export_obj",
".",
"incremental_column",
")",
"do",
"|",
"metadata",
"|",
"prepare",
"metadata",
"if",
"(",
"@export_obj",
".",
"export_type?",
"==",
":new",
"&&",
"load_to",
"==",
"\"mysql\"",
")",
"||",
"load_to",
"==",
"\"mysql\"",
"on_failure_state_trans",
"(",
"metadata",
",",
"\"new\"",
")",
"on_export_success",
"(",
"metadata",
")",
"initial_export",
"metadata",
"elsif",
"@export_obj",
".",
"export_type?",
"==",
":incremental",
"||",
"load_to",
"==",
"\"vertica\"",
"on_failure_state_trans",
"(",
"metadata",
",",
"\"failed\"",
")",
"on_export_success",
"(",
"metadata",
")",
"incremental_export_into_outfile",
"metadata",
"end",
"end",
"end"
] | Gets an Export object and dumps the data
Initially using mysqldump
Incrementally using mysql -e afterwards | [
"Gets",
"an",
"Export",
"object",
"and",
"dumps",
"the",
"data",
"Initially",
"using",
"mysqldump",
"Incrementally",
"using",
"mysql",
"-",
"e",
"afterwards"
] | 470938e70f46886b525c65a4a464b4cf8383d00d | https://github.com/Raybeam/myreplicator/blob/470938e70f46886b525c65a4a464b4cf8383d00d/lib/exporter/mysql_exporter.rb#L13-L37 | train |
Raybeam/myreplicator | lib/exporter/mysql_exporter.rb | Myreplicator.MysqlExporter.initial_export | def initial_export metadata
metadata.export_type = "initial"
max_value = @export_obj.max_value if @export_obj.incremental_export?
cmd = initial_mysqldump_cmd
exporting_state_trans # mark exporting
puts "Exporting..."
result = execute_export(cmd, metadata)
check_result(result, 0)
@export_obj.update_max_val(max_value) if @export_obj.incremental_export?
end | ruby | def initial_export metadata
metadata.export_type = "initial"
max_value = @export_obj.max_value if @export_obj.incremental_export?
cmd = initial_mysqldump_cmd
exporting_state_trans # mark exporting
puts "Exporting..."
result = execute_export(cmd, metadata)
check_result(result, 0)
@export_obj.update_max_val(max_value) if @export_obj.incremental_export?
end | [
"def",
"initial_export",
"metadata",
"metadata",
".",
"export_type",
"=",
"\"initial\"",
"max_value",
"=",
"@export_obj",
".",
"max_value",
"if",
"@export_obj",
".",
"incremental_export?",
"cmd",
"=",
"initial_mysqldump_cmd",
"exporting_state_trans",
"puts",
"\"Exporting...\"",
"result",
"=",
"execute_export",
"(",
"cmd",
",",
"metadata",
")",
"check_result",
"(",
"result",
",",
"0",
")",
"@export_obj",
".",
"update_max_val",
"(",
"max_value",
")",
"if",
"@export_obj",
".",
"incremental_export?",
"end"
] | Exports Table using mysqldump. This method is invoked only once.
Dumps with create options, no need to create table manaully | [
"Exports",
"Table",
"using",
"mysqldump",
".",
"This",
"method",
"is",
"invoked",
"only",
"once",
".",
"Dumps",
"with",
"create",
"options",
"no",
"need",
"to",
"create",
"table",
"manaully"
] | 470938e70f46886b525c65a4a464b4cf8383d00d | https://github.com/Raybeam/myreplicator/blob/470938e70f46886b525c65a4a464b4cf8383d00d/lib/exporter/mysql_exporter.rb#L72-L83 | train |
Raybeam/myreplicator | lib/exporter/mysql_exporter.rb | Myreplicator.MysqlExporter.incremental_export_into_outfile | def incremental_export_into_outfile metadata
unless @export_obj.is_running?
if @export_obj.export_type == "incremental"
max_value = @export_obj.max_value
metadata.export_type = "incremental"
@export_obj.update_max_val if @export_obj.max_incremental_value.blank?
end
if (@export_obj.export_type == "all" && @export_obj.export_to == "vertica")
metadata.export_type = "incremental"
end
options = {
:db => @export_obj.source_schema,
:source_schema => @export_obj.source_schema,
:table => @export_obj.table_name,
:filepath => filepath,
:destination_schema => @export_obj.destination_schema,
:enclosed_by => Myreplicator.configs[@export_obj.source_schema]["enclosed_by"],
:export_id => @export_obj.id
}
schema_status = Myreplicator::MysqlExporter.schema_changed?(options)
Kernel.p "===== schema_status ====="
Kernel.p schema_status
if schema_status[:changed] # && new?
metadata.export_type = "initial"
else
options[:incremental_col] = @export_obj.incremental_column
options[:incremental_col_type] = @export_obj.incremental_column_type
options[:export_type] = @export_obj.export_type
options[:incremental_val] = [@export_obj.destination_max_incremental_value, @export_obj.max_incremental_value].min
#options[:incremental_val] = @export_obj.max_incremental_value
end
#Kernel.p "===== incremental_export_into_outfile OPTIONS ====="
#Kernel.p options
cmd = SqlCommands.mysql_export_outfile(options)
#Kernel.p "===== incremental_export_into_outfile CMD ====="
#puts cmd
exporting_state_trans
puts "Exporting..."
result = execute_export(cmd, metadata)
check_result(result, 0)
if @export_obj.export_type == "incremental"
metadata.incremental_val = max_value # store max val in metadata
@export_obj.update_max_val(max_value) # update max value if export was successful
end
end
return false
end | ruby | def incremental_export_into_outfile metadata
unless @export_obj.is_running?
if @export_obj.export_type == "incremental"
max_value = @export_obj.max_value
metadata.export_type = "incremental"
@export_obj.update_max_val if @export_obj.max_incremental_value.blank?
end
if (@export_obj.export_type == "all" && @export_obj.export_to == "vertica")
metadata.export_type = "incremental"
end
options = {
:db => @export_obj.source_schema,
:source_schema => @export_obj.source_schema,
:table => @export_obj.table_name,
:filepath => filepath,
:destination_schema => @export_obj.destination_schema,
:enclosed_by => Myreplicator.configs[@export_obj.source_schema]["enclosed_by"],
:export_id => @export_obj.id
}
schema_status = Myreplicator::MysqlExporter.schema_changed?(options)
Kernel.p "===== schema_status ====="
Kernel.p schema_status
if schema_status[:changed] # && new?
metadata.export_type = "initial"
else
options[:incremental_col] = @export_obj.incremental_column
options[:incremental_col_type] = @export_obj.incremental_column_type
options[:export_type] = @export_obj.export_type
options[:incremental_val] = [@export_obj.destination_max_incremental_value, @export_obj.max_incremental_value].min
#options[:incremental_val] = @export_obj.max_incremental_value
end
#Kernel.p "===== incremental_export_into_outfile OPTIONS ====="
#Kernel.p options
cmd = SqlCommands.mysql_export_outfile(options)
#Kernel.p "===== incremental_export_into_outfile CMD ====="
#puts cmd
exporting_state_trans
puts "Exporting..."
result = execute_export(cmd, metadata)
check_result(result, 0)
if @export_obj.export_type == "incremental"
metadata.incremental_val = max_value # store max val in metadata
@export_obj.update_max_val(max_value) # update max value if export was successful
end
end
return false
end | [
"def",
"incremental_export_into_outfile",
"metadata",
"unless",
"@export_obj",
".",
"is_running?",
"if",
"@export_obj",
".",
"export_type",
"==",
"\"incremental\"",
"max_value",
"=",
"@export_obj",
".",
"max_value",
"metadata",
".",
"export_type",
"=",
"\"incremental\"",
"@export_obj",
".",
"update_max_val",
"if",
"@export_obj",
".",
"max_incremental_value",
".",
"blank?",
"end",
"if",
"(",
"@export_obj",
".",
"export_type",
"==",
"\"all\"",
"&&",
"@export_obj",
".",
"export_to",
"==",
"\"vertica\"",
")",
"metadata",
".",
"export_type",
"=",
"\"incremental\"",
"end",
"options",
"=",
"{",
":db",
"=>",
"@export_obj",
".",
"source_schema",
",",
":source_schema",
"=>",
"@export_obj",
".",
"source_schema",
",",
":table",
"=>",
"@export_obj",
".",
"table_name",
",",
":filepath",
"=>",
"filepath",
",",
":destination_schema",
"=>",
"@export_obj",
".",
"destination_schema",
",",
":enclosed_by",
"=>",
"Myreplicator",
".",
"configs",
"[",
"@export_obj",
".",
"source_schema",
"]",
"[",
"\"enclosed_by\"",
"]",
",",
":export_id",
"=>",
"@export_obj",
".",
"id",
"}",
"schema_status",
"=",
"Myreplicator",
"::",
"MysqlExporter",
".",
"schema_changed?",
"(",
"options",
")",
"Kernel",
".",
"p",
"\"===== schema_status =====\"",
"Kernel",
".",
"p",
"schema_status",
"if",
"schema_status",
"[",
":changed",
"]",
"metadata",
".",
"export_type",
"=",
"\"initial\"",
"else",
"options",
"[",
":incremental_col",
"]",
"=",
"@export_obj",
".",
"incremental_column",
"options",
"[",
":incremental_col_type",
"]",
"=",
"@export_obj",
".",
"incremental_column_type",
"options",
"[",
":export_type",
"]",
"=",
"@export_obj",
".",
"export_type",
"options",
"[",
":incremental_val",
"]",
"=",
"[",
"@export_obj",
".",
"destination_max_incremental_value",
",",
"@export_obj",
".",
"max_incremental_value",
"]",
".",
"min",
"end",
"cmd",
"=",
"SqlCommands",
".",
"mysql_export_outfile",
"(",
"options",
")",
"exporting_state_trans",
"puts",
"\"Exporting...\"",
"result",
"=",
"execute_export",
"(",
"cmd",
",",
"metadata",
")",
"check_result",
"(",
"result",
",",
"0",
")",
"if",
"@export_obj",
".",
"export_type",
"==",
"\"incremental\"",
"metadata",
".",
"incremental_val",
"=",
"max_value",
"@export_obj",
".",
"update_max_val",
"(",
"max_value",
")",
"end",
"end",
"return",
"false",
"end"
] | Exports table incrementally, similar to incremental_export method
Dumps file in tmp directory specified in myreplicator.yml
Note that directory needs 777 permissions for mysql to be able to export the file
Uses \\0 as the delimiter and new line for lines | [
"Exports",
"table",
"incrementally",
"similar",
"to",
"incremental_export",
"method",
"Dumps",
"file",
"in",
"tmp",
"directory",
"specified",
"in",
"myreplicator",
".",
"yml",
"Note",
"that",
"directory",
"needs",
"777",
"permissions",
"for",
"mysql",
"to",
"be",
"able",
"to",
"export",
"the",
"file",
"Uses",
"\\\\",
"0",
"as",
"the",
"delimiter",
"and",
"new",
"line",
"for",
"lines"
] | 470938e70f46886b525c65a4a464b4cf8383d00d | https://github.com/Raybeam/myreplicator/blob/470938e70f46886b525c65a4a464b4cf8383d00d/lib/exporter/mysql_exporter.rb#L110-L162 | train |
Raybeam/myreplicator | lib/exporter/mysql_exporter.rb | Myreplicator.MysqlExporter.check_result | def check_result result, size
unless result.nil?
raise Exceptions::ExportError.new("Export Error\n#{result}") if result.length > 0
end
end | ruby | def check_result result, size
unless result.nil?
raise Exceptions::ExportError.new("Export Error\n#{result}") if result.length > 0
end
end | [
"def",
"check_result",
"result",
",",
"size",
"unless",
"result",
".",
"nil?",
"raise",
"Exceptions",
"::",
"ExportError",
".",
"new",
"(",
"\"Export Error\\n#{result}\"",
")",
"if",
"result",
".",
"length",
">",
"0",
"end",
"end"
] | Checks the returned result from SSH CMD
Size specifies if there should be any returned results or not | [
"Checks",
"the",
"returned",
"resut",
"from",
"SSH",
"CMD",
"Size",
"specifies",
"if",
"there",
"should",
"be",
"any",
"returned",
"results",
"or",
"not"
] | 470938e70f46886b525c65a4a464b4cf8383d00d | https://github.com/Raybeam/myreplicator/blob/470938e70f46886b525c65a4a464b4cf8383d00d/lib/exporter/mysql_exporter.rb#L239-L243 | train |
Raybeam/myreplicator | lib/exporter/mysql_exporter.rb | Myreplicator.MysqlExporter.zipfile | def zipfile metadata
cmd = "cd #{Myreplicator.configs[@export_obj.source_schema]["ssh_tmp_dir"]}; gzip #{@export_obj.filename}"
puts cmd
zip_result = metadata.ssh.exec!(cmd)
unless zip_result.nil?
raise Exceptions::ExportError.new("Export Error\n#{zip_result}") if zip_result.length > 0
end
metadata.zipped = true
return zip_result
end | ruby | def zipfile metadata
cmd = "cd #{Myreplicator.configs[@export_obj.source_schema]["ssh_tmp_dir"]}; gzip #{@export_obj.filename}"
puts cmd
zip_result = metadata.ssh.exec!(cmd)
unless zip_result.nil?
raise Exceptions::ExportError.new("Export Error\n#{zip_result}") if zip_result.length > 0
end
metadata.zipped = true
return zip_result
end | [
"def",
"zipfile",
"metadata",
"cmd",
"=",
"\"cd #{Myreplicator.configs[@export_obj.source_schema][\"ssh_tmp_dir\"]}; gzip #{@export_obj.filename}\"",
"puts",
"cmd",
"zip_result",
"=",
"metadata",
".",
"ssh",
".",
"exec!",
"(",
"cmd",
")",
"unless",
"zip_result",
".",
"nil?",
"raise",
"Exceptions",
"::",
"ExportError",
".",
"new",
"(",
"\"Export Error\\n#{zip_result}\"",
")",
"if",
"zip_result",
".",
"length",
">",
"0",
"end",
"metadata",
".",
"zipped",
"=",
"true",
"return",
"zip_result",
"end"
] | zips the file on the source DB server | [
"zips",
"the",
"file",
"on",
"the",
"source",
"DB",
"server"
] | 470938e70f46886b525c65a4a464b4cf8383d00d | https://github.com/Raybeam/myreplicator/blob/470938e70f46886b525c65a4a464b4cf8383d00d/lib/exporter/mysql_exporter.rb#L262-L276 | train |
blotto/thermometer | lib/thermometer/configuration.rb | Thermometer.Configuration.load_time_ranges | def load_time_ranges
@time_ranges = ActiveSupport::HashWithIndifferentAccess.new
time_ranges = @config['time']
time_ranges.each do |t,r|
time_range = ActiveSupport::HashWithIndifferentAccess.new
src_ranges ||= r
src_ranges.map { |k,v| time_range[k.to_sym] = rangify_time_boundaries(v) }
@time_ranges[t.to_sym] = time_range
end
end | ruby | def load_time_ranges
@time_ranges = ActiveSupport::HashWithIndifferentAccess.new
time_ranges = @config['time']
time_ranges.each do |t,r|
time_range = ActiveSupport::HashWithIndifferentAccess.new
src_ranges ||= r
src_ranges.map { |k,v| time_range[k.to_sym] = rangify_time_boundaries(v) }
@time_ranges[t.to_sym] = time_range
end
end | [
"def",
"load_time_ranges",
"@time_ranges",
"=",
"ActiveSupport",
"::",
"HashWithIndifferentAccess",
".",
"new",
"time_ranges",
"=",
"@config",
"[",
"'time'",
"]",
"time_ranges",
".",
"each",
"do",
"|",
"t",
",",
"r",
"|",
"time_range",
"=",
"ActiveSupport",
"::",
"HashWithIndifferentAccess",
".",
"new",
"src_ranges",
"||=",
"r",
"src_ranges",
".",
"map",
"{",
"|",
"k",
",",
"v",
"|",
"time_range",
"[",
"k",
".",
"to_sym",
"]",
"=",
"rangify_time_boundaries",
"(",
"v",
")",
"}",
"@time_ranges",
"[",
"t",
".",
"to_sym",
"]",
"=",
"time_range",
"end",
"end"
] | Load ranges from config file | [
"Load",
"ranges",
"from",
"config",
"file"
] | bb436c4f3b2ebce23aa1ed51d551ab7a165aedfa | https://github.com/blotto/thermometer/blob/bb436c4f3b2ebce23aa1ed51d551ab7a165aedfa/lib/thermometer/configuration.rb#L95-L105 | train |
blotto/thermometer | lib/thermometer/configuration.rb | Thermometer.Configuration.rangify_time_boundaries | def rangify_time_boundaries(src)
src.split("..").inject{ |s,e| s.split(".").inject{|n,m| n.to_i.send(m)}..e.split(".").inject{|n,m| n.to_i.send(m) }}
end | ruby | def rangify_time_boundaries(src)
src.split("..").inject{ |s,e| s.split(".").inject{|n,m| n.to_i.send(m)}..e.split(".").inject{|n,m| n.to_i.send(m) }}
end | [
"def",
"rangify_time_boundaries",
"(",
"src",
")",
"src",
".",
"split",
"(",
"\"..\"",
")",
".",
"inject",
"{",
"|",
"s",
",",
"e",
"|",
"s",
".",
"split",
"(",
"\".\"",
")",
".",
"inject",
"{",
"|",
"n",
",",
"m",
"|",
"n",
".",
"to_i",
".",
"send",
"(",
"m",
")",
"}",
"..",
"e",
".",
"split",
"(",
"\".\"",
")",
".",
"inject",
"{",
"|",
"n",
",",
"m",
"|",
"n",
".",
"to_i",
".",
"send",
"(",
"m",
")",
"}",
"}",
"end"
] | Takes a string like "2.days..3.weeks"
and converts to Range object -> 2.days..3.weeks | [
"Takes",
"a",
"string",
"like",
"2",
".",
"days",
"..",
"3",
".",
"weeks",
"and",
"converts",
"to",
"Range",
"object",
"-",
">",
"2",
".",
"days",
"..",
"3",
".",
"weeks"
] | bb436c4f3b2ebce23aa1ed51d551ab7a165aedfa | https://github.com/blotto/thermometer/blob/bb436c4f3b2ebce23aa1ed51d551ab7a165aedfa/lib/thermometer/configuration.rb#L111-L113 | train |
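For illustration, a standalone sketch of the string-to-Range parsing above; it assumes ActiveSupport is available for Integer#days and Integer#weeks, and the method is renamed rangify here to keep the snippet self-contained.

require 'active_support/all'

def rangify(src)
  src.split("..").inject do |s, e|
    s.split(".").inject { |n, m| n.to_i.send(m) }..e.split(".").inject { |n, m| n.to_i.send(m) }
  end
end

p rangify("2.days..3.weeks")  # => 2 days..21 days (a Range of ActiveSupport::Duration values)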
sanctuarycomputer/appi | app/controllers/concerns/appi/handles_resources.rb | APPI.HandlesResources.klass_for_type | def klass_for_type(type, singular=false)
type = type.singularize unless singular
type.classify.constantize
end | ruby | def klass_for_type(type, singular=false)
type = type.singularize unless singular
type.classify.constantize
end | [
"def",
"klass_for_type",
"(",
"type",
",",
"singular",
"=",
"false",
")",
"type",
"=",
"type",
".",
"singularize",
"unless",
"singular",
"type",
".",
"classify",
".",
"constantize",
"end"
] | Resolves the Class for a type.
Params:
+type+:: +String+ A stringified type.
+singular+:: +Boolean+ Pass true if you don't want to singularize the type. | [
"Resolves",
"the",
"Class",
"for",
"a",
"type",
"."
] | 5a06f7c090e4fcaaba9060685fa6a6c7434e8436 | https://github.com/sanctuarycomputer/appi/blob/5a06f7c090e4fcaaba9060685fa6a6c7434e8436/app/controllers/concerns/appi/handles_resources.rb#L31-L34 | train |
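For illustration, a standalone sketch of the singularize/classify/constantize chain above; ActiveSupport supplies the inflections, and BlogPost is a dummy class defined only so constantize has something to resolve.

require 'active_support/all'

class BlogPost; end  # stand-in model class

def klass_for_type(type, singular = false)
  type = type.singularize unless singular
  type.classify.constantize
end

p klass_for_type("blog_posts")        # => BlogPost
p klass_for_type("blog_post", true)   # => BlogPost (singularize skipped)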
sanctuarycomputer/appi | app/controllers/concerns/appi/handles_resources.rb | APPI.HandlesResources.resource_params | def resource_params
attributes = find_in_params(:attributes).try(:permit, permitted_attributes) || {}
relationships = {}
# Build Relationships Data
relationships_in_payload = find_in_params(:relationships)
if relationships_in_payload
raw_relationships = relationships_in_payload.clone
raw_relationships.each_key do |key|
data = raw_relationships.delete(key)[:data]
if permitted_relationships.include?(key.to_sym) && data
if data.kind_of?(Array)
relationships["#{key.singularize}_ids"] = extract_ids data
else
relationships["#{key}_id"] = extract_id data
end
end
end
end
attributes.merge relationships
end | ruby | def resource_params
attributes = find_in_params(:attributes).try(:permit, permitted_attributes) || {}
relationships = {}
# Build Relationships Data
relationships_in_payload = find_in_params(:relationships)
if relationships_in_payload
raw_relationships = relationships_in_payload.clone
raw_relationships.each_key do |key|
data = raw_relationships.delete(key)[:data]
if permitted_relationships.include?(key.to_sym) && data
if data.kind_of?(Array)
relationships["#{key.singularize}_ids"] = extract_ids data
else
relationships["#{key}_id"] = extract_id data
end
end
end
end
attributes.merge relationships
end | [
"def",
"resource_params",
"attributes",
"=",
"find_in_params",
"(",
":attributes",
")",
".",
"try",
"(",
":permit",
",",
"permitted_attributes",
")",
"||",
"{",
"}",
"relationships",
"=",
"{",
"}",
"relationships_in_payload",
"=",
"find_in_params",
"(",
":relationships",
")",
"if",
"relationships_in_payload",
"raw_relationships",
"=",
"relationships_in_payload",
".",
"clone",
"raw_relationships",
".",
"each_key",
"do",
"|",
"key",
"|",
"data",
"=",
"raw_relationships",
".",
"delete",
"(",
"key",
")",
"[",
":data",
"]",
"if",
"permitted_relationships",
".",
"include?",
"(",
"key",
".",
"to_sym",
")",
"&&",
"data",
"if",
"data",
".",
"kind_of?",
"(",
"Array",
")",
"relationships",
"[",
"\"#{key.singularize}_ids\"",
"]",
"=",
"extract_ids",
"data",
"else",
"relationships",
"[",
"\"#{key}_id\"",
"]",
"=",
"extract_id",
"data",
"end",
"end",
"end",
"end",
"attributes",
".",
"merge",
"relationships",
"end"
] | Builds a whitelisted resource_params hash from the permitted_attributes &
permitted_relationships arrays. Will automatically attempt to resolve
string IDs to numerical IDs, in the case the model's slug was passed to
the controller as ID. | [
"Builds",
"a",
"whitelisted",
"resource_params",
"hash",
"from",
"the",
"permitted_attributes",
"&",
"permitted_relationships",
"arrays",
".",
"Will",
"automatically",
"attempt",
"to",
"resolve",
"string",
"IDs",
"to",
"numerical",
"IDs",
"in",
"the",
"case",
"the",
"model",
"s",
"slug",
"was",
"passed",
"to",
"the",
"controller",
"as",
"ID",
"."
] | 5a06f7c090e4fcaaba9060685fa6a6c7434e8436 | https://github.com/sanctuarycomputer/appi/blob/5a06f7c090e4fcaaba9060685fa6a6c7434e8436/app/controllers/concerns/appi/handles_resources.rb#L108-L131 | train |
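For illustration, a framework-free sketch of the relationship flattening performed above, in which JSON:API relationship payloads become *_id / *_ids keys; the payload is invented and the extract_id/extract_ids slug resolution is reduced to reading the id values directly.

require 'active_support/all'  # for String#singularize

relationships = {
  "author" => { "data" => { "type" => "users", "id" => "7" } },
  "tags"   => { "data" => [{ "type" => "tags", "id" => "1" }, { "type" => "tags", "id" => "2" }] }
}

flattened = {}
relationships.each do |key, value|
  data = value["data"]
  if data.is_a?(Array)
    flattened["#{key.singularize}_ids"] = data.map { |d| d["id"] }
  else
    flattened["#{key}_id"] = data["id"]
  end
end

p flattened  # => {"author_id"=>"7", "tag_ids"=>["1", "2"]}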
richard-viney/lightstreamer | lib/lightstreamer/stream_connection_header.rb | Lightstreamer.StreamConnectionHeader.process_line | def process_line(line)
@lines << line
return process_success if @lines.first == 'OK'
return process_error if @lines.first == 'ERROR'
return process_end if @lines.first == 'END'
return process_sync_error if @lines.first == 'SYNC ERROR'
process_unrecognized
end | ruby | def process_line(line)
@lines << line
return process_success if @lines.first == 'OK'
return process_error if @lines.first == 'ERROR'
return process_end if @lines.first == 'END'
return process_sync_error if @lines.first == 'SYNC ERROR'
process_unrecognized
end | [
"def",
"process_line",
"(",
"line",
")",
"@lines",
"<<",
"line",
"return",
"process_success",
"if",
"@lines",
".",
"first",
"==",
"'OK'",
"return",
"process_error",
"if",
"@lines",
".",
"first",
"==",
"'ERROR'",
"return",
"process_end",
"if",
"@lines",
".",
"first",
"==",
"'END'",
"return",
"process_sync_error",
"if",
"@lines",
".",
"first",
"==",
"'SYNC ERROR'",
"process_unrecognized",
"end"
] | Processes a single line of header information. The return value indicates whether further data is required in
order to complete the header.
@param [String] line The line of header data to process.
@return [Boolean] Whether the header is still incomplete and requires further data. | [
"Processes",
"a",
"single",
"line",
"of",
"header",
"information",
".",
"The",
"return",
"value",
"indicates",
"whether",
"further",
"data",
"is",
"required",
"in",
"order",
"to",
"complete",
"the",
"header",
"."
] | 7be6350bd861495a52ca35a8640a1e6df34cf9d1 | https://github.com/richard-viney/lightstreamer/blob/7be6350bd861495a52ca35a8640a1e6df34cf9d1/lib/lightstreamer/stream_connection_header.rb#L24-L33 | train |
vpacher/xpay | lib/xpay/transaction_query.rb | Xpay.TransactionQuery.create_request | def create_request
raise AttributeMissing.new "(2500) TransactionReference or OrderReference need to be present." if (transaction_reference.nil? && order_reference.nil?)
raise AttributeMissing.new "(2500) SiteReference must be present." if (site_reference.nil? && (REXML::XPath.first(@request_xml, "//SiteReference").text.blank? rescue true))
REXML::XPath.first(@request_xml, "//Request").attributes["Type"] = "TRANSACTIONQUERY"
ops = REXML::XPath.first(@request_xml, "//Operation")
["TermUrl", "MerchantName", "Currency", "SettlementDay"].each { |e| ops.delete_element e }
(ops.elements["SiteReference"] || ops.add_element("SiteReference")).text = self.site_reference if self.site_reference
(ops.elements["TransactionReference"] || ops.add_element("TransactionReference")).text = self.transaction_reference if self.transaction_reference
order = REXML::XPath.first(@request_xml, "//Operation")
(order.elements["OrderReference"] || order.add_element("OrderReference")).text = self.order_reference if self.order_reference
root = @request_xml.root
(root.elements["Certificate"] || root.add_element("Certificate")).text = self.site_alias if self.site_alias
end | ruby | def create_request
raise AttributeMissing.new "(2500) TransactionReference or OrderReference need to be present." if (transaction_reference.nil? && order_reference.nil?)
raise AttributeMissing.new "(2500) SiteReference must be present." if (site_reference.nil? && (REXML::XPath.first(@request_xml, "//SiteReference").text.blank? rescue true))
REXML::XPath.first(@request_xml, "//Request").attributes["Type"] = "TRANSACTIONQUERY"
ops = REXML::XPath.first(@request_xml, "//Operation")
["TermUrl", "MerchantName", "Currency", "SettlementDay"].each { |e| ops.delete_element e }
(ops.elements["SiteReference"] || ops.add_element("SiteReference")).text = self.site_reference if self.site_reference
(ops.elements["TransactionReference"] || ops.add_element("TransactionReference")).text = self.transaction_reference if self.transaction_reference
order = REXML::XPath.first(@request_xml, "//Operation")
(order.elements["OrderReference"] || order.add_element("OrderReference")).text = self.order_reference if self.order_reference
root = @request_xml.root
(root.elements["Certificate"] || root.add_element("Certificate")).text = self.site_alias if self.site_alias
end | [
"def",
"create_request",
"raise",
"AttributeMissing",
".",
"new",
"\"(2500) TransactionReference or OrderReference need to be present.\"",
"if",
"(",
"transaction_reference",
".",
"nil?",
"&&",
"order_reference",
".",
"nil?",
")",
"raise",
"AttributeMissing",
".",
"new",
"\"(2500) SiteReference must be present.\"",
"if",
"(",
"site_reference",
".",
"nil?",
"&&",
"(",
"REXML",
"::",
"XPath",
".",
"first",
"(",
"@request_xml",
",",
"\"//SiteReference\"",
")",
".",
"text",
".",
"blank?",
"rescue",
"true",
")",
")",
"REXML",
"::",
"XPath",
".",
"first",
"(",
"@request_xml",
",",
"\"//Request\"",
")",
".",
"attributes",
"[",
"\"Type\"",
"]",
"=",
"\"TRANSACTIONQUERY\"",
"ops",
"=",
"REXML",
"::",
"XPath",
".",
"first",
"(",
"@request_xml",
",",
"\"//Operation\"",
")",
"[",
"\"TermUrl\"",
",",
"\"MerchantName\"",
",",
"\"Currency\"",
",",
"\"SettlementDay\"",
"]",
".",
"each",
"{",
"|",
"e",
"|",
"ops",
".",
"delete_element",
"e",
"}",
"(",
"ops",
".",
"elements",
"[",
"\"SiteReference\"",
"]",
"||",
"ops",
".",
"add_element",
"(",
"\"SiteReference\"",
")",
")",
".",
"text",
"=",
"self",
".",
"site_reference",
"if",
"self",
".",
"site_reference",
"(",
"ops",
".",
"elements",
"[",
"\"TransactionReference\"",
"]",
"||",
"ops",
".",
"add_element",
"(",
"\"TransactionReference\"",
")",
")",
".",
"text",
"=",
"self",
".",
"transaction_reference",
"if",
"self",
".",
"transaction_reference",
"order",
"=",
"REXML",
"::",
"XPath",
".",
"first",
"(",
"@request_xml",
",",
"\"//Operation\"",
")",
"(",
"order",
".",
"elements",
"[",
"\"OrderReference\"",
"]",
"||",
"order",
".",
"add_element",
"(",
"\"OrderReference\"",
")",
")",
".",
"text",
"=",
"self",
".",
"order_reference",
"if",
"self",
".",
"order_reference",
"root",
"=",
"@request_xml",
".",
"root",
"(",
"root",
".",
"elements",
"[",
"\"Certificate\"",
"]",
"||",
"root",
".",
"add_element",
"(",
"\"Certificate\"",
")",
")",
".",
"text",
"=",
"self",
".",
"site_alias",
"if",
"self",
".",
"site_alias",
"end"
] | Write the xml document needed for processing, fill in elements need and delete unused ones from the root_xml
raises an error if any necessary elements are missing | [
"Write",
"the",
"xml",
"document",
"needed",
"for",
"processing",
"fill",
"in",
"elements",
"need",
"and",
"delete",
"unused",
"ones",
"from",
"the",
"root_xml",
"raises",
"an",
"error",
"if",
"any",
"necessary",
"elements",
"are",
"missing"
] | 58c0b0f2600ed30ff44b84f97b96c74590474f3f | https://github.com/vpacher/xpay/blob/58c0b0f2600ed30ff44b84f97b96c74590474f3f/lib/xpay/transaction_query.rb#L43-L55 | train |
subimage/cashboard-rb | lib/cashboard/time_entry.rb | Cashboard.TimeEntry.toggle_timer | def toggle_timer
options = self.class.merge_options()
options.merge!({:body => self.to_xml})
response = self.class.put(self.links[:toggle_timer], options)
# Raise special errors if not a success
self.class.check_status_code(response)
# Re-initialize ourselves with information from response
initialize(response.parsed_response)
if self.stopped_timer
stopped_timer = Cashboard::Struct.new(self.stopped_timer)
end
stopped_timer || nil
end | ruby | def toggle_timer
options = self.class.merge_options()
options.merge!({:body => self.to_xml})
response = self.class.put(self.links[:toggle_timer], options)
# Raise special errors if not a success
self.class.check_status_code(response)
# Re-initialize ourselves with information from response
initialize(response.parsed_response)
if self.stopped_timer
stopped_timer = Cashboard::Struct.new(self.stopped_timer)
end
stopped_timer || nil
end | [
"def",
"toggle_timer",
"options",
"=",
"self",
".",
"class",
".",
"merge_options",
"(",
")",
"options",
".",
"merge!",
"(",
"{",
":body",
"=>",
"self",
".",
"to_xml",
"}",
")",
"response",
"=",
"self",
".",
"class",
".",
"put",
"(",
"self",
".",
"links",
"[",
":toggle_timer",
"]",
",",
"options",
")",
"self",
".",
"class",
".",
"check_status_code",
"(",
"response",
")",
"initialize",
"(",
"response",
".",
"parsed_response",
")",
"if",
"self",
".",
"stopped_timer",
"stopped_timer",
"=",
"Cashboard",
"::",
"Struct",
".",
"new",
"(",
"self",
".",
"stopped_timer",
")",
"end",
"stopped_timer",
"||",
"nil",
"end"
] | readonly
Starts or stops timer depending on its current state.
Will return an object of Cashboard::Struct if another timer was stopped
during this toggle operation.
Will return nil if no timer was stopped. | [
"readonly",
"Starts",
"or",
"stops",
"timer",
"depending",
"on",
"its",
"current",
"state",
"."
] | 320e311ea1549cdd0dada0f8a0a4f9942213b28f | https://github.com/subimage/cashboard-rb/blob/320e311ea1549cdd0dada0f8a0a4f9942213b28f/lib/cashboard/time_entry.rb#L20-L36 | train |
mbj/ducktrap | lib/ducktrap/error.rb | Ducktrap.Error.dump | def dump(output)
output.name(self)
output.attribute(:input, input)
output.nest(:context, context)
end | ruby | def dump(output)
output.name(self)
output.attribute(:input, input)
output.nest(:context, context)
end | [
"def",
"dump",
"(",
"output",
")",
"output",
".",
"name",
"(",
"self",
")",
"output",
".",
"attribute",
"(",
":input",
",",
"input",
")",
"output",
".",
"nest",
"(",
":context",
",",
"context",
")",
"end"
] | Dump state to output
@param [Formatter] formatter
@return [undefined]
@api private | [
"Dump",
"state",
"to",
"output"
] | 482d874d3eb43b2dbb518b8537851d742d785903 | https://github.com/mbj/ducktrap/blob/482d874d3eb43b2dbb518b8537851d742d785903/lib/ducktrap/error.rb#L17-L21 | train |
raid5/agilezen | lib/agilezen/stories.rb | AgileZen.Stories.project_story | def project_story(project_id, story_id, options={})
response_body = nil
begin
response = connection.get do |req|
req.url "/api/v1/projects/#{project_id}/story/#{story_id}", options
end
response_body = response.body
rescue MultiJson::DecodeError => e
#p 'Unable to parse JSON.'
end
response_body
end | ruby | def project_story(project_id, story_id, options={})
response_body = nil
begin
response = connection.get do |req|
req.url "/api/v1/projects/#{project_id}/story/#{story_id}", options
end
response_body = response.body
rescue MultiJson::DecodeError => e
#p 'Unable to parse JSON.'
end
response_body
end | [
"def",
"project_story",
"(",
"project_id",
",",
"story_id",
",",
"options",
"=",
"{",
"}",
")",
"response_body",
"=",
"nil",
"begin",
"response",
"=",
"connection",
".",
"get",
"do",
"|",
"req",
"|",
"req",
".",
"url",
"\"/api/v1/projects/#{project_id}/story/#{story_id}\"",
",",
"options",
"end",
"response_body",
"=",
"response",
".",
"body",
"rescue",
"MultiJson",
"::",
"DecodeError",
"=>",
"e",
"end",
"response_body",
"end"
] | Retrieve information for an individual story of a given project. | [
"Retrieve",
"information",
"for",
"an",
"individual",
"story",
"of",
"a",
"given",
"project",
"."
] | 36fcef642c82b35c8c8664ee6a2ff22ce52054c0 | https://github.com/raid5/agilezen/blob/36fcef642c82b35c8c8664ee6a2ff22ce52054c0/lib/agilezen/stories.rb#L21-L33 | train |
twohlix/database_cached_attribute | lib/database_cached_attribute.rb | DatabaseCachedAttribute.ClassMethods.database_cached_attribute | def database_cached_attribute(*attrs)
attrs.each do |attr|
define_method("invalidate_#{attr}") do |arg=nil| # default arg to allow before_blah callbacks
invalidate_cache attr.to_sym
end
define_method("only_#{attr}_changed?") do
only_change? attr.to_sym
end
define_method("cache_#{attr}") do
update_cache attr.to_sym
end
end
end | ruby | def database_cached_attribute(*attrs)
attrs.each do |attr|
define_method("invalidate_#{attr}") do |arg=nil| # default arg to allow before_blah callbacks
invalidate_cache attr.to_sym
end
define_method("only_#{attr}_changed?") do
only_change? attr.to_sym
end
define_method("cache_#{attr}") do
update_cache attr.to_sym
end
end
end | [
"def",
"database_cached_attribute",
"(",
"*",
"attrs",
")",
"attrs",
".",
"each",
"do",
"|",
"attr",
"|",
"define_method",
"(",
"\"invalidate_#{attr}\"",
")",
"do",
"|",
"arg",
"=",
"nil",
"|",
"invalidate_cache",
"attr",
".",
"to_sym",
"end",
"define_method",
"(",
"\"only_#{attr}_changed?\"",
")",
"do",
"only_change?",
"attr",
".",
"to_sym",
"end",
"define_method",
"(",
"\"cache_#{attr}\"",
")",
"do",
"update_cache",
"attr",
".",
"to_sym",
"end",
"end",
"end"
] | Sets up cache invalidation callbacks for the provided attributes | [
"Sets",
"up",
"cache",
"invalidation",
"callbacks",
"for",
"the",
"provided",
"attributes"
] | 9aee710f10062eb0ffa918310183aaabaeaffa04 | https://github.com/twohlix/database_cached_attribute/blob/9aee710f10062eb0ffa918310183aaabaeaffa04/lib/database_cached_attribute.rb#L62-L76 | train |
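For illustration, a standalone sketch of the define_method pattern used above; CacheMacros and Profile are invented stand-ins, and the generated method bodies are stubbed with puts rather than the gem's real cache helpers.

module CacheMacros
  def cached_attribute(*attrs)
    attrs.each do |attr|
      define_method("invalidate_#{attr}")    { |_arg = nil| puts "invalidating #{attr} cache" }
      define_method("only_#{attr}_changed?") { false }
      define_method("cache_#{attr}")         { puts "caching #{attr}" }
    end
  end
end

class Profile
  extend CacheMacros
  cached_attribute :avatar   # defines #invalidate_avatar, #only_avatar_changed?, #cache_avatar
end

profile = Profile.new
profile.invalidate_avatar        # prints "invalidating avatar cache"
p profile.only_avatar_changed?   # => false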
midas/nameable_record | lib/nameable_record/name.rb | NameableRecord.Name.to_s | def to_s( pattern='%l, %f' )
if pattern.is_a?( Symbol )
return conversational if pattern == :conversational
return sortable if pattern == :sortable
pattern = PREDEFINED_PATTERNS[pattern]
end
PATTERN_MAP.inject( pattern ) do |name, mapping|
name = name.gsub( mapping.first,
(send( mapping.last ) || '') )
end
end | ruby | def to_s( pattern='%l, %f' )
if pattern.is_a?( Symbol )
return conversational if pattern == :conversational
return sortable if pattern == :sortable
pattern = PREDEFINED_PATTERNS[pattern]
end
PATTERN_MAP.inject( pattern ) do |name, mapping|
name = name.gsub( mapping.first,
(send( mapping.last ) || '') )
end
end | [
"def",
"to_s",
"(",
"pattern",
"=",
"'%l, %f'",
")",
"if",
"pattern",
".",
"is_a?",
"(",
"Symbol",
")",
"return",
"conversational",
"if",
"pattern",
"==",
":conversational",
"return",
"sortable",
"if",
"pattern",
"==",
":sortable",
"pattern",
"=",
"PREDEFINED_PATTERNS",
"[",
"pattern",
"]",
"end",
"PATTERN_MAP",
".",
"inject",
"(",
"pattern",
")",
"do",
"|",
"name",
",",
"mapping",
"|",
"name",
"=",
"name",
".",
"gsub",
"(",
"mapping",
".",
"first",
",",
"(",
"send",
"(",
"mapping",
".",
"last",
")",
"||",
"''",
")",
")",
"end",
"end"
] | Creates a name based on pattern provided. Defaults to last, first.
Symbols:
%l - last name
%f - first name
%m - middle name
%p - prefix
%s - suffix | [
"Creates",
"a",
"name",
"based",
"on",
"pattern",
"provided",
".",
"Defaults",
"to",
"last",
"first",
"."
] | a8a6689151bff1e7448baeffbd9ee603989a708e | https://github.com/midas/nameable_record/blob/a8a6689151bff1e7448baeffbd9ee603989a708e/lib/nameable_record/name.rb#L38-L49 | train |
midas/nameable_record | lib/nameable_record/name.rb | NameableRecord.Name.sortable | def sortable
[
last,
[
prefix,
first,
middle,
suffix
].reject( &:blank? ).
join( ' ' )
].reject( &:blank? ).
join( ', ' )
end | ruby | def sortable
[
last,
[
prefix,
first,
middle,
suffix
].reject( &:blank? ).
join( ' ' )
].reject( &:blank? ).
join( ', ' )
end | [
"def",
"sortable",
"[",
"last",
",",
"[",
"prefix",
",",
"first",
",",
"middle",
",",
"suffix",
"]",
".",
"reject",
"(",
"&",
":blank?",
")",
".",
"join",
"(",
"' '",
")",
"]",
".",
"reject",
"(",
"&",
":blank?",
")",
".",
"join",
"(",
"', '",
")",
"end"
] | Returns the name in a sortable format. | [
"Returns",
"the",
"name",
"in",
"a",
"sortable",
"format",
"."
] | a8a6689151bff1e7448baeffbd9ee603989a708e | https://github.com/midas/nameable_record/blob/a8a6689151bff1e7448baeffbd9ee603989a708e/lib/nameable_record/name.rb#L66-L78 | train |
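The two nameable_record rows above describe the `%`-pattern substitution and the sortable form; a few illustrative calls make the difference concrete. The `name` value is assumed to already be a `NameableRecord::Name` with prefix "Dr.", first "Ada", middle "B.", last "Lovelace" and suffix "III" (the constructor is not shown in these rows, so it is left out).

name.to_s              # => "Lovelace, Ada"              default '%l, %f' pattern
name.to_s('%f %l')     # => "Ada Lovelace"
name.to_s(:sortable)   # => "Lovelace, Dr. Ada B. III"   same result as name.sortable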
schoefmann/klarlack | lib/varnish/client.rb | Varnish.Client.vcl | def vcl(op, *params)
response = cmd("vcl.#{op}", *params)
case op
when :list
response.split("\n").map do |line|
a = line.split(/\s+/, 3)
[a[0], a[1].to_i, a[2]]
end
else
response
end
end | ruby | def vcl(op, *params)
response = cmd("vcl.#{op}", *params)
case op
when :list
response.split("\n").map do |line|
a = line.split(/\s+/, 3)
[a[0], a[1].to_i, a[2]]
end
else
response
end
end | [
"def",
"vcl",
"(",
"op",
",",
"*",
"params",
")",
"response",
"=",
"cmd",
"(",
"\"vcl.#{op}\"",
",",
"*",
"params",
")",
"case",
"op",
"when",
":list",
"response",
".",
"split",
"(",
"\"\\n\"",
")",
".",
"map",
"do",
"|",
"line",
"|",
"a",
"=",
"line",
".",
"split",
"(",
"/",
"\\s",
"/",
",",
"3",
")",
"[",
"a",
"[",
"0",
"]",
",",
"a",
"[",
"1",
"]",
".",
"to_i",
",",
"a",
"[",
"2",
"]",
"]",
"end",
"else",
"response",
"end",
"end"
] | Manipulate the VCL configuration
.vcl :load, <configname>, <filename>
.vcl :inline, <configname>, <quoted_VCLstring>
.vcl :use, <configname>
.vcl :discard, <configname>
.vcl :list
.vcl :show, <configname>
Returns an array of VCL configurations for :list, and the servers
response as string otherwise
Ex.:
v = Varnish::Client.new
v.vcl :list
#=> [["active", 0, "boot"]]
v.vcl :load, "newconf", "/etc/varnish/myconf.vcl" | [
"Manipulate",
"the",
"VCL",
"configuration"
] | 7c1b2668da27d663d904c9646ef0d492830fe3de | https://github.com/schoefmann/klarlack/blob/7c1b2668da27d663d904c9646ef0d492830fe3de/lib/varnish/client.rb#L105-L116 | train |
schoefmann/klarlack | lib/varnish/client.rb | Varnish.Client.purge | def purge(*args)
c = 'purge'
c << ".#{args.shift}" if [:url, :hash, :list].include?(args.first)
response = cmd(c, *args)
case c
when 'purge.list'
response.split("\n").map do |line|
a = line.split("\t")
[a[0].to_i, a[1]]
end
else
bool response
end
end | ruby | def purge(*args)
c = 'purge'
c << ".#{args.shift}" if [:url, :hash, :list].include?(args.first)
response = cmd(c, *args)
case c
when 'purge.list'
response.split("\n").map do |line|
a = line.split("\t")
[a[0].to_i, a[1]]
end
else
bool response
end
end | [
"def",
"purge",
"(",
"*",
"args",
")",
"c",
"=",
"'purge'",
"c",
"<<",
"\".#{args.shift}\"",
"if",
"[",
":url",
",",
":hash",
",",
":list",
"]",
".",
"include?",
"(",
"args",
".",
"first",
")",
"response",
"=",
"cmd",
"(",
"c",
",",
"*",
"args",
")",
"case",
"c",
"when",
"'purge.list'",
"response",
".",
"split",
"(",
"\"\\n\"",
")",
".",
"map",
"do",
"|",
"line",
"|",
"a",
"=",
"line",
".",
"split",
"(",
"\"\\t\"",
")",
"[",
"a",
"[",
"0",
"]",
".",
"to_i",
",",
"a",
"[",
"1",
"]",
"]",
"end",
"else",
"bool",
"response",
"end",
"end"
] | Purge objects from the cache or show the purge queue.
Takes one or two arguments:
1.
.purge :url, <regexp>
.purge :hash, <regexp>
<regexp> is a string containing a varnish compatible regexp
2.
.purge <costum-purge-conditions>
.purge :list
Returns true for purging, returns an array containing the purge queue
for :list
Ex.:
v = Varnish::Client.new
v.purge :url, '.*'
v.purge "req.http.host ~ www.foo.com && req.http.url ~ images"
v.purge :list
#=> [[1, "req.url ~ .*"]] | [
"Purge",
"objects",
"from",
"the",
"cache",
"or",
"show",
"the",
"purge",
"queue",
"."
] | 7c1b2668da27d663d904c9646ef0d492830fe3de | https://github.com/schoefmann/klarlack/blob/7c1b2668da27d663d904c9646ef0d492830fe3de/lib/varnish/client.rb#L144-L157 | train |
schoefmann/klarlack | lib/varnish/client.rb | Varnish.Client.stats | def stats
result = cmd("stats")
Hash[*result.split("\n").map { |line|
stat = line.strip!.split(/\s+/, 2)
[stat[1], stat[0].to_i]
}.flatten
]
end | ruby | def stats
result = cmd("stats")
Hash[*result.split("\n").map { |line|
stat = line.strip!.split(/\s+/, 2)
[stat[1], stat[0].to_i]
}.flatten
]
end | [
"def",
"stats",
"result",
"=",
"cmd",
"(",
"\"stats\"",
")",
"Hash",
"[",
"*",
"result",
".",
"split",
"(",
"\"\\n\"",
")",
".",
"map",
"{",
"|",
"line",
"|",
"stat",
"=",
"line",
".",
"strip!",
".",
"split",
"(",
"/",
"\\s",
"/",
",",
"2",
")",
"[",
"stat",
"[",
"1",
"]",
",",
"stat",
"[",
"0",
"]",
".",
"to_i",
"]",
"}",
".",
"flatten",
"]",
"end"
] | Returns a hash of status information
Ex.:
v = Varnish::Client.new
v.stats
=> {"Total header bytes"=>0, "Cache misses"=>0 ...} | [
"Returns",
"a",
"hash",
"of",
"status",
"information"
] | 7c1b2668da27d663d904c9646ef0d492830fe3de | https://github.com/schoefmann/klarlack/blob/7c1b2668da27d663d904c9646ef0d492830fe3de/lib/varnish/client.rb#L170-L177 | train |
barkerest/barkest_core | app/models/barkest_core/pdf_table_builder.rb | BarkestCore.PdfTableBuilder.last_column | def last_column
max = 0
@data.each do |row|
max = row.length if max < row.length
end
max - 1
end | ruby | def last_column
max = 0
@data.each do |row|
max = row.length if max < row.length
end
max - 1
end | [
"def",
"last_column",
"max",
"=",
"0",
"@data",
".",
"each",
"do",
"|",
"row",
"|",
"max",
"=",
"row",
".",
"length",
"if",
"max",
"<",
"row",
".",
"length",
"end",
"max",
"-",
"1",
"end"
] | Gets the last column in the table. | [
"Gets",
"the",
"last",
"column",
"in",
"the",
"table",
"."
] | 3eeb025ec870888cacbc9bae252a39ebf9295f61 | https://github.com/barkerest/barkest_core/blob/3eeb025ec870888cacbc9bae252a39ebf9295f61/app/models/barkest_core/pdf_table_builder.rb#L75-L81 | train |
barkerest/barkest_core | app/models/barkest_core/pdf_table_builder.rb | BarkestCore.PdfTableBuilder.build_column | def build_column(start_column = nil)
if block_given?
raise StandardError.new('build_column block called within row block') if @in_row
raise StandardError.new('build_column called without valid argument') unless start_column.is_a?(Numeric)
backup_col_start = @col_start
backup_col_offset = @col_offset
backup_row_offset = @row_offset
@col_start = start_column.to_i
@col_offset = @col_start
@row_offset = 0
yield
@col_start = backup_col_start
@col_offset = backup_col_offset
@row_offset = backup_row_offset
end
@col_start
end | ruby | def build_column(start_column = nil)
if block_given?
raise StandardError.new('build_column block called within row block') if @in_row
raise StandardError.new('build_column called without valid argument') unless start_column.is_a?(Numeric)
backup_col_start = @col_start
backup_col_offset = @col_offset
backup_row_offset = @row_offset
@col_start = start_column.to_i
@col_offset = @col_start
@row_offset = 0
yield
@col_start = backup_col_start
@col_offset = backup_col_offset
@row_offset = backup_row_offset
end
@col_start
end | [
"def",
"build_column",
"(",
"start_column",
"=",
"nil",
")",
"if",
"block_given?",
"raise",
"StandardError",
".",
"new",
"(",
"'build_column block called within row block'",
")",
"if",
"@in_row",
"raise",
"StandardError",
".",
"new",
"(",
"'build_column called without valid argument'",
")",
"unless",
"start_column",
".",
"is_a?",
"(",
"Numeric",
")",
"backup_col_start",
"=",
"@col_start",
"backup_col_offset",
"=",
"@col_offset",
"backup_row_offset",
"=",
"@row_offset",
"@col_start",
"=",
"start_column",
".",
"to_i",
"@col_offset",
"=",
"@col_start",
"@row_offset",
"=",
"0",
"yield",
"@col_start",
"=",
"backup_col_start",
"@col_offset",
"=",
"backup_col_offset",
"@row_offset",
"=",
"backup_row_offset",
"end",
"@col_start",
"end"
] | Builds data starting at the specified column.
The +start_column+ is the first column you want to be building.
When you start a new row inside of a build_column block, the new row
starts at this same column. | [
"Builds",
"data",
"starting",
"at",
"the",
"specified",
"column",
"."
] | 3eeb025ec870888cacbc9bae252a39ebf9295f61 | https://github.com/barkerest/barkest_core/blob/3eeb025ec870888cacbc9bae252a39ebf9295f61/app/models/barkest_core/pdf_table_builder.rb#L89-L108 | train |
barkerest/barkest_core | app/models/barkest_core/pdf_table_builder.rb | BarkestCore.PdfTableBuilder.row | def row(options = {}, &block)
raise StandardError.new('row called within row block') if @in_row
@in_row = true
@col_offset = @col_start
options = change_row(options || {})
@row_cell_options = @base_cell_options.merge(options)
fill_cells(@row_offset, @col_offset)
# skip placeholders when starting a new row.
if @data[@row_offset]
while @data[@row_offset][@col_offset] == :span_placeholder
@col_offset += 1
end
end
yield if block_given?
@in_row = false
@row_offset += 1
@row_cell_options = nil
end | ruby | def row(options = {}, &block)
raise StandardError.new('row called within row block') if @in_row
@in_row = true
@col_offset = @col_start
options = change_row(options || {})
@row_cell_options = @base_cell_options.merge(options)
fill_cells(@row_offset, @col_offset)
# skip placeholders when starting a new row.
if @data[@row_offset]
while @data[@row_offset][@col_offset] == :span_placeholder
@col_offset += 1
end
end
yield if block_given?
@in_row = false
@row_offset += 1
@row_cell_options = nil
end | [
"def",
"row",
"(",
"options",
"=",
"{",
"}",
",",
"&",
"block",
")",
"raise",
"StandardError",
".",
"new",
"(",
"'row called within row block'",
")",
"if",
"@in_row",
"@in_row",
"=",
"true",
"@col_offset",
"=",
"@col_start",
"options",
"=",
"change_row",
"(",
"options",
"||",
"{",
"}",
")",
"@row_cell_options",
"=",
"@base_cell_options",
".",
"merge",
"(",
"options",
")",
"fill_cells",
"(",
"@row_offset",
",",
"@col_offset",
")",
"if",
"@data",
"[",
"@row_offset",
"]",
"while",
"@data",
"[",
"@row_offset",
"]",
"[",
"@col_offset",
"]",
"==",
":span_placeholder",
"@col_offset",
"+=",
"1",
"end",
"end",
"yield",
"if",
"block_given?",
"@in_row",
"=",
"false",
"@row_offset",
"+=",
"1",
"@row_cell_options",
"=",
"nil",
"end"
] | Builds a row in the table.
Valid options:
row::
Defines the row you want to start on. If not set, then it uses #current_row.
Additional options are merged with the base cell options for this row.
When it completes, the #current_row is set to 1 more than the row we started on. | [
"Builds",
"a",
"row",
"in",
"the",
"table",
"."
] | 3eeb025ec870888cacbc9bae252a39ebf9295f61 | https://github.com/barkerest/barkest_core/blob/3eeb025ec870888cacbc9bae252a39ebf9295f61/app/models/barkest_core/pdf_table_builder.rb#L122-L145 | train |
barkerest/barkest_core | app/models/barkest_core/pdf_table_builder.rb | BarkestCore.PdfTableBuilder.subtable | def subtable(cell_options = {}, options = {}, &block)
raise StandardError.new('subtable called outside of row block') unless @in_row
cell cell_options || {} do
PdfTableBuilder.new(@doc, options || {}, &block).to_table
end
end | ruby | def subtable(cell_options = {}, options = {}, &block)
raise StandardError.new('subtable called outside of row block') unless @in_row
cell cell_options || {} do
PdfTableBuilder.new(@doc, options || {}, &block).to_table
end
end | [
"def",
"subtable",
"(",
"cell_options",
"=",
"{",
"}",
",",
"options",
"=",
"{",
"}",
",",
"&",
"block",
")",
"raise",
"StandardError",
".",
"new",
"(",
"'subtable called outside of row block'",
")",
"unless",
"@in_row",
"cell",
"cell_options",
"||",
"{",
"}",
"do",
"PdfTableBuilder",
".",
"new",
"(",
"@doc",
",",
"options",
"||",
"{",
"}",
",",
"&",
"block",
")",
".",
"to_table",
"end",
"end"
] | Builds a subtable within the current row.
The +cell_options+ are passed to the current cell.
The +options+ are passed to the new TableBuilder. | [
"Builds",
"a",
"subtable",
"within",
"the",
"current",
"row",
"."
] | 3eeb025ec870888cacbc9bae252a39ebf9295f61 | https://github.com/barkerest/barkest_core/blob/3eeb025ec870888cacbc9bae252a39ebf9295f61/app/models/barkest_core/pdf_table_builder.rb#L153-L158 | train |
barkerest/barkest_core | app/models/barkest_core/pdf_table_builder.rb | BarkestCore.PdfTableBuilder.cells | def cells(options = {}, &block)
cell_regex = /^cell_([0-9]+)_/
options ||= { }
result = block_given? ? yield : (options[:values] || [''])
cell_options = result.map { {} }
common_options = {}
options.each do |k,v|
# if the option starts with 'cell_#_' then apply it accordingly.
if (m = cell_regex.match(k.to_s))
k = k.to_s[m[0].length..-1].to_sym
cell_options[m[1].to_i - 1][k] = v
# the 'column' option applies only to the first cell.
elsif k == :column
cell_options[0][k] = v
# everything else applies to all cells, unless overridden explicitly.
elsif k != :values
common_options[k] = v
end
end
cell_options.each_with_index do |opt,idx|
cell common_options.merge(opt).merge( { value: result[idx] } )
end
end | ruby | def cells(options = {}, &block)
cell_regex = /^cell_([0-9]+)_/
options ||= { }
result = block_given? ? yield : (options[:values] || [''])
cell_options = result.map { {} }
common_options = {}
options.each do |k,v|
# if the option starts with 'cell_#_' then apply it accordingly.
if (m = cell_regex.match(k.to_s))
k = k.to_s[m[0].length..-1].to_sym
cell_options[m[1].to_i - 1][k] = v
# the 'column' option applies only to the first cell.
elsif k == :column
cell_options[0][k] = v
# everything else applies to all cells, unless overridden explicitly.
elsif k != :values
common_options[k] = v
end
end
cell_options.each_with_index do |opt,idx|
cell common_options.merge(opt).merge( { value: result[idx] } )
end
end | [
"def",
"cells",
"(",
"options",
"=",
"{",
"}",
",",
"&",
"block",
")",
"cell_regex",
"=",
"/",
"/",
"options",
"||=",
"{",
"}",
"result",
"=",
"block_given?",
"?",
"yield",
":",
"(",
"options",
"[",
":values",
"]",
"||",
"[",
"''",
"]",
")",
"cell_options",
"=",
"result",
".",
"map",
"{",
"{",
"}",
"}",
"common_options",
"=",
"{",
"}",
"options",
".",
"each",
"do",
"|",
"k",
",",
"v",
"|",
"if",
"(",
"m",
"=",
"cell_regex",
".",
"match",
"(",
"k",
".",
"to_s",
")",
")",
"k",
"=",
"k",
".",
"to_s",
"[",
"m",
"[",
"0",
"]",
".",
"length",
"..",
"-",
"1",
"]",
".",
"to_sym",
"cell_options",
"[",
"m",
"[",
"1",
"]",
".",
"to_i",
"-",
"1",
"]",
"[",
"k",
"]",
"=",
"v",
"elsif",
"k",
"==",
":column",
"cell_options",
"[",
"0",
"]",
"[",
"k",
"]",
"=",
"v",
"elsif",
"k",
"!=",
":values",
"common_options",
"[",
"k",
"]",
"=",
"v",
"end",
"end",
"cell_options",
".",
"each_with_index",
"do",
"|",
"opt",
",",
"idx",
"|",
"cell",
"common_options",
".",
"merge",
"(",
"opt",
")",
".",
"merge",
"(",
"{",
"value",
":",
"result",
"[",
"idx",
"]",
"}",
")",
"end",
"end"
] | Creates multiple cells.
Individual cells can be given options by prefixing the keys with 'cell_#' where # is the cell number (starting at 1).
See #cell for valid options. | [
"Creates",
"multiple",
"cells",
"."
] | 3eeb025ec870888cacbc9bae252a39ebf9295f61 | https://github.com/barkerest/barkest_core/blob/3eeb025ec870888cacbc9bae252a39ebf9295f61/app/models/barkest_core/pdf_table_builder.rb#L203-L232 | train |
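An illustrative fragment for the builder rows above, combining `row` and `cells` with the `cell_#_` option prefix from the docstring. The constructor call, its defaults, and the surrounding Prawn document (`pdf`) are assumptions, not taken from the gem's documentation.

# `pdf` is assumed to be a Prawn::Document; builder options are left at their defaults.
t = BarkestCore::PdfTableBuilder.new(pdf)

t.row(font_style: :bold) do
  t.cells(values: %w(Name Qty Price))
end

t.row do
  # :align applies to every cell; cell_3_align overrides it for the third cell only.
  t.cells(values: ['Widget', '3', '4.50'], align: :left, cell_3_align: :right)
end

data = t.to_table   # assembled cell data, handed to Prawn for rendering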
barkerest/barkest_core | app/models/barkest_core/pdf_table_builder.rb | BarkestCore.PdfTableBuilder.cell | def cell(options = {}, &block)
raise StandardError.new('cell called outside of row block') unless @in_row
options = @row_cell_options.merge(options || {})
options = change_col(options)
result = block_given? ? yield : (options[:value] || '')
options.except!(:value)
set_cell(result, nil, nil, options)
end | ruby | def cell(options = {}, &block)
raise StandardError.new('cell called outside of row block') unless @in_row
options = @row_cell_options.merge(options || {})
options = change_col(options)
result = block_given? ? yield : (options[:value] || '')
options.except!(:value)
set_cell(result, nil, nil, options)
end | [
"def",
"cell",
"(",
"options",
"=",
"{",
"}",
",",
"&",
"block",
")",
"raise",
"StandardError",
".",
"new",
"(",
"'cell called outside of row block'",
")",
"unless",
"@in_row",
"options",
"=",
"@row_cell_options",
".",
"merge",
"(",
"options",
"||",
"{",
"}",
")",
"options",
"=",
"change_col",
"(",
"options",
")",
"result",
"=",
"block_given?",
"?",
"yield",
":",
"(",
"options",
"[",
":value",
"]",
"||",
"''",
")",
"options",
".",
"except!",
"(",
":value",
")",
"set_cell",
"(",
"result",
",",
"nil",
",",
"nil",
",",
"options",
")",
"end"
] | Generates a cell in the current row.
Valid options:
value::
The value to put in the cell, unless a code block is provided, in which case the result of the code block is used.
rowspan::
The number of rows for this cell to cover.
colspan::
The number of columns for this cell to cover.
Additional options are embedded and passed on to Prawn::Table, see {Prawn PDF Table Manual}[http://prawnpdf.org/prawn-table-manual.pdf] for more information. | [
"Generates",
"a",
"cell",
"in",
"the",
"current",
"row",
"."
] | 3eeb025ec870888cacbc9bae252a39ebf9295f61 | https://github.com/barkerest/barkest_core/blob/3eeb025ec870888cacbc9bae252a39ebf9295f61/app/models/barkest_core/pdf_table_builder.rb#L275-L287 | train |
barkerest/barkest_core | app/models/barkest_core/pdf_table_builder.rb | BarkestCore.PdfTableBuilder.fix_row_widths | def fix_row_widths
fill_cells(@row_offset - 1, 0)
max = 0
@data.each_with_index do |row|
max = row.length unless max >= row.length
end
@data.each_with_index do |row,idx|
if row.length < max
row = row + [ @base_cell_options.merge({content: ''}) ] * (max - row.length)
@data[idx] = row
end
end
end | ruby | def fix_row_widths
fill_cells(@row_offset - 1, 0)
max = 0
@data.each_with_index do |row|
max = row.length unless max >= row.length
end
@data.each_with_index do |row,idx|
if row.length < max
row = row + [ @base_cell_options.merge({content: ''}) ] * (max - row.length)
@data[idx] = row
end
end
end | [
"def",
"fix_row_widths",
"fill_cells",
"(",
"@row_offset",
"-",
"1",
",",
"0",
")",
"max",
"=",
"0",
"@data",
".",
"each_with_index",
"do",
"|",
"row",
"|",
"max",
"=",
"row",
".",
"length",
"unless",
"max",
">=",
"row",
".",
"length",
"end",
"@data",
".",
"each_with_index",
"do",
"|",
"row",
",",
"idx",
"|",
"if",
"row",
".",
"length",
"<",
"max",
"row",
"=",
"row",
"+",
"[",
"@base_cell_options",
".",
"merge",
"(",
"{",
"content",
":",
"''",
"}",
")",
"]",
"*",
"(",
"max",
"-",
"row",
".",
"length",
")",
"@data",
"[",
"idx",
"]",
"=",
"row",
"end",
"end",
"end"
] | ensure that all 2nd level arrays are the same size. | [
"ensure",
"that",
"all",
"2nd",
"level",
"arrays",
"are",
"the",
"same",
"size",
"."
] | 3eeb025ec870888cacbc9bae252a39ebf9295f61 | https://github.com/barkerest/barkest_core/blob/3eeb025ec870888cacbc9bae252a39ebf9295f61/app/models/barkest_core/pdf_table_builder.rb#L387-L404 | train |
ekosz/Erics-Tic-Tac-Toe | lib/tic_tac_toe/board.rb | TicTacToe.Board.empty_positions | def empty_positions(&block)
positions = []
each_position do |row, column|
next if get_cell(row, column)
yield(row, column) if block_given?
positions << [row, column]
end
positions
end | ruby | def empty_positions(&block)
positions = []
each_position do |row, column|
next if get_cell(row, column)
yield(row, column) if block_given?
positions << [row, column]
end
positions
end | [
"def",
"empty_positions",
"(",
"&",
"block",
")",
"positions",
"=",
"[",
"]",
"each_position",
"do",
"|",
"row",
",",
"column",
"|",
"next",
"if",
"get_cell",
"(",
"row",
",",
"column",
")",
"yield",
"(",
"row",
",",
"column",
")",
"if",
"block_given?",
"positions",
"<<",
"[",
"row",
",",
"column",
"]",
"end",
"positions",
"end"
] | Returns the corners of the empty cells | [
"Returns",
"the",
"corners",
"of",
"the",
"empty",
"cells"
] | d0c2580974c12187ec9cf11030b72eda6cae3d97 | https://github.com/ekosz/Erics-Tic-Tac-Toe/blob/d0c2580974c12187ec9cf11030b72eda6cae3d97/lib/tic_tac_toe/board.rb#L21-L29 | train |
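A small sketch of walking the open cells with `empty_positions` above; `board` is assumed to be a `TicTacToe::Board` instance.

open = board.empty_positions do |row, column|
  puts "cell #{row},#{column} is still open"   # runs once per empty cell
end
open   # => array of [row, column] pairs for every empty cell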
ekosz/Erics-Tic-Tac-Toe | lib/tic_tac_toe/board.rb | TicTacToe.Board.solved? | def solved?
letter = won_across?
return letter if letter
letter = won_up_and_down?
return letter if letter
letter = won_diagonally?
return letter if letter
false
end | ruby | def solved?
letter = won_across?
return letter if letter
letter = won_up_and_down?
return letter if letter
letter = won_diagonally?
return letter if letter
false
end | [
"def",
"solved?",
"letter",
"=",
"won_across?",
"return",
"letter",
"if",
"letter",
"letter",
"=",
"won_up_and_down?",
"return",
"letter",
"if",
"letter",
"letter",
"=",
"won_diagonally?",
"return",
"letter",
"if",
"letter",
"false",
"end"
] | Returns true if the board has a wining pattern | [
"Returns",
"true",
"if",
"the",
"board",
"has",
"a",
"wining",
"pattern"
] | d0c2580974c12187ec9cf11030b72eda6cae3d97 | https://github.com/ekosz/Erics-Tic-Tac-Toe/blob/d0c2580974c12187ec9cf11030b72eda6cae3d97/lib/tic_tac_toe/board.rb#L64-L72 | train |
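A sketch of checking for a winner with `solved?` above; as the code shows, it returns the winning letter when a line is complete and false otherwise. `board` is the same assumed instance as before.

if (winner = board.solved?)
  puts "#{winner} wins"
else
  puts 'no winner yet'
end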
buzzware/buzztools | lib/buzztools/config.rb | Buzztools.Config.read | def read(aSource,&aBlock)
default_values.each do |k,v|
done = false
if block_given? && ((newv = yield(k,v,aSource && aSource[k])) != nil)
self[k] = newv
done = true
end
copy_item(aSource,k) if !done && aSource && !aSource[k].nil?
end
self
end | ruby | def read(aSource,&aBlock)
default_values.each do |k,v|
done = false
if block_given? && ((newv = yield(k,v,aSource && aSource[k])) != nil)
self[k] = newv
done = true
end
copy_item(aSource,k) if !done && aSource && !aSource[k].nil?
end
self
end | [
"def",
"read",
"(",
"aSource",
",",
"&",
"aBlock",
")",
"default_values",
".",
"each",
"do",
"|",
"k",
",",
"v",
"|",
"done",
"=",
"false",
"if",
"block_given?",
"&&",
"(",
"(",
"newv",
"=",
"yield",
"(",
"k",
",",
"v",
",",
"aSource",
"&&",
"aSource",
"[",
"k",
"]",
")",
")",
"!=",
"nil",
")",
"self",
"[",
"k",
"]",
"=",
"newv",
"done",
"=",
"true",
"end",
"copy_item",
"(",
"aSource",
",",
"k",
")",
"if",
"!",
"done",
"&&",
"aSource",
"&&",
"!",
"aSource",
"[",
"k",
"]",
".",
"nil?",
"end",
"self",
"end"
] | aBlock allows values to be filtered based on key,default and new values | [
"aBlock",
"allows",
"values",
"to",
"be",
"filtered",
"based",
"on",
"key",
"default",
"and",
"new",
"values"
] | 0823721974d521330ceffe099368ed8cac6209c3 | https://github.com/buzzware/buzztools/blob/0823721974d521330ceffe099368ed8cac6209c3/lib/buzztools/config.rb#L15-L25 | train |
buzzware/buzztools | lib/buzztools/config.rb | Buzztools.Config.reset | def reset
self.clear
me = self
@default_values.each {|n,v| me[n] = v.is_a?(Class) ? nil : v}
end | ruby | def reset
self.clear
me = self
@default_values.each {|n,v| me[n] = v.is_a?(Class) ? nil : v}
end | [
"def",
"reset",
"self",
".",
"clear",
"me",
"=",
"self",
"@default_values",
".",
"each",
"{",
"|",
"n",
",",
"v",
"|",
"me",
"[",
"n",
"]",
"=",
"v",
".",
"is_a?",
"(",
"Class",
")",
"?",
"nil",
":",
"v",
"}",
"end"
] | reset values back to defaults | [
"reset",
"values",
"back",
"to",
"defaults"
] | 0823721974d521330ceffe099368ed8cac6209c3 | https://github.com/buzzware/buzztools/blob/0823721974d521330ceffe099368ed8cac6209c3/lib/buzztools/config.rb#L28-L32 | train |
cbetta/snapshotify | lib/snapshotify/writer.rb | Snapshotify.Writer.ensure_directory | def ensure_directory
dir = File.dirname(full_filename)
unless File.directory?(dir)
FileUtils.mkdir_p(dir)
end
end | ruby | def ensure_directory
dir = File.dirname(full_filename)
unless File.directory?(dir)
FileUtils.mkdir_p(dir)
end
end | [
"def",
"ensure_directory",
"dir",
"=",
"File",
".",
"dirname",
"(",
"full_filename",
")",
"unless",
"File",
".",
"directory?",
"(",
"dir",
")",
"FileUtils",
".",
"mkdir_p",
"(",
"dir",
")",
"end",
"end"
] | Ensure the directory for the file exists | [
"Ensure",
"the",
"directory",
"for",
"the",
"file",
"exists"
] | 7f5553f4281ffc5bf0e54da1141574bd15af45b6 | https://github.com/cbetta/snapshotify/blob/7f5553f4281ffc5bf0e54da1141574bd15af45b6/lib/snapshotify/writer.rb#L29-L34 | train |
cbetta/snapshotify | lib/snapshotify/writer.rb | Snapshotify.Writer.filename | def filename
# Based on the path of the file
path = resource.url.uri.path
# It's either an index.html file
# if the path ends with a slash
if path.end_with?('/')
return path + 'index.html'
# Or it's also an index.html if it ends
# without a slah yet is not a file with an
# extension
elsif !path.split('/').last.include?(".")
return path + '/index.html'
end
# Alternative, the filename is the path as described
path
end | ruby | def filename
# Based on the path of the file
path = resource.url.uri.path
# It's either an index.html file
# if the path ends with a slash
if path.end_with?('/')
return path + 'index.html'
# Or it's also an index.html if it ends
# without a slah yet is not a file with an
# extension
elsif !path.split('/').last.include?(".")
return path + '/index.html'
end
# Alternative, the filename is the path as described
path
end | [
"def",
"filename",
"path",
"=",
"resource",
".",
"url",
".",
"uri",
".",
"path",
"if",
"path",
".",
"end_with?",
"(",
"'/'",
")",
"return",
"path",
"+",
"'index.html'",
"elsif",
"!",
"path",
".",
"split",
"(",
"'/'",
")",
".",
"last",
".",
"include?",
"(",
"\".\"",
")",
"return",
"path",
"+",
"'/index.html'",
"end",
"path",
"end"
] | The actual name of the file | [
"The",
"actual",
"name",
"of",
"the",
"file"
] | 7f5553f4281ffc5bf0e54da1141574bd15af45b6 | https://github.com/cbetta/snapshotify/blob/7f5553f4281ffc5bf0e54da1141574bd15af45b6/lib/snapshotify/writer.rb#L57-L72 | train |
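The branches in `filename` above reduce to a simple mapping from resource path to output file; the hash below restates it with illustrative paths (the results follow directly from the logic shown).

{
  '/blog/'          => '/blog/index.html',   # trailing slash
  '/blog'           => '/blog/index.html',   # last segment has no "."
  '/assets/app.css' => '/assets/app.css'     # extension present, returned unchanged
}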