Dataset columns:

column           dtype           min        max
id               stringlengths   1          8
text             stringlengths   72         9.81M
addition_count   int64           0          10k
commit_subject   stringlengths   0          3.7k
deletion_count   int64           0          8.43k
file_extension   stringlengths   0          32
lang             stringlengths   1          94
license          stringclasses   10 values
repo_name        stringlengths   9          59
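Each row's `text` field bundles the file name, the pre-change file contents, the commit message, and the unified diff into one string, delimited by the `<NME>`, `<BEF>`, `<MSG>`, and `<DFF>` markers visible in the rows below (`ADDFILE` stands in for the pre-change contents of newly created files). The sketch below shows one way such a field could be split back into its parts; the marker layout and the field names are inferred from the rows shown here, not taken from an official loader for this dataset.

```python
import re

# Marker-delimited layout observed in the rows below (an assumption, not an official schema):
#   <NME> file name   <BEF> file contents before the commit (or "ADDFILE" for new files)
#   <MSG> commit message   <DFF> unified diff
ROW_PATTERN = re.compile(
    r"<NME>\s*(?P<name>.*?)\s*"
    r"<BEF>\s*(?P<before>.*?)\s*"
    r"<MSG>\s*(?P<message>.*?)\s*"
    r"<DFF>\s*(?P<diff>.*)",
    re.DOTALL,
)


def parse_row_text(text: str) -> dict:
    """Split a row's `text` field into its four marker-delimited parts."""
    match = ROW_PATTERN.search(text)
    if match is None:
        raise ValueError("text field does not follow the <NME>/<BEF>/<MSG>/<DFF> layout")
    parts = match.groupdict()
    parts["is_new_file"] = parts["before"].strip() == "ADDFILE"
    return parts


# Usage with a short, hypothetical stand-in row:
sample = (
    "<NME> experiment.rb <BEF> ADDFILE <MSG> add experiment model "
    "<DFF> @@ -0,0 +1 @@\n+class Experiment; end"
)
print(parse_row_text(sample)["name"])  # => "experiment.rb"
```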
10069650
<NME> experiment.rb <BEF> # frozen_string_literal: true module Split class Experiment attr_accessor :name attr_accessor :goals attr_accessor :alternative_probabilities attr_accessor :metadata attr_reader :alternatives attr_reader :resettable DEFAULT_OPTIONS = { resettable: true } def self.find(name) Split.cache(:experiments, name) do return unless Split.redis.exists?(name) Experiment.new(name).tap { |exp| exp.load_from_redis } end end def initialize(name, options = {}) options = DEFAULT_OPTIONS.merge(options) @name = name.to_s extract_alternatives_from_options(options) end def self.finished_key(key) "#{key}:finished" end def set_alternatives_and_options(options) options_with_defaults = DEFAULT_OPTIONS.merge( options.reject { |k, v| v.nil? } ) self.alternatives = options_with_defaults[:alternatives] self.goals = options_with_defaults[:goals] self.resettable = options_with_defaults[:resettable] self.algorithm = options_with_defaults[:algorithm] self.metadata = options_with_defaults[:metadata] end def extract_alternatives_from_options(options) alts = options[:alternatives] || [] if alts.length == 1 if alts[0].is_a? Hash alts = alts[0].map { |k, v| { k => v } } end end if alts.empty? exp_config = Split.configuration.experiment_for(name) if exp_config alts = load_alternatives_from_configuration options[:goals] = Split::GoalsCollection.new(@name).load_from_configuration options[:metadata] = load_metadata_from_configuration options[:resettable] = exp_config[:resettable] options[:algorithm] = exp_config[:algorithm] end end end end self.alternatives = alts self.goals = options[:goals] self.algorithm = options[:algorithm] self.resettable = options[:resettable] # calculate probability that each alternative is the winner @alternative_probabilities = {} def save validate! if new_record? start unless Split.configuration.start_manually persist_experiment_configuration elsif experiment_configuration_has_changed? reset unless Split.configuration.reset_manually persist_experiment_configuration end redis.hmset(experiment_config_key, :resettable, resettable.to_s, :algorithm, algorithm.to_s) self end def validate! if @alternatives.empty? && Split.configuration.experiment_for(@name).nil? raise ExperimentNotFound.new("Experiment #{@name} not found") end @alternatives.each { |a| a.validate! } goals_collection.validate! end def new_record? ExperimentCatalog.find(name).nil? end def ==(obj) self.name == obj.name end def [](name) alternatives.find { |a| a.name == name } end def algorithm @algorithm ||= Split.configuration.algorithm end def algorithm=(algorithm) @algorithm = algorithm.is_a?(String) ? algorithm.constantize : algorithm end def resettable=(resettable) @resettable = resettable.is_a?(String) ? resettable == "true" : resettable end def alternatives=(alts) @alternatives = alts.map do |alternative| if alternative.kind_of?(Split::Alternative) alternative else Split::Alternative.new(alternative, @name) end end end def winner Split.cache(:experiment_winner, name) do experiment_winner = redis.hget(:experiment_winner, name) if experiment_winner Split::Alternative.new(experiment_winner, name) else nil end end end def has_winner? return @has_winner if defined? @has_winner @has_winner = !winner.nil? 
end def winner=(winner_name) redis.hset(:experiment_winner, name, winner_name.to_s) @has_winner = true Split.configuration.on_experiment_winner_choose.call(self) end def participant_count alternatives.inject(0) { |sum, a| sum + a.participant_count } end def control alternatives.first end def reset_winner redis.hdel(:experiment_winner, name) @has_winner = false Split::Cache.clear_key(@name) end def start redis.hset(:experiment_start_times, @name, Time.now.to_i) end def start_time Split.cache(:experiment_start_times, @name) do t = redis.hget(:experiment_start_times, @name) if t # Check if stored time is an integer if t =~ /^[-+]?[0-9]+$/ Time.at(t.to_i) else Time.parse(t) end end end end def next_alternative winner || random_alternative end def random_alternative if alternatives.length > 1 algorithm.choose_alternative(self) else alternatives.first end end def version @version ||= (redis.get("#{name}:version").to_i || 0) end def increment_version @version = redis.incr("#{name}:version") end def key if version.to_i > 0 "#{name}:#{version}" else name end end def goals_key "#{name}:goals" end def finished_key self.class.finished_key(key) end def metadata_key "#{name}:metadata" end def resettable? resettable end def reset Split.configuration.on_before_experiment_reset.call(self) Split::Cache.clear_key(@name) alternatives.each(&:reset) reset_winner Split.configuration.on_experiment_reset.call(self) increment_version end def delete Split.configuration.on_before_experiment_delete.call(self) if Split.configuration.start_manually redis.hdel(:experiment_start_times, @name) end reset_winner redis.srem(:experiments, name) remove_experiment_cohorting remove_experiment_configuration Split.configuration.on_experiment_delete.call(self) increment_version end def delete_metadata redis.del(metadata_key) end def load_from_redis exp_config = redis.hgetall(experiment_config_key) options = { resettable: exp_config["resettable"], algorithm: exp_config["algorithm"], def load_from_redis exp_config = Split.redis.hgetall(experiment_config_key) self.resettable = exp_config['resettable'] self.algorithm = exp_config['algorithm'] self.alternatives = load_alternatives_from_redis self.goals = Split::GoalsCollection.new(@name).load_from_redis self.metadata = load_metadata_from_redis end def calc_winning_alternatives else goals.each do |goal| self.estimate_winning_alternative(goal) end end self.calc_time = intervals_since_epoch self.save end end def estimate_winning_alternative(goal = nil) # initialize a hash of beta distributions based on the alternatives' conversion rates beta_params = calc_beta_params(goal) winning_alternatives = [] Split.configuration.beta_probability_simulations.times do # calculate simulated conversion rates from the beta distributions simulated_cr_hash = calc_simulated_conversion_rates(beta_params) winning_alternative = find_simulated_winner(simulated_cr_hash) # push the winning pair to the winning_alternatives array winning_alternatives.push(winning_alternative) end winning_counts = count_simulated_wins(winning_alternatives) @alternative_probabilities = calc_alternative_probabilities(winning_counts, Split.configuration.beta_probability_simulations) write_to_alternatives(goal) self.save end def write_to_alternatives(goal = nil) alternatives.each do |alternative| alternative.set_p_winner(@alternative_probabilities[alternative], goal) end end def calc_alternative_probabilities(winning_counts, number_of_simulations) alternative_probabilities = {} winning_counts.each do |alternative, wins| 
alternative_probabilities[alternative] = wins / number_of_simulations.to_f end alternative_probabilities end def count_simulated_wins(winning_alternatives) # initialize a hash to keep track of winning alternative in simulations winning_counts = {} alternatives.each do |alternative| winning_counts[alternative] = 0 end # count number of times each alternative won, calculate probabilities, place in hash winning_alternatives.each do |alternative| winning_counts[alternative] += 1 end winning_counts end def find_simulated_winner(simulated_cr_hash) # figure out which alternative had the highest simulated conversion rate winning_pair = ["", 0.0] simulated_cr_hash.each do |alternative, rate| if rate > winning_pair[1] winning_pair = [alternative, rate] end end winner = winning_pair[0] winner end def calc_simulated_conversion_rates(beta_params) simulated_cr_hash = {} # create a hash which has the conversion rate pulled from each alternative's beta distribution beta_params.each do |alternative, params| alpha = params[0] beta = params[1] simulated_conversion_rate = Split::Algorithms.beta_distribution_rng(alpha, beta) simulated_cr_hash[alternative] = simulated_conversion_rate end simulated_cr_hash end def calc_beta_params(goal = nil) beta_params = {} alternatives.each do |alternative| conversions = goal.nil? ? alternative.completed_count : alternative.completed_count(goal) alpha = 1 + conversions beta = 1 + alternative.participant_count - conversions params = [alpha, beta] beta_params[alternative] = params end beta_params end def calc_time=(time) redis.hset(experiment_config_key, :calc_time, time) end def calc_time redis.hget(experiment_config_key, :calc_time).to_i end def jstring(goal = nil) js_id = if goal.nil? name else name + "-" + goal end js_id.gsub("/", "--") end def cohorting_disabled? @cohorting_disabled ||= begin value = redis.hget(experiment_config_key, :cohorting) value.nil? ? false : value.downcase == "true" end end def disable_cohorting @cohorting_disabled = true redis.hset(experiment_config_key, :cohorting, true.to_s) end def enable_cohorting @cohorting_disabled = false redis.hset(experiment_config_key, :cohorting, false.to_s) end protected def experiment_config_key "experiment_configurations/#{@name}" end def load_metadata_from_configuration Split.configuration.experiment_for(@name)[:metadata] end def load_metadata_from_redis meta = redis.get(metadata_key) JSON.parse(meta) unless meta.nil? end def load_alternatives_from_configuration alts = Split.configuration.experiment_for(@name)[:alternatives] raise ArgumentError, "Experiment configuration is missing :alternatives array" unless alts if alts.is_a?(Hash) alts.keys else alts.flatten end end def load_alternatives_from_redis alternatives = redis.lrange(@name, 0, -1) alternatives.map do |alt| alt = begin JSON.parse(alt) rescue alt end Split::Alternative.new(alt, @name) end end private def redis Split.redis end def redis_interface RedisInterface.new end def persist_experiment_configuration redis_interface.add_to_set(:experiments, name) redis_interface.persist_list(name, @alternatives.map { |alt| { alt.name => alt.weight }.to_json }) goals_collection.save if @metadata redis.set(metadata_key, @metadata.to_json) else delete_metadata end end def remove_experiment_configuration @alternatives.each(&:delete) goals_collection.delete delete_metadata redis.del(@name) end def experiment_configuration_has_changed? 
existing_experiment = Experiment.find(@name) existing_experiment.alternatives.map(&:to_s) != @alternatives.map(&:to_s) || existing_experiment.goals != @goals || existing_experiment.metadata != @metadata end def goals_collection Split::GoalsCollection.new(@name, @goals) end def remove_experiment_cohorting @cohorting_disabled = false redis.hdel(experiment_config_key, :cohorting) end end end <MSG> Merge pull request #392 from pakallis/remove-duplication Use Experiment#set_alternatives_and_options to DRY code <DFF> @@ -67,10 +67,9 @@ module Split end end - self.alternatives = alts - self.goals = options[:goals] - self.algorithm = options[:algorithm] - self.resettable = options[:resettable] + options[:alternatives] = alts + + set_alternatives_and_options(options) # calculate probability that each alternative is the winner @alternative_probabilities = {} @@ -266,11 +265,16 @@ module Split def load_from_redis exp_config = Split.redis.hgetall(experiment_config_key) - self.resettable = exp_config['resettable'] - self.algorithm = exp_config['algorithm'] - self.alternatives = load_alternatives_from_redis - self.goals = Split::GoalsCollection.new(@name).load_from_redis - self.metadata = load_metadata_from_redis + + options = { + resettable: exp_config['resettable'], + algorithm: exp_config['algorithm'], + alternatives: load_alternatives_from_redis, + goals: Split::GoalsCollection.new(@name).load_from_redis, + metadata: load_metadata_from_redis + } + + set_alternatives_and_options(options) end def calc_winning_alternatives
13
Merge pull request #392 from pakallis/remove-duplication
9
.rb
rb
mit
splitrb/split
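The `addition_count` and `deletion_count` columns for the row above (13 and 9) line up with the `+` and `-` lines in its `<DFF>` hunks. A minimal sketch, assuming a plain unified diff in the parsed `diff` field, of recomputing those counts:

```python
def count_changes(diff: str) -> tuple[int, int]:
    """Count added and removed lines in a unified diff, skipping file-name headers."""
    additions = deletions = 0
    for line in diff.splitlines():
        if line.startswith("+++") or line.startswith("---"):
            continue  # "+++"/"---" lines name the files, they are not content changes
        if line.startswith("+"):
            additions += 1
        elif line.startswith("-"):
            deletions += 1
    return additions, deletions


# For the experiment.rb row above, this should return (13, 9).
```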
10069653
<NME> configuration_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" describe Split::Configuration do before(:each) { @config = Split::Configuration.new } it "should provide a default value for ignore_ip_addresses" do expect(@config.ignore_ip_addresses).to eq([]) end it "should provide default values for db failover" do expect(@config.db_failover).to be_falsey expect(@config.db_failover_on_db_error).to be_a Proc end it "should not allow multiple experiments by default" do expect(@config.allow_multiple_experiments).to be_falsey end it "should be enabled by default" do expect(@config.enabled).to be_truthy end it "disabled is the opposite of enabled" do @config.enabled = false expect(@config.disabled?).to be_truthy end %w[Baidu Gigabot Googlebot libwww-perl lwp-trivial msnbot SiteUptime Slurp WordPress ZIBB ZyBorg AdsBot-Google Wget curl bitlybot facebookexternalhit spider].each do |robot| @config.robot_regex.should =~ robot end @config.robot_regex.should =~ "EventMachine HttpClient" @config.robot_regex.should =~ "libwww-perl/5.836" @config.robot_regex.should =~ "Pingdom.com_bot_version_1.4_(http://www.pingdom.com)" end it "should accept real UAs with the robot regexp" do expect(@config.robot_regex).to match(" - ") end it "should accept real UAs with the robot regexp" do expect(@config.robot_regex).not_to match("Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.4) Gecko/20091017 SeaMonkey/2.0") expect(@config.robot_regex).not_to match("Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; F-6.0SP2-20041109; .NET CLR 2.0.50727; .NET CLR 3.0.04506.648; .NET CLR 3.5.21022; .NET CLR 1.1.4322; InfoPath.3)") end it "should allow adding a bot to the bot list" do @config.bots["newbot"] = "An amazing test bot" expect(@config.robot_regex).to match("newbot") end it "should use the session adapter for persistence by default" do expect(@config.persistence).to eq(Split::Persistence::SessionAdapter) end it "should load a metric" do @config.experiments = { my_experiment: { alternatives: ["control_opt", "other_opt"], metric: :my_metric } } expect(@config.metrics).not_to be_nil expect(@config.metrics.keys).to eq([:my_metric]) end it "should allow loading of experiment using experment_for" do @config.experiments = { my_experiment: { alternatives: ["control_opt", "other_opt"], metric: :my_metric } } expect(@config.experiment_for(:my_experiment)).to eq({ alternatives: ["control_opt", ["other_opt"]] }) end context "when experiments are defined via YAML" do context "as strings" do context "in a basic configuration" do before do experiments_yaml = <<-eos my_experiment: alternatives: - Control Opt - Alt One - Alt Two resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: ["Control Opt", ["Alt One", "Alt Two"]] } }) end end context "in a configuration with metadata" do before do experiments_yaml = <<-eos my_experiment: alternatives: - name: Control Opt percent: 67 - name: Alt One percent: 10 - name: Alt Two percent: 23 metadata: Control Opt: text: 'Control Option' Alt One: text: 'Alternative One' Alt Two: text: 'Alternative Two' resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should have metadata on the experiment" do meta = @config.normalized_experiments[:my_experiment][:metadata] expect(meta).to_not be nil expect(meta["Control Opt"]["text"]).to eq("Control Option") end end context "in a complex 
configuration" do before do experiments_yaml = <<-eos my_experiment: alternatives: - name: Control Opt percent: 67 - name: Alt One percent: 10 - name: Alt Two percent: 23 resettable: false metric: my_metric another_experiment: alternatives: - a - b eos @config.experiments = YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: [{ "Control Opt"=>0.67 }, [{ "Alt One"=>0.1 }, { "Alt Two"=>0.23 }]] }, another_experiment: { alternatives: ["a", ["b"]] } }) end it "should recognize metrics" do expect(@config.metrics).not_to be_nil expect(@config.metrics.keys).to eq([:my_metric]) end end end context "as symbols" do context "with valid YAML" do before do experiments_yaml = <<-eos :my_experiment: :alternatives: - Control Opt - Alt One - Alt Two :resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: ["Control Opt", ["Alt One", "Alt Two"]] } }) end end context "with invalid YAML" do let(:yaml) { YAML.load(input) } context "with an empty string" do let(:input) { "" } it "should raise an error" do expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError) end end context "with just the YAML header" do let(:input) { "---" } it "should raise an error" do expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError) end end end end end it "should normalize experiments" do @config.experiments = { my_experiment: { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } } expect(@config.normalized_experiments).to eq({ my_experiment: { alternatives: [{ "control_opt"=>0.67 }, [{ "second_opt"=>0.1 }, { "third_opt"=>0.23 }]] } }) end context "redis configuration" do it "should default to local redis server" do old_redis_url = ENV["REDIS_URL"] ENV.delete("REDIS_URL") expect(Split::Configuration.new.redis).to eq("redis://localhost:6379") ENV["REDIS_URL"] = old_redis_url end it "should allow for redis url to be configured" do @config.redis = "custom_redis_url" expect(@config.redis).to eq("custom_redis_url") end context "provided REDIS_URL environment variable" do it "should use the ENV variable" do old_redis_url = ENV["REDIS_URL"] ENV["REDIS_URL"] = "env_redis_url" expect(Split::Configuration.new.redis).to eq("env_redis_url") ENV["REDIS_URL"] = old_redis_url end end end context "persistence cookie length" do it "should default to 1 year" do expect(@config.persistence_cookie_length).to eq(31536000) end it "should allow the persistence cookie length to be configured" do @config.persistence_cookie_length = 2592000 expect(@config.persistence_cookie_length).to eq(2592000) end end context "persistence cookie domain" do it "should default to nil" do expect(@config.persistence_cookie_domain).to eq(nil) end it "should allow the persistence cookie domain to be configured" do @config.persistence_cookie_domain = ".acme.com" expect(@config.persistence_cookie_domain).to eq(".acme.com") end end end <MSG> Merge pull request #122 from iangreenleaf/more_bots Add more bot user agents <DFF> @@ -30,9 +30,12 @@ describe Split::Configuration do %w[Baidu Gigabot Googlebot libwww-perl lwp-trivial msnbot SiteUptime Slurp WordPress ZIBB ZyBorg AdsBot-Google Wget curl bitlybot facebookexternalhit spider].each do |robot| @config.robot_regex.should 
=~ robot end + @config.robot_regex.should =~ "EventMachine HttpClient" @config.robot_regex.should =~ "libwww-perl/5.836" @config.robot_regex.should =~ "Pingdom.com_bot_version_1.4_(http://www.pingdom.com)" + + @config.robot_regex.should =~ " - " end it "should accept real UAs with the robot regexp" do
3
Merge pull request #122 from iangreenleaf/more_bots
0
.rb
rb
mit
splitrb/split
10069654
<NME> configuration_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" describe Split::Configuration do before(:each) { @config = Split::Configuration.new } it "should provide a default value for ignore_ip_addresses" do expect(@config.ignore_ip_addresses).to eq([]) end it "should provide default values for db failover" do expect(@config.db_failover).to be_falsey expect(@config.db_failover_on_db_error).to be_a Proc end it "should not allow multiple experiments by default" do expect(@config.allow_multiple_experiments).to be_falsey end it "should be enabled by default" do expect(@config.enabled).to be_truthy end it "disabled is the opposite of enabled" do @config.enabled = false expect(@config.disabled?).to be_truthy end %w[Baidu Gigabot Googlebot libwww-perl lwp-trivial msnbot SiteUptime Slurp WordPress ZIBB ZyBorg AdsBot-Google Wget curl bitlybot facebookexternalhit spider].each do |robot| @config.robot_regex.should =~ robot end @config.robot_regex.should =~ "EventMachine HttpClient" @config.robot_regex.should =~ "libwww-perl/5.836" @config.robot_regex.should =~ "Pingdom.com_bot_version_1.4_(http://www.pingdom.com)" end it "should accept real UAs with the robot regexp" do expect(@config.robot_regex).to match(" - ") end it "should accept real UAs with the robot regexp" do expect(@config.robot_regex).not_to match("Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.4) Gecko/20091017 SeaMonkey/2.0") expect(@config.robot_regex).not_to match("Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; F-6.0SP2-20041109; .NET CLR 2.0.50727; .NET CLR 3.0.04506.648; .NET CLR 3.5.21022; .NET CLR 1.1.4322; InfoPath.3)") end it "should allow adding a bot to the bot list" do @config.bots["newbot"] = "An amazing test bot" expect(@config.robot_regex).to match("newbot") end it "should use the session adapter for persistence by default" do expect(@config.persistence).to eq(Split::Persistence::SessionAdapter) end it "should load a metric" do @config.experiments = { my_experiment: { alternatives: ["control_opt", "other_opt"], metric: :my_metric } } expect(@config.metrics).not_to be_nil expect(@config.metrics.keys).to eq([:my_metric]) end it "should allow loading of experiment using experment_for" do @config.experiments = { my_experiment: { alternatives: ["control_opt", "other_opt"], metric: :my_metric } } expect(@config.experiment_for(:my_experiment)).to eq({ alternatives: ["control_opt", ["other_opt"]] }) end context "when experiments are defined via YAML" do context "as strings" do context "in a basic configuration" do before do experiments_yaml = <<-eos my_experiment: alternatives: - Control Opt - Alt One - Alt Two resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: ["Control Opt", ["Alt One", "Alt Two"]] } }) end end context "in a configuration with metadata" do before do experiments_yaml = <<-eos my_experiment: alternatives: - name: Control Opt percent: 67 - name: Alt One percent: 10 - name: Alt Two percent: 23 metadata: Control Opt: text: 'Control Option' Alt One: text: 'Alternative One' Alt Two: text: 'Alternative Two' resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should have metadata on the experiment" do meta = @config.normalized_experiments[:my_experiment][:metadata] expect(meta).to_not be nil expect(meta["Control Opt"]["text"]).to eq("Control Option") end end context "in a complex 
configuration" do before do experiments_yaml = <<-eos my_experiment: alternatives: - name: Control Opt percent: 67 - name: Alt One percent: 10 - name: Alt Two percent: 23 resettable: false metric: my_metric another_experiment: alternatives: - a - b eos @config.experiments = YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: [{ "Control Opt"=>0.67 }, [{ "Alt One"=>0.1 }, { "Alt Two"=>0.23 }]] }, another_experiment: { alternatives: ["a", ["b"]] } }) end it "should recognize metrics" do expect(@config.metrics).not_to be_nil expect(@config.metrics.keys).to eq([:my_metric]) end end end context "as symbols" do context "with valid YAML" do before do experiments_yaml = <<-eos :my_experiment: :alternatives: - Control Opt - Alt One - Alt Two :resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: ["Control Opt", ["Alt One", "Alt Two"]] } }) end end context "with invalid YAML" do let(:yaml) { YAML.load(input) } context "with an empty string" do let(:input) { "" } it "should raise an error" do expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError) end end context "with just the YAML header" do let(:input) { "---" } it "should raise an error" do expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError) end end end end end it "should normalize experiments" do @config.experiments = { my_experiment: { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } } expect(@config.normalized_experiments).to eq({ my_experiment: { alternatives: [{ "control_opt"=>0.67 }, [{ "second_opt"=>0.1 }, { "third_opt"=>0.23 }]] } }) end context "redis configuration" do it "should default to local redis server" do old_redis_url = ENV["REDIS_URL"] ENV.delete("REDIS_URL") expect(Split::Configuration.new.redis).to eq("redis://localhost:6379") ENV["REDIS_URL"] = old_redis_url end it "should allow for redis url to be configured" do @config.redis = "custom_redis_url" expect(@config.redis).to eq("custom_redis_url") end context "provided REDIS_URL environment variable" do it "should use the ENV variable" do old_redis_url = ENV["REDIS_URL"] ENV["REDIS_URL"] = "env_redis_url" expect(Split::Configuration.new.redis).to eq("env_redis_url") ENV["REDIS_URL"] = old_redis_url end end end context "persistence cookie length" do it "should default to 1 year" do expect(@config.persistence_cookie_length).to eq(31536000) end it "should allow the persistence cookie length to be configured" do @config.persistence_cookie_length = 2592000 expect(@config.persistence_cookie_length).to eq(2592000) end end context "persistence cookie domain" do it "should default to nil" do expect(@config.persistence_cookie_domain).to eq(nil) end it "should allow the persistence cookie domain to be configured" do @config.persistence_cookie_domain = ".acme.com" expect(@config.persistence_cookie_domain).to eq(".acme.com") end end end <MSG> Merge pull request #122 from iangreenleaf/more_bots Add more bot user agents <DFF> @@ -30,9 +30,12 @@ describe Split::Configuration do %w[Baidu Gigabot Googlebot libwww-perl lwp-trivial msnbot SiteUptime Slurp WordPress ZIBB ZyBorg AdsBot-Google Wget curl bitlybot facebookexternalhit spider].each do |robot| @config.robot_regex.should 
=~ robot end + @config.robot_regex.should =~ "EventMachine HttpClient" @config.robot_regex.should =~ "libwww-perl/5.836" @config.robot_regex.should =~ "Pingdom.com_bot_version_1.4_(http://www.pingdom.com)" + + @config.robot_regex.should =~ " - " end it "should accept real UAs with the robot regexp" do
3
Merge pull request #122 from iangreenleaf/more_bots
0
.rb
rb
mit
splitrb/split
10069655
<NME> configuration_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" describe Split::Configuration do before(:each) { @config = Split::Configuration.new } it "should provide a default value for ignore_ip_addresses" do expect(@config.ignore_ip_addresses).to eq([]) end it "should provide default values for db failover" do expect(@config.db_failover).to be_falsey expect(@config.db_failover_on_db_error).to be_a Proc end it "should not allow multiple experiments by default" do expect(@config.allow_multiple_experiments).to be_falsey end it "should be enabled by default" do expect(@config.enabled).to be_truthy end it "disabled is the opposite of enabled" do @config.enabled = false expect(@config.disabled?).to be_truthy end %w[Baidu Gigabot Googlebot libwww-perl lwp-trivial msnbot SiteUptime Slurp WordPress ZIBB ZyBorg AdsBot-Google Wget curl bitlybot facebookexternalhit spider].each do |robot| @config.robot_regex.should =~ robot end @config.robot_regex.should =~ "EventMachine HttpClient" @config.robot_regex.should =~ "libwww-perl/5.836" @config.robot_regex.should =~ "Pingdom.com_bot_version_1.4_(http://www.pingdom.com)" end it "should accept real UAs with the robot regexp" do expect(@config.robot_regex).to match(" - ") end it "should accept real UAs with the robot regexp" do expect(@config.robot_regex).not_to match("Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.4) Gecko/20091017 SeaMonkey/2.0") expect(@config.robot_regex).not_to match("Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; F-6.0SP2-20041109; .NET CLR 2.0.50727; .NET CLR 3.0.04506.648; .NET CLR 3.5.21022; .NET CLR 1.1.4322; InfoPath.3)") end it "should allow adding a bot to the bot list" do @config.bots["newbot"] = "An amazing test bot" expect(@config.robot_regex).to match("newbot") end it "should use the session adapter for persistence by default" do expect(@config.persistence).to eq(Split::Persistence::SessionAdapter) end it "should load a metric" do @config.experiments = { my_experiment: { alternatives: ["control_opt", "other_opt"], metric: :my_metric } } expect(@config.metrics).not_to be_nil expect(@config.metrics.keys).to eq([:my_metric]) end it "should allow loading of experiment using experment_for" do @config.experiments = { my_experiment: { alternatives: ["control_opt", "other_opt"], metric: :my_metric } } expect(@config.experiment_for(:my_experiment)).to eq({ alternatives: ["control_opt", ["other_opt"]] }) end context "when experiments are defined via YAML" do context "as strings" do context "in a basic configuration" do before do experiments_yaml = <<-eos my_experiment: alternatives: - Control Opt - Alt One - Alt Two resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: ["Control Opt", ["Alt One", "Alt Two"]] } }) end end context "in a configuration with metadata" do before do experiments_yaml = <<-eos my_experiment: alternatives: - name: Control Opt percent: 67 - name: Alt One percent: 10 - name: Alt Two percent: 23 metadata: Control Opt: text: 'Control Option' Alt One: text: 'Alternative One' Alt Two: text: 'Alternative Two' resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should have metadata on the experiment" do meta = @config.normalized_experiments[:my_experiment][:metadata] expect(meta).to_not be nil expect(meta["Control Opt"]["text"]).to eq("Control Option") end end context "in a complex 
configuration" do before do experiments_yaml = <<-eos my_experiment: alternatives: - name: Control Opt percent: 67 - name: Alt One percent: 10 - name: Alt Two percent: 23 resettable: false metric: my_metric another_experiment: alternatives: - a - b eos @config.experiments = YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: [{ "Control Opt"=>0.67 }, [{ "Alt One"=>0.1 }, { "Alt Two"=>0.23 }]] }, another_experiment: { alternatives: ["a", ["b"]] } }) end it "should recognize metrics" do expect(@config.metrics).not_to be_nil expect(@config.metrics.keys).to eq([:my_metric]) end end end context "as symbols" do context "with valid YAML" do before do experiments_yaml = <<-eos :my_experiment: :alternatives: - Control Opt - Alt One - Alt Two :resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: ["Control Opt", ["Alt One", "Alt Two"]] } }) end end context "with invalid YAML" do let(:yaml) { YAML.load(input) } context "with an empty string" do let(:input) { "" } it "should raise an error" do expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError) end end context "with just the YAML header" do let(:input) { "---" } it "should raise an error" do expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError) end end end end end it "should normalize experiments" do @config.experiments = { my_experiment: { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } } expect(@config.normalized_experiments).to eq({ my_experiment: { alternatives: [{ "control_opt"=>0.67 }, [{ "second_opt"=>0.1 }, { "third_opt"=>0.23 }]] } }) end context "redis configuration" do it "should default to local redis server" do old_redis_url = ENV["REDIS_URL"] ENV.delete("REDIS_URL") expect(Split::Configuration.new.redis).to eq("redis://localhost:6379") ENV["REDIS_URL"] = old_redis_url end it "should allow for redis url to be configured" do @config.redis = "custom_redis_url" expect(@config.redis).to eq("custom_redis_url") end context "provided REDIS_URL environment variable" do it "should use the ENV variable" do old_redis_url = ENV["REDIS_URL"] ENV["REDIS_URL"] = "env_redis_url" expect(Split::Configuration.new.redis).to eq("env_redis_url") ENV["REDIS_URL"] = old_redis_url end end end context "persistence cookie length" do it "should default to 1 year" do expect(@config.persistence_cookie_length).to eq(31536000) end it "should allow the persistence cookie length to be configured" do @config.persistence_cookie_length = 2592000 expect(@config.persistence_cookie_length).to eq(2592000) end end context "persistence cookie domain" do it "should default to nil" do expect(@config.persistence_cookie_domain).to eq(nil) end it "should allow the persistence cookie domain to be configured" do @config.persistence_cookie_domain = ".acme.com" expect(@config.persistence_cookie_domain).to eq(".acme.com") end end end <MSG> Merge pull request #122 from iangreenleaf/more_bots Add more bot user agents <DFF> @@ -30,9 +30,12 @@ describe Split::Configuration do %w[Baidu Gigabot Googlebot libwww-perl lwp-trivial msnbot SiteUptime Slurp WordPress ZIBB ZyBorg AdsBot-Google Wget curl bitlybot facebookexternalhit spider].each do |robot| @config.robot_regex.should 
=~ robot
        end
+       @config.robot_regex.should =~ "EventMachine HttpClient"
        @config.robot_regex.should =~ "libwww-perl/5.836"
        @config.robot_regex.should =~ "Pingdom.com_bot_version_1.4_(http://www.pingdom.com)"
+
+       @config.robot_regex.should =~ " - "
      end

      it "should accept real UAs with the robot regexp" do
3
Merge pull request #122 from iangreenleaf/more_bots
0
.rb
rb
mit
splitrb/split
10069656
<NME> dashboard_helpers_spec.rb <BEF> ADDFILE <MSG> confidence_level helper should handle tiny z-scores <DFF> @@ -0,0 +1,12 @@ +require 'spec_helper' +require 'split/dashboard/helpers' + +include Split::DashboardHelpers + +describe Split::DashboardHelpers do + describe 'confidence_level' do + it 'should handle very small numbers' do + confidence_level(0.000000000000006).should eql('No Change') + end + end +end \ No newline at end of file
12
confidence_level helper should handle tiny z-scores
0
.rb
rb
mit
splitrb/split
10069659
<NME> models.py <BEF> import os from django.conf import settings from django.db import models from django.utils.translation import ugettext_lazy as _ from django.contrib.auth.models import User OS_NAMES = ( ("aix", "AIX"), ("beos", "BeOS"), ("debian", "Debian Linux"), ("dos", "DOS"), ("freebsd", "FreeBSD"), ("hpux", "HP/UX"), ("mac", "Mac System x."), ("macos", "MacOS X"), ("mandrake", "Mandrake Linux"), ("netbsd", "NetBSD"), ("openbsd", "OpenBSD"), ("qnx", "QNX"), ("redhat", "RedHat Linux"), ("solaris", "SUN Solaris"), ("suse", "SuSE Linux"), ("yellowdog", "Yellow Dog Linux"), ) ARCHITECTURES = ( ("alpha", "Alpha"), ("hppa", "HPPA"), ("ix86", "Intel"), ("powerpc", "PowerPC"), ("sparc", "Sparc"), ("ultrasparc", "UltraSparc"), ) UPLOAD_TO = getattr(settings, "DJANGOPYPI_RELEASE_UPLOAD_TO", 'dist') class Classifier(models.Model): name = models.CharField(max_length=255, unique=True) class Meta: verbose_name = _(u"classifier") verbose_name_plural = _(u"classifiers") def __unicode__(self): return self.name class Project(models.Model): name = models.CharField(max_length=255, unique=True) license = models.TextField(blank=True) metadata_version = models.CharField(max_length=64, default=1.0) author = models.CharField(max_length=128, blank=True) home_page = models.URLField(verify_exists=False, blank=True, null=True) download_url = models.CharField(max_length=200, blank=True, null=True) summary = models.TextField(blank=True) description = models.TextField(blank=True) author_email = models.CharField(max_length=255, blank=True) classifiers = models.ManyToManyField(Classifier) owner = models.ForeignKey(User, related_name="projects") updated = models.DateTimeField(auto_now=True) class Meta: verbose_name = _(u"project") verbose_name_plural = _(u"projects") def __unicode__(self): return self.name @models.permalink def get_absolute_url(self): return ('djangopypi-show_links', (), {'dist_name': self.name}) @models.permalink def get_pypi_absolute_url(self): return ('djangopypi-pypi_show_links', (), {'dist_name': self.name}) def get_release(self, version): """Return the release object for version, or None""" try: return self.releases.get(version=version) except Release.DoesNotExist: return None class Release(models.Model): version = models.CharField(max_length=32) author_email = models.CharField(max_length=255, blank=True) classifiers = models.ManyToManyField(Classifier) owner = models.ForeignKey(User, related_name="projects") class Meta: verbose_name = _(u"project") upload_time = models.DateTimeField(auto_now=True) class Meta: verbose_name = _(u"release") verbose_name_plural = _(u"releases") unique_together = ("project", "version", "platform", "distribution", "pyversion") def __unicode__(self): return u"%s (%s)" % (self.release_name, self.platform) class Release(models.Model): version = models.CharField(max_length=128) distribution = models.FileField(upload_to="dists") dist_md5sum = models.CharField(max_length=255, blank=True) platform = models.CharField(max_length=255, blank=True) signature = models.CharField(max_length=128, blank=True) project = models.ForeignKey(Project, related_name="releases") class Meta: unique_together = ('version', 'platform') return os.path.basename(self.distribution.name) @property def __unicode__(self): return u"%s (%s)" % (self.release_name, self.platform) @property def filename(self): return os.path.basename(self.distribution.name) return ('djangopypi-show_version', (), {'dist_name': self.project, 'version': self.version}) def get_dl_url(self): return "%s#md5=%s" % 
(self.distribution.url, self.dist_md5sum) <MSG> added missing metadata to schemas and datetime <DFF> @@ -87,6 +87,7 @@ class Project(models.Model): author_email = models.CharField(max_length=255, blank=True) classifiers = models.ManyToManyField(Classifier) owner = models.ForeignKey(User, related_name="projects") + updated = models.DateTimeField(auto_now=True) class Meta: verbose_name = _(u"project") @@ -103,10 +104,13 @@ class Project(models.Model): class Release(models.Model): version = models.CharField(max_length=128) distribution = models.FileField(upload_to="dists") - dist_md5sum = models.CharField(max_length=255, blank=True) + md5_digest = models.CharField(max_length=255, blank=True) platform = models.CharField(max_length=255, blank=True) signature = models.CharField(max_length=128, blank=True) + filetype = models.CharField(max_length=255, blank=True) + pyversion = models.CharField(max_length=255, blank=True) project = models.ForeignKey(Project, related_name="releases") + upload_time = models.DateTimeField(auto_now=True) class Meta: unique_together = ('version', 'platform') @@ -116,6 +120,17 @@ class Release(models.Model): def __unicode__(self): return u"%s (%s)" % (self.release_name, self.platform) + @property + def type(self): + dist_file_types = { + 'sdist':'Source', + 'bdist_dumb':'"dumb" binary', + 'bdist_rpm':'RPM', + 'bdist_wininst':'MS Windows installer', + 'bdist_egg':'Python Egg', + 'bdist_dmg':'OS X Disk Image'} + return dist_file_types.get(self.filetype, self.filetype) + @property def filename(self): return os.path.basename(self.distribution.name) @@ -133,6 +148,6 @@ class Release(models.Model): return ('djangopypi-show_version', (), {'dist_name': self.project, 'version': self.version}) def get_dl_url(self): - return "%s#md5=%s" % (self.distribution.url, self.dist_md5sum) + return "%s#md5=%s" % (self.distribution.url, self.md5_digest)
17
added missing metadata to schemas and datetime
2
.py
py
bsd-3-clause
ask/chishop
10069660
<NME> .rubocop_todo.yml <BEF> ADDFILE <MSG> Update rubocop.yml config <DFF> @@ -0,0 +1,699 @@ +# This configuration was generated by +# `rubocop --auto-gen-config` +# on 2019-10-21 18:09:14 -0300 using RuboCop version 0.75.1. +# The point is for the user to remove these configuration records +# one by one as the offenses are removed from the code base. +# Note that changes in the inspected code, or installation of new +# versions of RuboCop, may require this file to be generated again. + +# Offense count: 4 +# Cop supports --auto-correct. +# Configuration parameters: TreatCommentsAsGroupSeparators, Include. +# Include: **/*.gemspec +Gemspec/OrderedDependencies: + Exclude: + - 'split.gemspec' + +# Offense count: 1 +# Configuration parameters: Include. +# Include: **/*.gemspec, +Gemspec/RequiredRubyVersion: + Exclude: + - 'split.gemspec' + +# Offense count: 2 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, IndentationWidth. +# SupportedStyles: with_first_argument, with_fixed_indentation +Layout/AlignArguments: + Exclude: + - 'lib/split.rb' + - 'lib/split/experiment_catalog.rb' + +# Offense count: 3 +# Cop supports --auto-correct. +# Configuration parameters: AllowMultipleStyles, EnforcedHashRocketStyle, EnforcedColonStyle, EnforcedLastArgumentHashStyle. +# SupportedHashRocketStyles: key, separator, table +# SupportedColonStyles: key, separator, table +# SupportedLastArgumentHashStyles: always_inspect, always_ignore, ignore_implicit, ignore_explicit +Layout/AlignHash: + Exclude: + - 'lib/split/helper.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Layout/CommentIndentation: + Exclude: + - 'lib/split/experiment.rb' + +# Offense count: 5 +# Cop supports --auto-correct. +Layout/ElseAlignment: + Exclude: + - 'lib/split.rb' + - 'lib/split/helper.rb' + - 'lib/split/trial.rb' + +# Offense count: 20 +# Cop supports --auto-correct. +Layout/EmptyLineAfterGuardClause: + Exclude: + - 'lib/split.rb' + - 'lib/split/algorithms/weighted_sample.rb' + - 'lib/split/alternative.rb' + - 'lib/split/combined_experiments_helper.rb' + - 'lib/split/configuration.rb' + - 'lib/split/experiment.rb' + - 'lib/split/experiment_catalog.rb' + - 'lib/split/goals_collection.rb' + - 'lib/split/helper.rb' + - 'lib/split/user.rb' + +# Offense count: 25 +# Cop supports --auto-correct. +Layout/EmptyLineAfterMagicComment: + Enabled: false + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: AllowAdjacentOneLineDefs, NumberOfEmptyLines. +Layout/EmptyLineBetweenDefs: + Exclude: + - 'lib/split/helper.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Layout/EmptyLines: + Exclude: + - 'lib/split/helper.rb' + +# Offense count: 8 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle. +# SupportedStyles: empty_lines, empty_lines_except_namespace, empty_lines_special, no_empty_lines, beginning_only, ending_only +Layout/EmptyLinesAroundClassBody: + Exclude: + - 'lib/split/experiment_catalog.rb' + - 'lib/split/goals_collection.rb' + - 'lib/split/metric.rb' + - 'lib/split/persistence/cookie_adapter.rb' + - 'lib/split/persistence/redis_adapter.rb' + - 'lib/split/persistence/session_adapter.rb' + - 'lib/split/zscore.rb' + +# Offense count: 2 +# Cop supports --auto-correct. +Layout/EmptyLinesAroundMethodBody: + Exclude: + - 'lib/split/dashboard/helpers.rb' + - 'lib/split/zscore.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle. 
+# SupportedStyles: empty_lines, empty_lines_except_namespace, empty_lines_special, no_empty_lines +Layout/EmptyLinesAroundModuleBody: + Exclude: + - 'lib/split/encapsulated_helper.rb' + +# Offense count: 3 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyleAlignWith, AutoCorrect, Severity. +# SupportedStylesAlignWith: keyword, variable, start_of_line +Layout/EndAlignment: + Exclude: + - 'lib/split.rb' + - 'lib/split/helper.rb' + - 'lib/split/trial.rb' + +# Offense count: 4 +# Cop supports --auto-correct. +# Configuration parameters: AllowForAlignment, AllowBeforeTrailingComments, ForceEqualSignAlignment. +Layout/ExtraSpacing: + Exclude: + - 'lib/split/algorithms/whiplash.rb' + - 'lib/split/dashboard.rb' + - 'lib/split/metric.rb' + - 'lib/split/trial.rb' + +# Offense count: 2 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, IndentationWidth. +# SupportedStyles: special_inside_parentheses, consistent, align_braces +Layout/IndentFirstHashElement: + Exclude: + - 'split.gemspec' + +# Offense count: 3 +# Cop supports --auto-correct. +# Configuration parameters: Width, IgnoredPatterns. +Layout/IndentationWidth: + Exclude: + - 'lib/split.rb' + - 'lib/split/helper.rb' + - 'lib/split/trial.rb' + +# Offense count: 10 +# Cop supports --auto-correct. +Layout/SpaceAfterComma: + Exclude: + - 'lib/split/algorithms/weighted_sample.rb' + - 'lib/split/configuration.rb' + - 'lib/split/encapsulated_helper.rb' + - 'lib/split/experiment.rb' + - 'lib/split/metric.rb' + - 'lib/split/user.rb' + +# Offense count: 5 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle. +# SupportedStyles: space, no_space +Layout/SpaceAroundEqualsInParameterDefault: + Exclude: + - 'lib/split/goals_collection.rb' + - 'lib/split/persistence/dual_adapter.rb' + - 'lib/split/persistence/redis_adapter.rb' + - 'lib/split/trial.rb' + - 'lib/split/user.rb' + +# Offense count: 24 +# Cop supports --auto-correct. +# Configuration parameters: AllowForAlignment. +Layout/SpaceAroundOperators: + Exclude: + - 'lib/split/algorithms/whiplash.rb' + - 'lib/split/alternative.rb' + - 'lib/split/metric.rb' + - 'lib/split/trial.rb' + - 'lib/split/zscore.rb' + +# Offense count: 14 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, EnforcedStyleForEmptyBraces. +# SupportedStyles: space, no_space +# SupportedStylesForEmptyBraces: space, no_space +Layout/SpaceBeforeBlockBraces: + Exclude: + - 'lib/split/configuration.rb' + - 'lib/split/experiment.rb' + - 'lib/split/experiment_catalog.rb' + - 'lib/split/helper.rb' + - 'lib/split/trial.rb' + +# Offense count: 2 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, EnforcedStyleForEmptyBrackets. +# SupportedStyles: space, no_space, compact +# SupportedStylesForEmptyBrackets: space, no_space +Layout/SpaceInsideArrayLiteralBrackets: + Exclude: + - 'lib/split/dashboard/helpers.rb' + +# Offense count: 31 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, EnforcedStyleForEmptyBraces, SpaceBeforeBlockParameters. +# SupportedStyles: space, no_space +# SupportedStylesForEmptyBraces: space, no_space +Layout/SpaceInsideBlockBraces: + Exclude: + - 'lib/split/configuration.rb' + - 'lib/split/experiment.rb' + - 'lib/split/experiment_catalog.rb' + - 'lib/split/helper.rb' + - 'lib/split/trial.rb' + - 'lib/split/user.rb' + +# Offense count: 10 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, EnforcedStyleForEmptyBraces. 
+# SupportedStyles: space, no_space, compact +# SupportedStylesForEmptyBraces: space, no_space +Layout/SpaceInsideHashLiteralBraces: + Exclude: + - 'lib/split/experiment.rb' + - 'lib/split/helper.rb' + - 'lib/split/persistence/redis_adapter.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle. +# SupportedStyles: final_newline, final_blank_line +Layout/TrailingBlankLines: + Exclude: + - 'Rakefile' + +# Offense count: 8 +# Configuration parameters: AllowSafeAssignment. +Lint/AssignmentInCondition: + Exclude: + - 'lib/split/combined_experiments_helper.rb' + - 'lib/split/configuration.rb' + - 'lib/split/persistence/cookie_adapter.rb' + - 'lib/split/persistence/dual_adapter.rb' + - 'lib/split/persistence/redis_adapter.rb' + +# Offense count: 4 +# Cop supports --auto-correct. +# Configuration parameters: IgnoreEmptyBlocks, AllowUnusedKeywordArguments. +Lint/UnusedBlockArgument: + Exclude: + - 'lib/split/algorithms/block_randomization.rb' + - 'lib/split/configuration.rb' + - 'lib/split/engine.rb' + - 'lib/split/metric.rb' + +# Offense count: 2 +# Cop supports --auto-correct. +# Configuration parameters: AllowUnusedKeywordArguments, IgnoreEmptyMethods. +Lint/UnusedMethodArgument: + Exclude: + - 'lib/split/dashboard/helpers.rb' + - 'lib/split/persistence/cookie_adapter.rb' + +# Offense count: 1 +Lint/UselessAssignment: + Exclude: + - 'lib/split/goals_collection.rb' + +# Offense count: 19 +Metrics/AbcSize: + Max: 47 + +# Offense count: 3 +# Configuration parameters: CountComments. +Metrics/ClassLength: + Max: 377 + +# Offense count: 6 +Metrics/CyclomaticComplexity: + Max: 14 + +# Offense count: 23 +# Configuration parameters: CountComments, ExcludedMethods. +Metrics/MethodLength: + Max: 66 + +# Offense count: 1 +# Configuration parameters: CountComments. +Metrics/ModuleLength: + Max: 134 + +# Offense count: 8 +Metrics/PerceivedComplexity: + Max: 16 + +# Offense count: 4 +Naming/AccessorMethodName: + Exclude: + - 'lib/split/alternative.rb' + - 'lib/split/experiment.rb' + - 'lib/split/persistence/cookie_adapter.rb' + +# Offense count: 1 +Naming/BinaryOperatorParameterName: + Exclude: + - 'lib/split/experiment.rb' + +# Offense count: 4 +# Configuration parameters: NamePrefix, NamePrefixBlacklist, NameWhitelist, MethodDefinitionMacros. +# NamePrefix: is_, has_, have_ +# NamePrefixBlacklist: is_, has_, have_ +# NameWhitelist: is_a? +# MethodDefinitionMacros: define_method, define_singleton_method +Naming/PredicateName: + Exclude: + - 'spec/**/*' + - 'lib/split/experiment.rb' + - 'lib/split/helper.rb' + +# Offense count: 5 +# Configuration parameters: MinNameLength, AllowNamesEndingInNumbers, AllowedNames, ForbiddenNames. +# AllowedNames: io, id, to, by, on, in, at, ip, db +Naming/UncommunicativeMethodParamName: + Exclude: + - 'lib/split/alternative.rb' + - 'lib/split/zscore.rb' + +# Offense count: 6 +# Configuration parameters: EnforcedStyle. +# SupportedStyles: snake_case, normalcase, non_integer +Naming/VariableNumber: + Exclude: + - 'lib/split/zscore.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle. +# SupportedStyles: always, conditionals +Style/AndOr: + Exclude: + - 'lib/split/experiment_catalog.rb' + +# Offense count: 2 +# Configuration parameters: AllowedChars. +Style/AsciiComments: + Exclude: + - 'lib/split/zscore.rb' + +# Offense count: 4 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle. 
+# SupportedStyles: percent_q, bare_percent +Style/BarePercentLiterals: + Exclude: + - 'lib/split/dashboard/pagination_helpers.rb' + +# Offense count: 8 +Style/CaseEquality: + Exclude: + - 'lib/split/alternative.rb' + - 'lib/split/experiment_catalog.rb' + - 'lib/split/helper.rb' + - 'lib/split/metric.rb' + +# Offense count: 5 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle. +# SupportedStyles: is_a?, kind_of? +Style/ClassCheck: + Exclude: + - 'lib/split/alternative.rb' + - 'lib/split/configuration.rb' + - 'lib/split/experiment.rb' + - 'lib/split/goals_collection.rb' + - 'lib/split/trial.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/ColonMethodCall: + Exclude: + - 'lib/split/combined_experiments_helper.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: Keywords. +# Keywords: TODO, FIXME, OPTIMIZE, HACK, REVIEW +Style/CommentAnnotation: + Exclude: + - 'lib/split/configuration.rb' + +# Offense count: 2 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SingleLineConditionsOnly, IncludeTernaryExpressions. +# SupportedStyles: assign_to_condition, assign_inside_condition +Style/ConditionalAssignment: + Exclude: + - 'lib/split/dashboard.rb' + - 'lib/split/persistence/redis_adapter.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/DefWithParentheses: + Exclude: + - 'lib/split/helper.rb' + +# Offense count: 29 +Style/Documentation: + Enabled: false + +# Offense count: 3 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle. +# SupportedStyles: empty, nil, both +Style/EmptyElse: + Exclude: + - 'lib/split/experiment.rb' + - 'lib/split/metric.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/Encoding: + Exclude: + - 'split.gemspec' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/ExpandPathArguments: + Exclude: + - 'split.gemspec' + +# Offense count: 13 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle. +# SupportedStyles: always, never +Style/FrozenStringLiteralComment: + Exclude: + - 'Appraisals' + - 'Gemfile' + - 'Rakefile' + - 'gemfiles/4.2.gemfile' + - 'gemfiles/5.0.gemfile' + - 'gemfiles/5.1.gemfile' + - 'gemfiles/5.2.gemfile' + - 'gemfiles/6.0.gemfile' + - 'lib/split/algorithms/block_randomization.rb' + - 'lib/split/goals_collection.rb' + - 'lib/split/redis_interface.rb' + - 'lib/split/user.rb' + - 'split.gemspec' + +# Offense count: 13 +# Configuration parameters: MinBodyLength. +Style/GuardClause: + Exclude: + - 'lib/split/alternative.rb' + - 'lib/split/configuration.rb' + - 'lib/split/experiment.rb' + - 'lib/split/goals_collection.rb' + - 'lib/split/helper.rb' + - 'lib/split/persistence/dual_adapter.rb' + - 'lib/split/trial.rb' + +# Offense count: 25 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, UseHashRocketsWithSymbolValues, PreferHashRocketsForNonAlnumEndingSymbols. +# SupportedStyles: ruby19, hash_rockets, no_mixed_keys, ruby19_no_mixed_keys +Style/HashSyntax: + Exclude: + - 'Rakefile' + - 'lib/split.rb' + - 'lib/split/experiment.rb' + - 'lib/split/experiment_catalog.rb' + - 'lib/split/helper.rb' + - 'lib/split/metric.rb' + - 'lib/split/persistence.rb' + - 'lib/split/persistence/redis_adapter.rb' + +# Offense count: 4 +Style/IdenticalConditionalBranches: + Exclude: + - 'lib/split/configuration.rb' + - 'lib/split/experiment.rb' + +# Offense count: 14 +# Cop supports --auto-correct. 
+Style/IfUnlessModifier: + Exclude: + - 'lib/split/alternative.rb' + - 'lib/split/experiment.rb' + - 'lib/split/experiment_catalog.rb' + - 'lib/split/goals_collection.rb' + - 'lib/split/metric.rb' + - 'lib/split/trial.rb' + - 'lib/split/user.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle. +# SupportedStyles: require_parentheses, require_no_parentheses, require_no_parentheses_except_multiline +Style/MethodDefParentheses: + Exclude: + - 'lib/split/configuration.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, Autocorrect. +# SupportedStyles: module_function, extend_self +Style/ModuleFunction: + Exclude: + - 'lib/split.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle. +# SupportedStyles: literals, strict +Style/MutableConstant: + Exclude: + - 'lib/split/experiment.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle. +# SupportedStyles: both, prefix, postfix +Style/NegatedIf: + Exclude: + - 'lib/split/user.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: IncludeSemanticChanges. +Style/NonNilCheck: + Exclude: + - 'lib/split/configuration.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/Not: + Exclude: + - 'lib/split/experiment_catalog.rb' + +# Offense count: 2 +# Cop supports --auto-correct. +# Configuration parameters: Strict. +Style/NumericLiterals: + MinDigits: 9 + +# Offense count: 3 +# Cop supports --auto-correct. +# Configuration parameters: AutoCorrect, EnforcedStyle, IgnoredMethods. +# SupportedStyles: predicate, comparison +Style/NumericPredicate: + Exclude: + - 'spec/**/*' + - 'lib/split/experiment.rb' + - 'lib/split/trial.rb' + - 'lib/split/user.rb' + +# Offense count: 2 +# Cop supports --auto-correct. +Style/ParallelAssignment: + Exclude: + - 'lib/split/experiment_catalog.rb' + - 'lib/split/persistence/cookie_adapter.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle. +# SupportedStyles: short, verbose +Style/PreferredHashMethods: + Exclude: + - 'lib/split/configuration.rb' + +# Offense count: 2 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle. +# SupportedStyles: compact, exploded +Style/RaiseArgs: + Exclude: + - 'lib/split/configuration.rb' + - 'lib/split/experiment.rb' + +# Offense count: 6 +# Cop supports --auto-correct. +Style/RedundantParentheses: + Exclude: + - 'lib/split/alternative.rb' + - 'lib/split/zscore.rb' + +# Offense count: 11 +# Cop supports --auto-correct. +# Configuration parameters: AllowMultipleReturnValues. +Style/RedundantReturn: + Exclude: + - 'lib/split/alternative.rb' + - 'lib/split/experiment.rb' + - 'lib/split/experiment_catalog.rb' + - 'lib/split/helper.rb' + - 'lib/split/metric.rb' + - 'lib/split/zscore.rb' + +# Offense count: 21 +# Cop supports --auto-correct. +Style/RedundantSelf: + Exclude: + - 'lib/split.rb' + - 'lib/split/alternative.rb' + - 'lib/split/configuration.rb' + - 'lib/split/experiment.rb' + - 'lib/split/extensions/string.rb' + - 'lib/split/metric.rb' + - 'lib/split/persistence/dual_adapter.rb' + - 'lib/split/persistence/redis_adapter.rb' + - 'lib/split/trial.rb' + +# Offense count: 2 +# Cop supports --auto-correct. +Style/RescueModifier: + Exclude: + - 'lib/split/alternative.rb' + - 'lib/split/configuration.rb' + +# Offense count: 5 +# Cop supports --auto-correct. 
+# Configuration parameters: EnforcedStyle. +# SupportedStyles: implicit, explicit +Style/RescueStandardError: + Exclude: + - 'lib/split/alternative.rb' + - 'lib/split/experiment.rb' + - 'lib/split/helper.rb' + +# Offense count: 2 +# Cop supports --auto-correct. +# Configuration parameters: ConvertCodeThatCanStartToReturnNil, Whitelist. +# Whitelist: present?, blank?, presence, try, try! +Style/SafeNavigation: + Exclude: + - 'lib/split/configuration.rb' + - 'lib/split/helper.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: AllowAsExpressionSeparator. +Style/Semicolon: + Exclude: + - 'lib/split/algorithms/whiplash.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: . +# SupportedStyles: use_perl_names, use_english_names +Style/SpecialGlobalVars: + EnforcedStyle: use_perl_names + +# Offense count: 86 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, ConsistentQuotesInMultiline. +# SupportedStyles: single_quotes, double_quotes +Style/StringLiterals: + Enabled: false + +# Offense count: 3 +# Cop supports --auto-correct. +# Configuration parameters: IgnoredMethods. +# IgnoredMethods: respond_to, define_method +Style/SymbolProc: + Exclude: + - 'lib/split/experiment.rb' + - 'lib/split/experiment_catalog.rb' + - 'lib/split/metric.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: AllowNamedUnderscoreVariables. +Style/TrailingUnderscoreVariable: + Exclude: + - 'lib/split/helper.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/ZeroLengthPredicate: + Exclude: + - 'lib/split/user.rb' + +# Offense count: 74 +# Cop supports --auto-correct. +# Configuration parameters: AutoCorrect, AllowHeredoc, AllowURI, URISchemes, IgnoreCopDirectives, IgnoredPatterns. +# URISchemes: http, https +Metrics/LineLength: + Max: 183
699
Update rubocop.yml config
0
.yml
rubocop_todo
mit
splitrb/split
699
Update rubocop.yml config
0
.yml
rubocop_todo
mit
splitrb/split
10069663
<NME> redis_interface_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" describe Split::RedisInterface do let(:list_name) { "list_name" } let(:set_name) { "set_name" } let(:interface) { described_class.new } describe "#persist_list" do subject(:persist_list) do interface.persist_list(list_name, %w(a b c d)) end specify do expect(persist_list).to eq %w(a b c d) expect(Split.redis.lindex(list_name, 0)).to eq "a" expect(Split.redis.lindex(list_name, 1)).to eq "b" expect(Split.redis.lindex(list_name, 2)).to eq "c" expect(Split.redis.lindex(list_name, 3)).to eq "d" expect(Split.redis.llen(list_name)).to eq 4 end context "list is overwritten but not deleted" do specify do expect(persist_list).to eq %w(a b c d) interface.persist_list(list_name, ["z"]) expect(Split.redis.lindex(list_name, 0)).to eq "z" end end describe '#add_to_list' do subject(:add_to_list) do interface.add_to_list(list_name, 'y') interface.add_to_list(list_name, 'z') end specify do add_to_list expect(Split.redis.lindex(list_name, 0)).to eq 'y' expect(Split.redis.lindex(list_name, 1)).to eq 'z' expect(Split.redis.llen(list_name)).to eq 2 end end describe '#set_list_index' do subject(:set_list_index) do interface.add_to_list(list_name, 'y') interface.add_to_list(list_name, 'z') interface.set_list_index(list_name, 0, 'a') end specify do set_list_index expect(Split.redis.lindex(list_name, 0)).to eq 'a' expect(Split.redis.lindex(list_name, 1)).to eq 'z' expect(Split.redis.llen(list_name)).to eq 2 end end describe '#list_length' do subject(:list_length) do interface.add_to_list(list_name, 'y') interface.add_to_list(list_name, 'z') interface.list_length(list_name) end specify do expect(list_length).to eq 2 end end describe '#remove_last_item_from_list' do subject(:remove_last_item_from_list) do interface.add_to_list(list_name, 'y') interface.add_to_list(list_name, 'z') interface.remove_last_item_from_list(list_name) end specify do remove_last_item_from_list expect(Split.redis.lindex(list_name, 0)).to eq 'y' expect(Split.redis.llen(list_name)).to eq 1 end end describe '#make_list_length' do subject(:make_list_length) do interface.add_to_list(list_name, 'y') interface.add_to_list(list_name, 'z') interface.make_list_length(list_name, 1) end specify do make_list_length expect(Split.redis.lindex(list_name, 0)).to eq 'y' expect(Split.redis.llen(list_name)).to eq 1 end end describe '#add_to_set' do subject(:add_to_set) do interface.add_to_set(set_name, 'something') subject(:add_to_set) do interface.add_to_set(set_name, "something") end specify do add_to_set expect(Split.redis.sismember(set_name, "something")).to be true end end end <MSG> Merge pull request #632 from splitrb/simplify-redis-interface Simplify RedisInterface usage when persisting Experiment alternatives <DFF> @@ -29,75 +29,6 @@ describe Split::RedisInterface do end end - describe '#add_to_list' do - subject(:add_to_list) do - interface.add_to_list(list_name, 'y') - interface.add_to_list(list_name, 'z') - end - - specify do - add_to_list - expect(Split.redis.lindex(list_name, 0)).to eq 'y' - expect(Split.redis.lindex(list_name, 1)).to eq 'z' - expect(Split.redis.llen(list_name)).to eq 2 - end - end - - describe '#set_list_index' do - subject(:set_list_index) do - interface.add_to_list(list_name, 'y') - interface.add_to_list(list_name, 'z') - interface.set_list_index(list_name, 0, 'a') - end - - specify do - set_list_index - expect(Split.redis.lindex(list_name, 0)).to eq 'a' - expect(Split.redis.lindex(list_name, 1)).to eq 'z' - 
expect(Split.redis.llen(list_name)).to eq 2 - end - end - - describe '#list_length' do - subject(:list_length) do - interface.add_to_list(list_name, 'y') - interface.add_to_list(list_name, 'z') - interface.list_length(list_name) - end - - specify do - expect(list_length).to eq 2 - end - end - - describe '#remove_last_item_from_list' do - subject(:remove_last_item_from_list) do - interface.add_to_list(list_name, 'y') - interface.add_to_list(list_name, 'z') - interface.remove_last_item_from_list(list_name) - end - - specify do - remove_last_item_from_list - expect(Split.redis.lindex(list_name, 0)).to eq 'y' - expect(Split.redis.llen(list_name)).to eq 1 - end - end - - describe '#make_list_length' do - subject(:make_list_length) do - interface.add_to_list(list_name, 'y') - interface.add_to_list(list_name, 'z') - interface.make_list_length(list_name, 1) - end - - specify do - make_list_length - expect(Split.redis.lindex(list_name, 0)).to eq 'y' - expect(Split.redis.llen(list_name)).to eq 1 - end - end - describe '#add_to_set' do subject(:add_to_set) do interface.add_to_set(set_name, 'something')
0
Merge pull request #632 from splitrb/simplify-redis-interface
69
.rb
rb
mit
splitrb/split
10069666
<NME> split.gemspec <BEF> # -*- encoding: utf-8 -*- # frozen_string_literal: true $:.push File.expand_path("../lib", __FILE__) require "split/version" Gem::Specification.new do |s| s.name = "split" s.version = Split::VERSION s.platform = Gem::Platform::RUBY s.authors = ["Andrew Nesbitt"] s.licenses = ["MIT"] s.email = ["[email protected]"] s.homepage = "https://github.com/splitrb/split" s.summary = "Rack based split testing framework" s.metadata = { "homepage_uri" => "https://github.com/splitrb/split", "changelog_uri" => "https://github.com/splitrb/split/blob/main/CHANGELOG.md", "source_code_uri" => "https://github.com/splitrb/split", "bug_tracker_uri" => "https://github.com/splitrb/split/issues", "wiki_uri" => "https://github.com/splitrb/split/wiki", "mailing_list_uri" => "https://groups.google.com/d/forum/split-ruby" } s.required_ruby_version = ">= 2.5.0" s.required_rubygems_version = ">= 2.0.0" s.files = `git ls-files`.split("\n") s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") s.require_paths = ["lib"] s.add_dependency "redis", ">= 4.2" s.add_dependency "sinatra", ">= 1.2.6" s.add_dependency 'simple-random', '>= 0.9.3' s.add_development_dependency 'bundler', '~> 1.14' s.add_development_dependency 'simplecov', '~> 0.12' s.add_development_dependency 'rack-test', '~> 0.6' s.add_development_dependency 'rake', '~> 12' s.add_development_dependency 'rspec', '~> 3.4' s.add_development_dependency 'pry', '~> 0.10' s.add_development_dependency 'fakeredis', '~> 0.6.0' end <MSG> Update dependencies <DFF> @@ -35,10 +35,10 @@ Gem::Specification.new do |s| s.add_dependency 'simple-random', '>= 0.9.3' s.add_development_dependency 'bundler', '~> 1.14' - s.add_development_dependency 'simplecov', '~> 0.12' + s.add_development_dependency 'simplecov', '~> 0.15' s.add_development_dependency 'rack-test', '~> 0.6' s.add_development_dependency 'rake', '~> 12' - s.add_development_dependency 'rspec', '~> 3.4' + s.add_development_dependency 'rspec', '~> 3.7' s.add_development_dependency 'pry', '~> 0.10' - s.add_development_dependency 'fakeredis', '~> 0.6.0' + s.add_development_dependency 'fakeredis', '~> 0.6' end
3
Update dependencies
3
.gemspec
gemspec
mit
splitrb/split
10069669
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. #### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). 
## Statistical Validity

Split has two options for you to use to determine which alternative is the best.

The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.

As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).

[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.

The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.

Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).

```ruby
Split.configure do |config|
  config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```

## Extras

### Weighted alternatives

Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.

To do this you can pass a weight with each alternative in the following ways:

```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})

ab_test(:homepage_design, 'Old', {'New' => 1.0/9})

ab_test(:homepage_design, {'Old' => 9}, 'New')
```

This will only show the new alternative to visitors 1 in 10 times; the default weight for an alternative is 1.

### Overriding alternatives

For development and testing, you may wish to force your app to always return an alternative.
You can do this by passing it as a parameter in the URL.

If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a URL such as:

    http://myawesomesite.com?ab_test[button_color]=red

will always have red buttons. This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.

In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.

    http://myawesomesite.com?SPLIT_DISABLE=true

It is not required to send `SPLIT_DISABLE=false` to activate Split.
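If you do want an override passed in the URL to be stored and counted, enable the `store_override` option mentioned above. A minimal configuration sketch, following the same `Split.configure` pattern used throughout this README:

```ruby
Split.configure do |config|
  # Persist the alternative forced via ?ab_test[experiment]=alternative in the
  # user's session/adapter so it also counts towards the experiment's results.
  config.store_override = true
end
```

This is mainly useful in development and QA environments, since a stored override is counted just like an organically assigned alternative.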
### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. ### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. 
To address this, setting the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). ```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. ```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. 
For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? } config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. 
This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false finished(link_color: "purchase") ``` **NOTE:** This does not mean that a single experiment can have/complete progressive goals. **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) 
**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. #### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ```` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. 
Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. ```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose! # get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. 
[[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img 
src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img 
src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). <MSG> Clarify finished with first option being a hash We got this wrong. 
<DFF> @@ -553,6 +553,12 @@ To complete a goal conversion, you do it like: finished(link_color: "purchase") ``` +Note that if you pass additional options, that should be a separate hash: + +```ruby +finished({ link_color: "purchase" }, reset: false) +``` + **NOTE:** This does not mean that a single experiment can have/complete progressive goals. **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").
6
Clarify finished with first option being a hash
0
.md
md
mit
splitrb/split
10069670
<NME> README.md
<BEF> # [Split](https://libraries.io/rubygems/split)

[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)
![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main)
[![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)
[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)
[![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)
[![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split)

> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split

Split is a Rack-based A/B testing framework designed to work with Rails, Sinatra or any other Rack-based app.

Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.

Split is designed to be hacker friendly, allowing for maximum customisation and extensibility.

## Install

### Requirements

Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions, you can try v3.0 or v0.8.0 (for Ruby 1.9.3).

Split uses Redis as a datastore. Split only supports Redis 4.0 or greater.

If you're on OS X, Homebrew is the simplest way to install Redis:

```bash
brew install redis
redis-server /usr/local/etc/redis.conf
```

You now have a Redis daemon running on port `6379`.

### Setup

```bash
gem install split
```

#### Rails

Adding `gem 'split'` to your Gemfile will autoload it when Rails starts up. As long as you've configured Redis, it will 'just work'.

#### Sinatra

To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:

```ruby
require 'split'

class MySinatraApp < Sinatra::Base
  enable :sessions
  helpers Split::Helper

  get '/' do
  ...
  end
end
```

## Usage

To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.

`ab_test` returns one of the alternatives. If a user has already seen that test, they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case-based logic.

`ab_finished` is used to record a completion (conversion) of an experiment.

Example: View

```erb
<% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
  <%= image_tag(button_file, alt: "Login!") %>
<% end %>
```

Example: Controller

```ruby
def register_new_user
  # See what level of free points maximizes users' decision to buy replacement points.
  @starter_points = ab_test(:new_user_free_points, '100', '200', '300')
end
```

Example: Conversion tracking (in a controller!)

```ruby
def buy_new_points
  # some business logic
  ab_finished(:new_user_free_points)
end
```

Example: Conversion tracking (in a view)

```erb
Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>
```

You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).
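To illustrate the case-based usage mentioned above, the alternative returned by `ab_test` can also drive a plain `case` statement. A minimal sketch, in which the experiment name and template paths are hypothetical:

```ruby
# Hypothetical controller action: branch on the returned alternative name.
def show
  case ab_test(:pricing_page_layout, 'table', 'cards')
  when 'table'
    render 'pricing/table'
  when 'cards'
    render 'pricing/cards'
  end
end
```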
## Statistical Validity

Split has two options for you to use to determine which alternative is the best.

The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish which alternative is best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.

As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.

The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.

Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).

```ruby
Split.configure do |config|
  config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```

## Extras

### Weighted alternatives

Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. To do this you can pass a weight with each alternative in the following ways:

```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})

ab_test(:homepage_design, 'Old', {'New' => 1.0/9})

ab_test(:homepage_design, {'Old' => 9}, 'New')
```

This will only show the new alternative to visitors 1 in 10 times; the default weight for an alternative is 1.

### Overriding alternatives

For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the URL.

If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a URL such as:

    http://myawesomesite.com?ab_test[button_color]=red

will always have red buttons. This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.

In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.

    http://myawesomesite.com?SPLIT_DISABLE=true

It is not required to send `SPLIT_DISABLE=false` to activate Split.
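If you do want forced alternatives to be stored and counted, the `store_override` option referenced above can be switched on in your initializer. A minimal sketch:

```ruby
Split.configure do |config|
  # Persist overrides passed via the ab_test URL parameter and count them
  # towards the experiment's results (off by default).
  config.store_override = true
end
```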
### Rspec Helper

To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:

```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper

  # Force a specific experiment alternative to always be returned:
  #   use_ab_test(signup_form: "single_page")
  #
  # Force alternatives for multiple experiments:
  #   use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
  #
  def use_ab_test(alternatives_by_experiment)
    allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
      variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
      block.call(variant) unless block.nil?
      variant
    end
  end
end

# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
  config.include SplitHelper
end
```

Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:

```ruby
it "registers using experimental signup" do
  use_ab_test experiment_name: "alternative_name"
  post "/signups"
  ...
end
```

### Starting experiments manually

By default new A/B tests will be active right after deployment. If you would like to start a new test some time after the deploy, you can do so by setting the `start_manually` configuration option to `true`.

With this option enabled, tests won't be started right after deploy, but only after pressing the `Start` button in the Split admin dashboard. If a test is deleted from the Split dashboard, it can only be started by pressing the `Start` button again once it has been re-initialized.

### Reset after completion

When a user completes a test, their session is reset so that they may start the test again in the future.

To stop this behaviour, you can pass the following option to the `ab_finished` method:

```ruby
ab_finished(:experiment_name, reset: false)
```

The user will then always see the alternative they started with.

Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll in any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.

### Reset experiments manually

By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.

You may want to do this when you need to change something, like the variants' names or the experiment's metadata, without resetting everything.

### Multiple experiments at once

By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.

To stop this behaviour and allow users to participate in multiple experiments at once, set the `allow_multiple_experiments` config option to true like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = true
end
```

This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.
To address this, setting the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). ```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. ```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. 
For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? } config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. 
This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false finished(link_color: "purchase") ``` **NOTE:** This does not mean that a single experiment can have/complete progressive goals. **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) 
**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. #### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ```` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. 
Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. ```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose! # get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. 
[[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img 
src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img 
src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). <MSG> Clarify finished with first option being a hash We got this wrong. 
<DFF> @@ -553,6 +553,12 @@ To complete a goal conversion, you do it like: finished(link_color: "purchase") ``` +Note that if you pass additional options, that should be a separate hash: + +```ruby +finished({ link_color: "purchase" }, reset: false) +``` + **NOTE:** This does not mean that a single experiment can have/complete progressive goals. **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").
6
Clarify finished with first option being a hash
0
.md
md
mit
splitrb/split
10069671
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. #### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). 
## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. 
### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. ### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. 
To address this downside, set the `allow_multiple_experiments` config option to `'control'` like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = 'control'
end
```

For this to work, every experiment you define must have an alternative named `'control'`. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative `'control'` in each experiment. As soon as the user belongs to an alternative named something other than `'control'`, the user may not participate in any more experiments. Calling `ab_test(<other experiments>)` will then always return the first alternative without adding the user to that experiment.

### Experiment Persistence

Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.

By default Split will store the tests for each user in the session.

You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.

#### Cookies

```ruby
Split.configure do |config|
  config.persistence = :cookie
end
```

When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).

```ruby
Split.configure do |config|
  config.persistence = :cookie
  config.persistence_cookie_length = 2592000 # 30 days
end
```

The data stored consists of the experiment name and the variants the user is in. Example: `{ "experiment_name" => "variant_a" }`

__Note:__ Using cookies depends on `ActionDispatch::Cookies` or an identical API.

#### Redis

Using Redis will allow ab_users to persist across sessions or machines.

```ruby
Split.configure do |config|
  config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
  # Equivalent
  # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```

Options:
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
* `expire_seconds`: sets the TTL for the user key (if a user is in multiple experiments, the most recent update will reset the TTL for all their assignments)

#### Dual Adapter

The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.

```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
    lookup_by: -> (context) { context.send(:current_user).try(:id) },
    expire_seconds: 2592000)

Split.configure do |config|
  config.persistence = Split::Persistence::DualAdapter.with_config(
      logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
      logged_in_adapter: redis_adapter,
      logged_out_adapter: cookie_adapter)
  config.persistence_cookie_length = 2592000 # 30 days
end
```

#### Custom Adapter

Your custom adapter needs to implement the same API as the existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.

```ruby
Split.configure do |config|
  config.persistence = YourCustomAdapterClass
end
```

### Trial Event Hooks

You can define methods that will be called at the same time as experiment alternative participation and goal completion.
For example:

```ruby
Split.configure do |config|
  config.on_trial = :log_trial # run on every trial
  config.on_trial_choose = :log_trial_choose # run on trials with new users only
  config.on_trial_complete = :log_trial_complete
end
```

Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance.

```ruby
def log_trial(trial)
  logger.info "experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end

def log_trial_choose(trial)
  logger.info "[new user] experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end

def log_trial_complete(trial)
  logger.info "experiment=%s alternative=%s user=%s complete=true" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end
```

#### Views

If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller:

```ruby
helper_method :log_trial_choose

def log_trial_choose(trial)
  logger.info "experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end
```

### Experiment Hooks

You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.

For example:

```ruby
Split.configure do |config|
  # after experiment reset or deleted
  config.on_experiment_reset = -> (experiment) {
    # Do something on reset
  }
  config.on_experiment_delete = -> (experiment) {
    # Do something else on delete
  }
  # before experiment reset or deleted
  config.on_before_experiment_reset = -> (experiment) {
    # Do something on reset
  }
  config.on_before_experiment_delete = -> (experiment) {
    # Do something else on delete
  }
  # after experiment winner has been set
  config.on_experiment_winner_choose = -> (experiment) {
    # Do something on winner choose
  }
end
```

## Web Interface

Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.

If you are running Rails 2, you can mount this inside your app using Rack::URLMap in your `config.ru`:

```ruby
require 'split/dashboard'

run Rack::URLMap.new \
  "/"       => Your::App.new,
  "/split"  => Split::Dashboard.new
```

However, if you are using Rails 3 or higher, you can mount this inside your app routes by first adding this to the Gemfile:

```ruby
gem 'split', require: 'split/dashboard'
```

Then add this to config/routes.rb:

```ruby
mount Split::Dashboard, at: 'split'
```

You may want to password protect that page; you can do so with `Rack::Auth::Basic` (in your split initializer file):

```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
  # Protect against timing attacks:
  # - Use & (do not use &&) so that it doesn't short circuit.
  # - Use digests to stop length information leaking
  ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
    ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end

# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
  # Protect against timing attacks:
  # - Use & (do not use &&) so that it doesn't short circuit.
  # - Use digests to stop length information leaking
  Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
    Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```

You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:

```ruby
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
  request.env['warden'].authenticated? # are we authenticated?
  request.env['warden'].authenticate! # authenticate if not already
  # or even check any other condition such as request.env['warden'].user.is_admin?
end
```

More information on this is available [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/).

### Screenshot

![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)

## Configuration

You can override the default configuration options of Split like so:

```ruby
Split.configure do |config|
  config.db_failover = true # handle Redis errors gracefully
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
  config.allow_multiple_experiments = true
  config.enabled = true
  config.persistence = Split::Persistence::SessionAdapter
  #config.start_manually = false ## new test will have to be started manually from the admin panel. default false
  #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes
  config.include_rails_helper = true
  config.redis = "redis://custom.redis.url:6380"
end
```

Split looks for the Redis host in the environment variable `REDIS_URL`, and defaults to `redis://localhost:6379` if it is not specified in the configure block.

On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`.

### Filtering

In most scenarios you don't want to have A/B testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter these out based on a predefined, extensible list of bots, an IP list, or custom exclude logic.

```ruby
Split.configure do |config|
  # bot config
  config.robot_regex = /my_custom_robot_regex/ # or
  config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"

  # IP config
  config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/

  # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }
  config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```

### Experiment configuration

Instead of providing the experiment options inline, you can store them in a hash.
This hash can control your experiment's alternatives, weights, algorithm and whether the experiment resets once finished:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      resettable: false
    },
    :my_second_experiment => {
      algorithm: 'Split::Algorithms::Whiplash',
      alternatives: [
        { name: "a", percent: 67 },
        { name: "b", percent: 33 }
      ]
    }
  }
end
```

You can also store your experiments in a YAML file:

```ruby
Split.configure do |config|
  config.experiments = YAML.load_file "config/experiments.yml"
end
```

You can then define the YAML file like:

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
my_second_experiment:
  alternatives:
    - name: a
      percent: 67
    - name: b
      percent: 33
  resettable: false
```

You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metadata: {
        "a" => {"text" => "Have a fantastic day"},
        "b" => {"text" => "Don't get hit by a bus"}
      }
    }
  }
end
```

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
  metadata:
    a:
      text: "Have a fantastic day"
    b:
      text: "Don't get hit by a bus"
```

This allows for some advanced experiment configuration using methods like:

```ruby
trial.alternative.name # => "a"

trial.metadata['text'] # => "Have a fantastic day"
```

or in views:

```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
  <%= alternative %>
  <small><%= meta['text'] %></small>
<% end %>
```

The keys used in meta data should be Strings.

#### Metrics

You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option.

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metric: :my_metric
    }
  }
end
```

Your code may then track a completion using the metric instead of the experiment name:

```ruby
ab_finished(:my_metric)
```

You can also create a new metric by instantiating and saving a new Metric object.

```ruby
Split::Metric.new(:my_metric)
Split::Metric.save
```

#### Goals

You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this:

```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```

or you can define them in a configuration file:

```ruby
Split.configure do |config|
  config.experiments = {
    link_color: {
      alternatives: ["red", "blue"],
      goals: ["purchase", "refund"]
    }
  }
end
```

To complete a goal conversion, you do it like:

```ruby
ab_finished(link_color: "purchase")
```

Note that if you pass additional options, that should be a separate hash:

```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```

**NOTE:** This does not mean that a single experiment can complete more than one goal.

Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)
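To make the note above concrete, here is a small sketch using the `link_color` experiment and its two goals from the examples above (assuming the user is already participating in the experiment):

```ruby
ab_finished({ link_color: "purchase" }, reset: false) # counts towards the "purchase" goal
ab_finished({ link_color: "refund" }, reset: false)   # ignored; the experiment is already finished for this user
```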
**Good Example**: Test if listing Plan A first results in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").

**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.

**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.

#### Combined Experiments

If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so:

```ruby
Split.configuration.experiments = {
  :button_color_experiment => {
    :alternatives => ["blue", "green"],
    :combined_experiments => ["button_color_on_signup", "button_color_on_login"]
  }
}
```

Starting the combined test starts all combined experiments:

```ruby
ab_combined_test(:button_color_experiment)
```

Finish each combined test as normal:

```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```

**Additional Configuration**:
* Be sure to enable `allow_multiple_experiments`
* In Sinatra, include the `CombinedExperimentsHelper`:
  ```
  helpers Split::CombinedExperimentsHelper
  ```

### DB failover solution

Because Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case.

It's also possible to set a `db_failover_on_db_error` callback (a proc), for example to log these errors via `Rails.logger`.

### Redis

You may want to change the Redis host and port Split connects to, or set various other options at startup.

Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection.

String: `Split.redis = 'redis://localhost:6379'`

Redis: `Split.redis = $redis`

For our Rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately.

Here's our `config/split.yml`:

```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```

And our initializer:

```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```

### Redis Caching (v4.0+)

In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load.

```ruby
Split.configuration.cache = true
```

This currently caches:
  - `Split::ExperimentCatalog.find`
  - `Split::Experiment.start_time`
  - `Split::Experiment.winner`

## Namespaces

If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients.

This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following:

1. Add `redis-namespace` to your Gemfile:

   ```ruby
   gem 'redis-namespace'
   ```

2. Configure `Split.redis` to use a `Redis::Namespace` instance (e.g. in an initializer):
   ```ruby
   redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
   Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
   ```

## Outside of a Web Session

Split provides the Helper module to facilitate running experiments inside web sessions.

Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session.

```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')

# create a new trial
trial = Split::Trial.new(:experiment => experiment)

# run trial
trial.choose!

# get the result, returns either red or blue
trial.alternative.name

# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
  trial.complete!
end
```

## Algorithms

By default, Split ships with `Split::Algorithms::WeightedSample`, which randomly selects from the possible alternatives for a traditional A/B test. It is possible to specify static weights to favor certain alternatives.

`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed.

`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum-participant alternatives (i.e. starting a new "Block"), the algorithm will choose a random alternative from those minimum-participant alternatives.

Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per-experiment basis using the experiments hash of the configuration file.

To change the algorithm globally for all experiments, use the following in your initializer:

```ruby
Split.configure do |config|
  config.algorithm = Split::Algorithms::Whiplash
end
```

## Extensions

  - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
  - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
  - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
  - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
  - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
  - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.

## Screencast

Ryan Bates has produced an excellent 10-minute screencast about Split on the RailsCasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)

## Blogposts

* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)

## Backers

Support us with a monthly donation and help us continue our activities.
[[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img 
src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img 
src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). <MSG> Clarify finished with first option being a hash We got this wrong. 
<DFF> @@ -553,6 +553,12 @@ To complete a goal conversion, you do it like: finished(link_color: "purchase") ``` +Note that if you pass additional options, that should be a separate hash: + +```ruby +finished({ link_color: "purchase" }, reset: false) +``` + **NOTE:** This does not mean that a single experiment can have/complete progressive goals. **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").
6
Clarify finished with first option being a hash
0
.md
md
mit
splitrb/split
10069672
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" 
do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") end end it "should allow passing a block" do alt = ab_test('link_color', 'blue', 'red') ret = ab_test('link_color', 'blue', 'red') { |alternative| "shared/#{alternative}" } alternative = ab_test("link_color", "blue") expect(alternative).to eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, 
"button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 doing_other_tests?(@experiment.key).should be false end end context "finished with config" do it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", "two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } } end it "should be passed to helper block" do expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count 
end it "should increment the counter for the completed alternative" do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should not increment the counter if reset is false and the experiment has been already finished" do 2.times { ab_finished(@experiment_name, { reset: false }) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" a = ab_test("button_size", "small", "big") expect(a).to eq("small") expect { ab_finished("button_size") }.not_to change { Split::Alternative.new(a, "button_size").completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end context "for an experiment that the user is excluded from" do before do alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do # So, user should be participating in the link_color experiment and # receive the control for button_size. As the user is not participating in # the button size experiment, finishing it should not increase the # completion count for that alternative. 
expect { ab_finished("button_size") }.not_to change { Split::Alternative.new("small", "button_size").completed_count } end end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, } should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric, reset: false expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end end describe "conversions" do it "should return a 
conversion rate for an alternative" do alternative_name = ab_test("link_color", "blue", "red") previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(previous_convertion_rate).to eq(0.0) ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", "5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = 
          Split::Alternative.new(alternative_name, "link_color").completed_count
        expect(new_completion_count).to eq(previous_completion_count)
      end
    end
  end

  describe "when providing custom ignore logic" do
    context "using a proc to configure custom logic" do
      before(:each) do
        Split.configure do |c|
          c.ignore_filter = proc { |request| true } # ignore everything
        end
      end

      it "ignores the ab_test" do
        ab_test("link_color", "blue", "red")

        red_count = Split::Alternative.new("red", "link_color").participant_count
        blue_count = Split::Alternative.new("blue", "link_color").participant_count
        expect((red_count + blue_count)).to be(0)
      end
    end
  end

  shared_examples_for "a disabled test" do
    describe "ab_test" do
      it "should return the control" do
        alternative = ab_test("link_color", "blue", "red")
        expect(alternative).to eq experiment.control.name
      end

      it "should not increment the participation count" do
        previous_red_count = Split::Alternative.new("red", "link_color").participant_count
        previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count

        ab_test("link_color", "blue", "red")

        new_red_count = Split::Alternative.new("red", "link_color").participant_count
        new_blue_count = Split::Alternative.new("blue", "link_color").participant_count

        expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)
      end
    end

    describe "finished" do
      it "should not increment the completed count" do
        alternative_name = ab_test("link_color", "blue", "red")
        previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count

        ab_finished("link_color")

        new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
        expect(new_completion_count).to eq(previous_completion_count)
      end
    end
  end

  describe "when ip address is ignored" do
    context "individually" do
      before(:each) do
        @request = OpenStruct.new(ip: "81.19.48.130")

        Split.configure do |c|
          c.ignore_ip_addresses << "81.19.48.130"
        end
      end

      it_behaves_like "a disabled test"
    end

    context "for a range" do
      before(:each) do
        @request = OpenStruct.new(ip: "81.19.48.129")

        Split.configure do |c|
          c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
        end
      end

      it_behaves_like "a disabled test"
    end

    context "using both a range and a specific value" do
      before(:each) do
        @request = OpenStruct.new(ip: "81.19.48.128")

        Split.configure do |c|
          c.ignore_ip_addresses << "81.19.48.130"
          c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
        end
      end

      it_behaves_like "a disabled test"
    end

    context "when ignored other address" do
      before do
        @request = OpenStruct.new(ip: "1.1.1.1")

        Split.configure do |c|
          c.ignore_ip_addresses << "81.19.48.130"
        end
      end

      it "works as usual" do
        alternative_name = ab_test("link_color", "red", "blue")
        expect {
          ab_finished("link_color")
        }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1)
      end
    end
  end

  describe "when user is previewing" do
    before(:each) do
      @request = OpenStruct.new(headers: { "x-purpose" => "preview" })
    end

    it_behaves_like "a disabled test"
  end

  describe "versioned experiments" do
    it "should use version zero if no version is present" do
      alternative_name = ab_test("link_color", "blue", "red")
      expect(experiment.version).to eq(0)
      expect(ab_user["link_color"]).to eq(alternative_name)
    end

    it "should save the version of the experiment to the session" do
      experiment.reset
      expect(experiment.version).to eq(1)
      alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color:1"]).to eq(alternative_name)
    end

    it "should load the experiment even if the version is not 0" do
      experiment.reset
      expect(experiment.version).to eq(1)
      alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color:1"]).to eq(alternative_name)

      return_alternative_name = ab_test("link_color", "blue", "red")
      expect(return_alternative_name).to eq(alternative_name)
    end

    it "should reset the session of a user on an older version of the experiment" do
      alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color"]).to eq(alternative_name)
      alternative = Split::Alternative.new(alternative_name, "link_color")
      expect(alternative.participant_count).to eq(1)

      experiment.reset
      expect(experiment.version).to eq(1)
      alternative = Split::Alternative.new(alternative_name, "link_color")
      expect(alternative.participant_count).to eq(0)

      new_alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color:1"]).to eq(new_alternative_name)
      new_alternative = Split::Alternative.new(new_alternative_name, "link_color")
      expect(new_alternative.participant_count).to eq(1)
    end

    it "should cleanup old versions of experiments from the session" do
      alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color"]).to eq(alternative_name)
      alternative = Split::Alternative.new(alternative_name, "link_color")
      expect(alternative.participant_count).to eq(1)

      experiment.reset
      expect(experiment.version).to eq(1)
      alternative = Split::Alternative.new(alternative_name, "link_color")
      expect(alternative.participant_count).to eq(0)

      new_alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color:1"]).to eq(new_alternative_name)
    end

    it "should only count completion of users on the current version" do
      alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color"]).to eq(alternative_name)
      Split::Alternative.new(alternative_name, "link_color")

      experiment.reset
      expect(experiment.version).to eq(1)

      ab_finished("link_color")
      alternative = Split::Alternative.new(alternative_name, "link_color")
      expect(alternative.completed_count).to eq(0)
    end
  end

  context "when redis is not available" do
    before(:each) do
      expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)
    end

    context "and db_failover config option is turned off" do
      before(:each) do
        Split.configure do |config|
          config.db_failover = false
        end
      end

      describe "ab_test" do
        it "should raise an exception" do
          expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED)
        end
      end

      describe "finished" do
        it "should raise an exception" do
          expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED)
        end
      end

      describe "disable split testing" do
        before(:each) do
          Split.configure do |config|
            config.enabled = false
          end
        end

        it "should not attempt to connect to redis" do
          expect { ab_test("link_color", "blue", "red") }.not_to raise_error
        end

        it "should return control variable" do
          expect(ab_test("link_color", "blue", "red")).to eq("blue")
          expect { ab_finished("link_color") }.not_to raise_error
        end
      end
    end

    context "and db_failover config option is turned on" do
      before(:each) do
        Split.configure do |config|
          config.db_failover = true
        end
      end

      describe "ab_test" do
        it "should not raise an exception" do
          expect { ab_test("link_color", "blue", "red") }.not_to raise_error
        end

        it "should call db_failover_on_db_error proc with error as parameter" do
          Split.configure do |config|
            config.db_failover_on_db_error = proc do |error|
              expect(error).to be_a(Errno::ECONNREFUSED)
            end
          end

          expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
          ab_test("link_color", "blue", "red")
        end

        it "should always use first alternative" do
          expect(ab_test("link_color", "blue", "red")).to eq("blue")
          expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue")
          expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue")
          expect(ab_test("link_color", "blue", "red") do |alternative|
            "shared/#{alternative}"
          end).to eq("shared/blue")
        end

        context "and db_failover_allow_parameter_override config option is turned on" do
          before(:each) do
            Split.configure do |config|
              config.db_failover_allow_parameter_override = true
            end
          end

          context "and given an override parameter" do
            it "should use given override instead of the first alternative" do
              @params = { "ab_test" => { "link_color" => "red" } }
              expect(ab_test("link_color", "blue", "red")).to eq("red")
              expect(ab_test("link_color", "blue", "red", "green")).to eq("red")
              expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red")
              expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red")
              expect(ab_test("link_color", "blue", "red") do |alternative|
                "shared/#{alternative}"
              end).to eq("shared/red")
            end
          end
        end

        context "and preloaded config given" do
          before do
            Split.configuration.experiments[:link_color] = {
              alternatives: [ "blue", "red" ],
            }
          end

          it "uses first alternative" do
            expect(ab_test(:link_color)).to eq("blue")
          end
        end
      end

      describe "finished" do
        it "should not raise an exception" do
          expect { ab_finished("link_color") }.not_to raise_error
        end

        it "should call db_failover_on_db_error proc with error as parameter" do
          Split.configure do |config|
            config.db_failover_on_db_error = proc do |error|
              expect(error).to be_a(Errno::ECONNREFUSED)
            end
          end

          expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
          ab_finished("link_color")
        end
      end
    end
  end

  context "with preloaded config" do
    before { Split.configuration.experiments = {} }

    it "pulls options from config file" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "other_opt" ],
        goals: ["goal1", "goal2"]
      }

      ab_test :my_experiment
      expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
      expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ])
    end

    it "can be called multiple times" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "other_opt" ],
        goals: ["goal1", "goal2"]
      }

      5.times { ab_test :my_experiment }
      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
      expect(experiment.goals).to eq([ "goal1", "goal2" ])
      expect(experiment.participant_count).to eq(1)
    end

    it "accepts multiple goals" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "other_opt" ],
        goals: [ "goal1", "goal2", "goal3" ]
      }

      ab_test :my_experiment
      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ])
    end

    it "allow specifying goals to be optional" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "other_opt" ]
      }

      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.goals).to eq([])
    end

    it "accepts multiple alternatives" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "second_opt", "third_opt" ],
      }

      ab_test :my_experiment
      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ])
    end

    it "accepts probability on alternatives" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [
          { name: "control_opt", percent: 67 },
          { name: "second_opt", percent: 10 },
          { name: "third_opt", percent: 23 },
        ],
      }

      ab_test :my_experiment
      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]])
    end

    it "accepts probability on some alternatives" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [
          { name: "control_opt", percent: 34 },
          "second_opt",
          { name: "third_opt", percent: 23 },
          "fourth_opt",
        ],
      }

      ab_test :my_experiment
      experiment = Split::Experiment.new(:my_experiment)
      names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
      expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]])
      expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
    end

    it "allows name param without probability" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [
          { name: "control_opt" },
          "second_opt",
          { name: "third_opt", percent: 64 },
        ],
      }

      ab_test :my_experiment
      experiment = Split::Experiment.new(:my_experiment)
      names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
      expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]])
      expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
    end

    it "fails gracefully if config is missing experiment" do
      Split.configuration.experiments = { other_experiment: { foo: "Bar" } }
      expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)
    end

    it "fails gracefully if config is missing" do
      expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)
    end

    it "fails gracefully if config is missing alternatives" do
      Split.configuration.experiments[:my_experiment] = { foo: "Bar" }
      expect { ab_test :my_experiment }.to raise_error(NoMethodError)
    end
  end

  it "should handle multiple experiments correctly" do
    experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red")
    ab_test("link_color", "blue", "red")
    ab_test("link_color2", "blue", "red")
    ab_finished("link_color2")

    experiment2.alternatives.each do |alt|
      expect(alt.unfinished_count).to eq(0)
    end
  end

  context "with goals" do
    before do
      @experiment = { "link_color" => ["purchase", "refund"] }
      @alternatives = ["blue", "red"]
      @experiment_name, @goals = normalize_metric(@experiment)
      @goal1 = @goals[0]
      @goal2 = @goals[1]
    end

    it "should normalize experiment" do
      expect(@experiment_name).to eq("link_color")
      expect(@goals).to eq(["purchase", "refund"])
    end

    describe "ab_test" do
      it "should allow experiment goals interface as a single hash" do
        ab_test(@experiment, *@alternatives)
        experiment = Split::ExperimentCatalog.find("link_color")
        expect(experiment.goals).to eq(["purchase", "refund"])
      end
    end

    describe "ab_finished" do
      before do
        @alternative_name = ab_test(@experiment, *@alternatives)
      end

      it "should increment the counter for the specified-goal completed alternative" do
        expect { ab_finished({ "link_color" => ["purchase"] }) }
          .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)
          .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)
      end
    end
  end
end
<MSG> Merge pull request #176 from bmarini/trial-hooks

Add hooks for Trial#choose! and Trial#complete!
<DFF> @@ -112,6 +112,14 @@ describe Split::Helper do
     end
   end
 
+  context "when on_trial_choose is set" do
+    before { Split.configuration.on_trial_choose = :some_method }
+    it "should call the method" do
+      self.should_receive(:some_method)
+      ab_test('link_color', 'blue', 'red')
+    end
+  end
+
   it "should allow passing a block" do
     alt = ab_test('link_color', 'blue', 'red')
     ret = ab_test('link_color', 'blue', 'red') { |alternative| "shared/#{alternative}" }
@@ -247,6 +255,14 @@ describe Split::Helper do
     doing_other_tests?(@experiment.key).should be false
   end
 
+  context "when on_trial_complete is set" do
+    before { Split.configuration.on_trial_complete = :some_method }
+    it "should call the method" do
+      self.should_receive(:some_method)
+      finished(@experiment_name)
+    end
+  end
+
 end
 
 context "finished with config" do
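For context, a minimal sketch of how an application might wire up the on_trial_choose and on_trial_complete hooks exercised by the diff above. The hook names come from the diff; the initializer path, the TrialLogging module, the logger calls, and the assumption that the callback receives the Trial instance are illustrative only and not taken from this commit.

# config/initializers/split.rb -- hedged sketch, not part of the commit above
Split.configure do |config|
  config.on_trial_choose   = :log_trial_choose   # called when a trial picks an alternative
  config.on_trial_complete = :log_trial_complete # called when a trial is finished
end

# Mixed into the context that calls ab_test / finished (e.g. a Rails controller).
module TrialLogging
  # Assumption: Split passes the Trial instance to the configured callback.
  def log_trial_choose(trial)
    Rails.logger.info("split: chose #{trial.alternative.name} for #{trial.experiment.name}")
  end

  def log_trial_complete(trial)
    Rails.logger.info("split: completed #{trial.experiment.name}")
  end
end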
16
Merge pull request #176 from bmarini/trial-hooks
0
.rb
rb
mit
splitrb/split
Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it "should load the experiment even if the version is not 0" 
do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end end context "and db_failover config option is turned on" do before(:each) do Split.configure do |config| config.db_failover = true end end describe "ab_test" do it "should not raise an exception" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to 
receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) 
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, 
@experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Merge pull request #176 from bmarini/trial-hooks Add hooks for Trial#choose! and Trial#complete! <DFF> @@ -112,6 +112,14 @@ describe Split::Helper do end end + context "when on_trial_choose is set" do + before { Split.configuration.on_trial_choose = :some_method } + it "should call the method" do + self.should_receive(:some_method) + ab_test('link_color', 'blue', 'red') + end + end + it "should allow passing a block" do alt = ab_test('link_color', 'blue', 'red') ret = ab_test('link_color', 'blue', 'red') { |alternative| "shared/#{alternative}" } @@ -247,6 +255,14 @@ describe Split::Helper do doing_other_tests?(@experiment.key).should be false end + context "when on_trial_complete is set" do + before { Split.configuration.on_trial_complete = :some_method } + it "should call the method" do + self.should_receive(:some_method) + finished(@experiment_name) + end + end + end context "finished with config" do
addition_count: 16
commit_subject: Merge pull request #176 from bmarini/trial-hooks
deletion_count: 0
file_extension: .rb
lang: rb
license: mit
repo_name: splitrb/split
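A minimal sketch, not part of the commit, of how an application might wire up the Trial hooks this PR introduces. The two configuration options mirror the spec and diff above; the controller, handler names, and log lines are illustrative assumptions.

Split.configure do |config|
  config.on_trial_choose   = :record_ab_assignment   # fired when an alternative is chosen for a user
  config.on_trial_complete = :record_ab_conversion   # fired when a trial is finished
end

class ApplicationController < ActionController::Base
  include Split::Helper

  private

    # Split calls the configured method on the context that includes Split::Helper;
    # the trial argument and these handler bodies are illustrative, not from the commit.
    def record_ab_assignment(trial)
      logger.info "split assigned #{trial.alternative.name} for #{trial.experiment.name}"
    end

    def record_ab_conversion(trial)
      logger.info "split completed #{trial.experiment.name}"
    end
end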
id: 10069675
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end small = Split::Alternative.new('small', 'button_size') small.participant_count.should eql(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" do experiment.winner = "orange" 
expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") @params = { "ab_test" => { "link_color" => "red" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true end link_color = 
ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", "two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } } end it "should be passed to helper block" do 
expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) end end end context 'and db_failover config option is turned on' do it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end context "for an experiment that the user is excluded from" do before do alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do end end end end end end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) 
ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, } should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric, reset: false expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end end describe "conversions" do it "should return a conversion rate for an alternative" do alternative_name = ab_test("link_color", "blue", "red") previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(previous_convertion_rate).to eq(0.0) ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should 
show a finished test" do alternative = ab_test("def", "4", "5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe 
"ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it "should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = 
Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end end context "and db_failover config option is turned on" do before(:each) do Split.configure do |config| config.db_failover = true end end describe "ab_test" do it "should not raise an exception" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should 
use given override instead of the first alternative" do
              @params = { "ab_test" => { "link_color" => "red" } }

              expect(ab_test("link_color", "blue", "red")).to eq("red")
              expect(ab_test("link_color", "blue", "red", "green")).to eq("red")
              expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red")
              expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red")
              expect(ab_test("link_color", "blue", "red") do |alternative|
                "shared/#{alternative}"
              end).to eq("shared/red")
            end
          end
        end

        context "and preloaded config given" do
          before do
            Split.configuration.experiments[:link_color] = {
              alternatives: [ "blue", "red" ],
            }
          end

          it "uses first alternative" do
            expect(ab_test(:link_color)).to eq("blue")
          end
        end
      end

      describe "finished" do
        it "should not raise an exception" do
          expect { ab_finished("link_color") }.not_to raise_error
        end

        it "should call db_failover_on_db_error proc with error as parameter" do
          Split.configure do |config|
            config.db_failover_on_db_error = proc do |error|
              expect(error).to be_a(Errno::ECONNREFUSED)
            end
          end

          expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
          ab_finished("link_color")
        end
      end
    end
  end

  context "with preloaded config" do
    before { Split.configuration.experiments = {} }

    it "pulls options from config file" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "other_opt" ],
        goals: ["goal1", "goal2"]
      }

      ab_test :my_experiment

      expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
      expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ])
    end

    it "can be called multiple times" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "other_opt" ],
        goals: ["goal1", "goal2"]
      }

      5.times { ab_test :my_experiment }

      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
      expect(experiment.goals).to eq([ "goal1", "goal2" ])
      expect(experiment.participant_count).to eq(1)
    end

    it "accepts multiple goals" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "other_opt" ],
        goals: [ "goal1", "goal2", "goal3" ]
      }

      ab_test :my_experiment

      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ])
    end

    it "allow specifying goals to be optional" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "other_opt" ]
      }

      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.goals).to eq([])
    end

    it "accepts multiple alternatives" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "second_opt", "third_opt" ],
      }

      ab_test :my_experiment

      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ])
    end

    it "accepts probability on alternatives" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [
          { name: "control_opt", percent: 67 },
          { name: "second_opt", percent: 10 },
          { name: "third_opt", percent: 23 },
        ],
      }

      ab_test :my_experiment

      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]])
    end

    it "accepts probability on some alternatives" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [
          { name: "control_opt", percent: 34 },
          "second_opt",
          { name: "third_opt", percent: 23 },
          "fourth_opt",
        ],
      }

      ab_test :my_experiment

      experiment = Split::Experiment.new(:my_experiment)
      names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
      expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]])
      expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
    end

    it "allows name param without probability" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [
          { name: "control_opt" },
          "second_opt",
          { name: "third_opt", percent: 64 },
        ],
      }

      ab_test :my_experiment

      experiment = Split::Experiment.new(:my_experiment)
      names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
      expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]])
      expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
    end

    it "fails gracefully if config is missing experiment" do
      Split.configuration.experiments = { other_experiment: { foo: "Bar" } }

      expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)
    end

    it "fails gracefully if config is missing" do
      expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)
    end

    it "fails gracefully if config is missing alternatives" do
      Split.configuration.experiments[:my_experiment] = { foo: "Bar" }

      expect { ab_test :my_experiment }.to raise_error(NoMethodError)
    end
  end

  it "should handle multiple experiments correctly" do
    experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red")
    ab_test("link_color", "blue", "red")
    ab_test("link_color2", "blue", "red")
    ab_finished("link_color2")

    experiment2.alternatives.each do |alt|
      expect(alt.unfinished_count).to eq(0)
    end
  end

  context "with goals" do
    before do
      @experiment = { "link_color" => ["purchase", "refund"] }
      @alternatives = ["blue", "red"]
      @experiment_name, @goals = normalize_metric(@experiment)
      @goal1 = @goals[0]
      @goal2 = @goals[1]
    end

    it "should normalize experiment" do
      expect(@experiment_name).to eq("link_color")
      expect(@goals).to eq(["purchase", "refund"])
    end

    describe "ab_test" do
      it "should allow experiment goals interface as a single hash" do
        ab_test(@experiment, *@alternatives)
        experiment = Split::ExperimentCatalog.find("link_color")
        expect(experiment.goals).to eq(["purchase", "refund"])
      end
    end

    describe "ab_finished" do
      before do
        @alternative_name = ab_test(@experiment, *@alternatives)
      end

      it "should increment the counter for the specified-goal completed alternative" do
        expect { ab_finished({ "link_color" => ["purchase"] }) }
          .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)
          .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)
      end
    end
  end
end
<MSG> create an option to disable split testing
<DFF> @@ -85,7 +85,7 @@ describe Split::Helper do
     small = Split::Alternative.new('small', 'button_size')
     small.participant_count.should eql(0)
   end
-
+
   it "should let a user participate in many experiment with allow_multiple_experiments option" do
     Split.configure do |config|
       config.allow_multiple_experiments = true
@@ -346,6 +346,37 @@ describe Split::Helper do
       end
     end
+      describe "disable split testing" do
+
+        before(:each) do
+          Split.configure do |config|
+            config.disable_split = true
+          end
+        end
+
+        after(:each) do
+          Split.configure do |config|
+            config.disable_split = false
+          end
+        end
+
+        it "should not attempt to connect to redis" do
+
+          lambda {
+            ab_test('link_color', 'blue', 'red')
+          }.should_not raise_error(Errno::ECONNREFUSED)
+        end
+
+        it "should return control variable" do
+          ab_test('link_color', 'blue', 'red').should eq('blue')
+          lambda {
+            finished('link_color')
+          }.should_not raise_error(Errno::ECONNREFUSED)
+        end
+
+      end
+
+      end
     context 'and db_failover config option is turned on' do
@@ -398,6 +429,7 @@ describe Split::Helper do
       end
     end
+      end
 end
33
create an option to disable split testing
1
.rb
rb
mit
splitrb/split
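The diff in the record above introduces a `disable_split` switch on Split's configuration so that `ab_test` short-circuits to the control alternative and never touches Redis. Below is a minimal, illustrative usage sketch only: it assumes `disable_split` is a plain accessor on `Split.configuration`, exactly as the added specs exercise it; later releases of the gem expose the same behaviour as `config.enabled = false`, which is what the `<BEF>` spec above uses.

# Illustrative sketch of the option added by this commit (assumed accessor: disable_split).
# With the flag on, ab_test should return the control alternative ('blue' here)
# and never raise Errno::ECONNREFUSED, even if Redis is unreachable.
Split.configure do |config|
  config.disable_split = true
end

ab_test('link_color', 'blue', 'red')  # => 'blue' (the control), no Redis connection
finished('link_color')                # likewise a no-op against Redis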
10069677
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end small = Split::Alternative.new('small', 'button_size') small.participant_count.should eql(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" do experiment.winner = "orange" 
expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") @params = { "ab_test" => { "link_color" => "red" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true end link_color = 
ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", "two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } } end it "should be passed to helper block" do 
expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) end end end context 'and db_failover config option is turned on' do it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end context "for an experiment that the user is excluded from" do before do alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do end end end end end end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) 
ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, } should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric, reset: false expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end end describe "conversions" do it "should return a conversion rate for an alternative" do alternative_name = ab_test("link_color", "blue", "red") previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(previous_convertion_rate).to eq(0.0) ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should 
show a finished test" do alternative = ab_test("def", "4", "5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe 
"ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it "should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = 
Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end end context "and db_failover config option is turned on" do before(:each) do Split.configure do |config| config.db_failover = true end end describe "ab_test" do it "should not raise an exception" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should 
use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 
}, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> create an option to disable split testing <DFF> @@ -85,7 +85,7 @@ describe Split::Helper do small = Split::Alternative.new('small', 'button_size') small.participant_count.should eql(0) end - + it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true @@ -346,6 +346,37 @@ describe Split::Helper do end end + describe "disable split testing" do + + before(:each) do + Split.configure do |config| + config.disable_split = true + end + end + + after(:each) do + Split.configure do |config| + config.disable_split = false + end + end + + it "should not 
attempt to connect to redis" do + + lambda { + ab_test('link_color', 'blue', 'red') + }.should_not raise_error(Errno::ECONNREFUSED) + end + + it "should return control variable" do + ab_test('link_color', 'blue', 'red').should eq('blue') + lambda { + finished('link_color') + }.should_not raise_error(Errno::ECONNREFUSED) + end + + end + + end context 'and db_failover config option is turned on' do @@ -398,6 +429,7 @@ describe Split::Helper do end end + end end
33
create an option to disable split testing
1
.rb
rb
mit
splitrb/split
10069678
<NME> lorem.ts <BEF> import { ok, strictEqual as equal } from 'assert'; import expand from '../src'; function wordCount(str: string): number { return str.split(' ').length; } function splitLines(str: string): string[] { return str.split(/\n/); } describe('Lorem Ipsum generator', () => { it('single', () => { let output = expand('lorem'); ok(/^Lorem,?\sipsum/.test(output)); ok(wordCount(output) > 20); output = expand('lorem5'); ok(/^Lorem,?\sipsum/.test(output)); equal(wordCount(output), 5); output = expand('loremru4'); ok(/^Далеко-далеко,?\sза,?\sсловесными/.test(output)); equal(wordCount(output), 4); output = expand('p>lorem'); ok(/^<p>Lorem,?\sipsum/.test(output)); // https://github.com/emmetio/expand-abbreviation/issues/24 output = expand('(p)lorem2'); ok(/^<p><\/p>\nLorem,?\sipsum/.test(output)); output = expand('p(lorem10)'); ok(/^<p><\/p>\nLorem,?\sipsum/.test(output)); }); it('multiple', () => { let output = expand('lorem6*3'); let lines = splitLines(output); ok(/^Lorem,?\sipsum/.test(output)); equal(lines.length, 3); output = expand('lorem6*2'); lines = splitLines(output); ok(/^Lorem,?\sipsum/.test(output)); equal(lines.length, 2); output = expand('p*3>lorem'); lines = splitLines(output); ok(/^<p>Lorem,?\sipsum/.test(lines[0]!)); ok(!/^<p>Lorem,?\sipsum/.test(lines[1]!)); output = expand('ul>lorem5*3', { options: { 'output.indent': '' } }); lines = splitLines(output); equal(lines.length, 5); ok(/^<li>Lorem,?\sipsum/.test(lines[1]!)); ok(!/^<li>Lorem,?\sipsum/.test(lines[2]!)); }); }); <MSG> Support lorem word range Closes #323 <DFF> @@ -19,6 +19,10 @@ describe('Lorem Ipsum generator', () => { ok(/^Lorem,?\sipsum/.test(output)); equal(wordCount(output), 5); + output = expand('lorem5-10'); + ok(/^Lorem,?\sipsum/.test(output)); + ok(wordCount(output) >= 5 && wordCount(output) <= 10); + output = expand('loremru4'); ok(/^Далеко-далеко,?\sза,?\sсловесными/.test(output)); equal(wordCount(output), 4);
4
Support lorem word range
0
.ts
ts
mit
emmetio/emmet
10069680
<NME> loadclassifiers.py <BEF> ADDFILE <MSG> Merge branch 'runeh/master' <DFF> @@ -0,0 +1,53 @@ +""" +Management command for loading all the known classifiers from the official +pypi, or from a file/url. + +Note, pypi docs says to not add classifiers that are not used in submitted +projects. On the other hand it can be usefull to have a list of classifiers +to choose if you have to modify package data. Use it if you need it. +""" + +from __future__ import with_statement +import urllib +import os.path + +from django.core.management.base import BaseCommand +from djangopypi.models import Classifier + +CLASSIFIERS_URL = "http://pypi.python.org/pypi?%3Aaction=list_classifiers" + +class Command(BaseCommand): + help = """Load all classifiers from pypi. If any arguments are given, +they will be used as paths or urls for classifiers instead of using the +official pypi list url""" + + def handle(self, *args, **options): + args = args or [CLASSIFIERS_URL] + + cnt = 0 + for location in args: + print "Loading %s" % location + lines = self._get_lines(location) + for name in lines: + c, created = Classifier.objects.get_or_create(name=name) + if created: + c.save() + cnt += 1 + + print "Added %s new classifiers from %s source(s)" % (cnt, len(args)) + + def _get_lines(self, location): + """Return a list of lines for a lication that can be a file or + a url. If path/url doesn't exist, returns an empty list""" + try: # This is dirty, but OK I think. both net and file ops raise IOE + if location.startswith(("http://", "https://")): + fp = urllib.urlopen(location) + return [e.strip() for e in fp.read().split('\n') + if e and not e.isspace()] + else: + fp = open(location) + return [e.strip() for e in fp.readlines() + if e and not e.isspace()] + except IOError: + print "Couldn't load %s" % location + return []
53
Merge branch 'runeh/master'
0
.py
py
bsd-3-clause
ask/chishop
10069681
<NME> session_adapter_spec.rb <BEF> # frozen_string_literal: true describe Split::Persistence::SessionAdapter do let(:context) { mock(session: {}) } subject { Split::Persistence::SessionAdapter.new(context) } describe "#[] and #[]=" do describe "#[] and #[]=" do it "should set and return the value for given key" do subject["my_key"] = "my_value" expect(subject["my_key"]).to eq("my_value") end end describe "#delete" do it "should delete the given key" do subject["my_key"] = "my_value" subject.delete("my_key") expect(subject["my_key"]).to be_nil end end describe "#keys" do it "should return an array of the session's stored keys" do subject["my_key"] = "my_value" subject["my_second_key"] = "my_second_value" expect(subject.keys).to match(["my_key", "my_second_key"]) end end end <MSG> Replaces 1.9 style hashes with 1.8 <DFF> @@ -2,7 +2,7 @@ require "spec_helper" describe Split::Persistence::SessionAdapter do - let(:context) { mock(session: {}) } + let(:context) { mock(:session => {}) } subject { Split::Persistence::SessionAdapter.new(context) } describe "#[] and #[]=" do
1
Replaces 1.9 style hashes with 1.8
1
.rb
rb
mit
splitrb/split
10069684
<NME> experiment_spec.rb <BEF> ADDFILE <MSG> Pseudo coding up the experiment class <DFF> @@ -0,0 +1,27 @@ +require 'spec_helper' +require 'multivariate/experiment' + +describe Multivariate::Experiment do + it "should have a name" do + experiment = Multivariate::Experiment.new('basket_text', 'Basket', "Cart") + experiment.name.should eql('basket_text') + end + + it "should have an alternatives" do + experiment = Multivariate::Experiment.new('basket_text', 'Basket', "Cart") + experiment.alternatives.should eql(['Basket', "Cart"]) + end + + it "should save to redis" do + experiment = Multivariate::Experiment.new('basket_text', 'Basket', "Cart") + experiment.save + REDIS.exists('basket_text').should be true + end + + it "should return an existing experiment" do + experiment = Multivariate::Experiment.new('basket_text', 'Basket', "Cart") + experiment.save + Multivariate::Experiment.find('basket_text').name.should eql('basket_text') + end +end +
27
Pseudo coding up the experiment class
0
.rb
rb
mit
splitrb/split
10069687
<NME> split.gemspec <BEF> # -*- encoding: utf-8 -*- # frozen_string_literal: true $:.push File.expand_path("../lib", __FILE__) require "split/version" Gem::Specification.new do |s| s.name = "split" s.version = Split::VERSION s.platform = Gem::Platform::RUBY s.authors = ["Andrew Nesbitt"] s.licenses = ["MIT"] s.email = ["[email protected]"] s.homepage = "https://github.com/splitrb/split" s.summary = "Rack based split testing framework" s.metadata = { "homepage_uri" => "https://github.com/splitrb/split", "changelog_uri" => "https://github.com/splitrb/split/blob/main/CHANGELOG.md", "source_code_uri" => "https://github.com/splitrb/split", "bug_tracker_uri" => "https://github.com/splitrb/split/issues", "wiki_uri" => "https://github.com/splitrb/split/wiki", "mailing_list_uri" => "https://groups.google.com/d/forum/split-ruby" } s.required_ruby_version = ">= 2.5.0" s.required_rubygems_version = ">= 2.0.0" s.files = `git ls-files`.split("\n") s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") s.add_development_dependency 'rake', '~> 11.1' s.add_development_dependency 'rspec', '~> 3.4' s.add_development_dependency 'pry', '~> 0.10' s.add_development_dependency 'fakeredis', '~> 0.5.0' end s.add_development_dependency "bundler", ">= 1.17" s.add_development_dependency "simplecov", "~> 0.15" s.add_development_dependency "rack-test", "~> 2.0" s.add_development_dependency "rake", "~> 13" s.add_development_dependency "rspec", "~> 3.7" s.add_development_dependency "pry", "~> 0.10" s.add_development_dependency "rails", ">= 5.0" end <MSG> Update fakeredis, fixes #428 <DFF> @@ -31,5 +31,5 @@ Gem::Specification.new do |s| s.add_development_dependency 'rake', '~> 11.1' s.add_development_dependency 'rspec', '~> 3.4' s.add_development_dependency 'pry', '~> 0.10' - s.add_development_dependency 'fakeredis', '~> 0.5.0' + s.add_development_dependency 'fakeredis', '~> 0.6.0' end
1
Update fakeredis, fixes #428
1
.gemspec
gemspec
mit
splitrb/split
10069690
<NME> user_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" require "split/experiment_catalog" require "split/experiment" require "split/user" describe Split::User do let(:user_keys) { { "link_color" => "blue" } } let(:context) { double(session: { split: user_keys }) } let(:experiment) { Split::Experiment.new("link_color") } before(:each) do @subject = described_class.new(context) end end context '#cleanup_old_versions!' do let(:user_keys) { { 'link_color:1' => 'blue' } } it 'removes key if old experiment is found' do @subject.cleanup_old_versions!(experiment) expect(@subject.keys).to be_empty end end end context "#cleanup_old_experiments!" do it "removes key if experiment is not found" do @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end it "removes key if experiment has a winner" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(experiment).to receive(:start_time).and_return(Date.today) allow(experiment).to receive(:has_winner?).and_return(true) @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end it "removes key if experiment has not started yet" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(experiment).to receive(:has_winner?).and_return(false) @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end context "with finished key" do let(:user_keys) { { "link_color" => "blue", "link_color:finished" => true } } it "does not remove finished key for experiment without a winner" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(Split::ExperimentCatalog).to receive(:find).with("link_color:finished").and_return(nil) allow(experiment).to receive(:start_time).and_return(Date.today) allow(experiment).to receive(:has_winner?).and_return(false) @subject.cleanup_old_experiments! expect(@subject.keys).to include("link_color") expect(@subject.keys).to include("link_color:finished") end end context "when already cleaned up" do before do @subject.cleanup_old_experiments! end it "does not clean up again" do expect(@subject).to_not receive(:keys_without_finished) @subject.cleanup_old_experiments! end end end context "allows user to be loaded from adapter" do it "loads user from adapter (RedisAdapter)" do user = Split::Persistence::RedisAdapter.new(nil, 112233) user["foo"] = "bar" ab_user = Split::User.find(112233, :redis) expect(ab_user["foo"]).to eql("bar") end it "returns nil if adapter does not implement a finder method" do ab_user = Split::User.find(112233, :dual_adapter) expect(ab_user).to be_nil end end context "instantiated with custom adapter" do let(:custom_adapter) { double(:persistence_adapter) } before do @subject = described_class.new(context, custom_adapter) end it "sets user to the custom adapter" do expect(@subject.user).to eq(custom_adapter) end end end <MSG> Merge pull request #661 from serggl/cleanup_old_versions Fix cleanup_old_versions! misbehaviour <DFF> @@ -17,11 +17,25 @@ describe Split::User do end context '#cleanup_old_versions!' 
do - let(:user_keys) { { 'link_color:1' => 'blue' } } + let(:experiment_version) { "#{experiment.name}:1" } + let(:second_experiment_version) { "#{experiment.name}_another:1" } + let(:third_experiment_version) { "variation_of_#{experiment.name}:1" } + let(:user_keys) do + { + experiment_version => 'blue', + second_experiment_version => 'red', + third_experiment_version => 'yellow' + } + end + + before(:each) { @subject.cleanup_old_versions!(experiment) } it 'removes key if old experiment is found' do - @subject.cleanup_old_versions!(experiment) - expect(@subject.keys).to be_empty + expect(@subject.keys).not_to include(experiment_version) + end + + it 'does not remove other keys' do + expect(@subject.keys).to include(second_experiment_version, third_experiment_version) end end
17
Merge pull request #661 from serggl/cleanup_old_versions
3
.rb
rb
mit
splitrb/split
10069693
<NME> Gemfile <BEF> source "http://rubygems.org" gemspec gemspec gem "rubocop", require: false gem "matrix" gem "codeclimate-test-reporter" <MSG> Merge pull request #137 from cifarelli/patch-1 Use secure Rubygems URL <DFF> @@ -1,3 +1,3 @@ -source "http://rubygems.org" +source "https://rubygems.org" gemspec
1
Merge pull request #137 from cifarelli/patch-1
1
Gemfile
mit
splitrb/split
10069696
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" 
do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end finished('link_color', :reset => false) session[:split].should eql("link_color" => alternative_name) end end describe 'conversions' do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = 
Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", "two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } } end it "should be passed to helper block" do expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end 
expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it "should increment the counter for the completed alternative" do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should not increment the counter if reset is false and the experiment has been already finished" do 2.times { ab_finished(@experiment_name, { reset: false }) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" a = ab_test("button_size", "small", "big") expect(a).to eq("small") expect { ab_finished("button_size") }.not_to change { Split::Alternative.new(a, "button_size").completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end context "for an experiment that the user is excluded from" do before do alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do # So, user should be participating in the link_color experiment and # receive the control for button_size. 
As the user is not participating in # the button size experiment, finishing it should not increase the # completion count for that alternative. expect { ab_finished("button_size") }.not_to change { Split::Alternative.new("small", "button_size").completed_count } end end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, } should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric, reset: false 
expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end end describe "conversions" do it "should return a conversion rate for an alternative" do alternative_name = ab_test("link_color", "blue", "red") previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(previous_convertion_rate).to eq(0.0) ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", "5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") 
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = 
ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it "should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end end context "and db_failover config option is turned on" do before(:each) do Split.configure do |config| config.db_failover = true end end describe "ab_test" do it "should not raise an exception" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| 
config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do 
Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to 
change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Merge pull request #27 from econsultancy/26-handle-unstarted-experiments 26 handle unstarted experiments <DFF> @@ -105,6 +105,11 @@ describe Split::Helper do finished('link_color', :reset => false) session[:split].should eql("link_color" => alternative_name) end + + it "should do nothing where the experiment was not started by this user" do + session[:split] = nil + lambda { finished('some_experiment_not_started_by_the_user') }.should_not raise_exception + end end describe 'conversions' do
5
Merge pull request #27 from econsultancy/26-handle-unstarted-experiments
0
.rb
rb
mit
splitrb/split
10069699
<NME> alternative.rb <BEF> # frozen_string_literal: true module Split class Alternative attr_accessor :name attr_accessor :experiment_name attr_accessor :weight attr_accessor :recorded_info def initialize(name, experiment_name) @experiment_name = experiment_name if Hash === name @name = name.keys.first @weight = name.values.first else @name = name @weight = 1 end @p_winner = 0.0 end def to_s name end def goals self.experiment.goals end def p_winner(goal = nil) field = set_prob_field(goal) @p_winner = Split.redis.hget(key, field).to_f end def set_p_winner(prob, goal = nil) field = set_prob_field(goal) Split.redis.hset(key, field, prob.to_f) end def participant_count Split.redis.hget(key, "participant_count").to_i end def participant_count=(count) Split.redis.hset(key, "participant_count", count.to_i) end def completed_count(goal = nil) field = set_field(goal) Split.redis.hget(key, field).to_i end def all_completed_count if goals.empty? completed_count else goals.inject(completed_count) do |sum, g| sum + completed_count(g) end end end def unfinished_count participant_count - all_completed_count end def set_field(goal) field = "completed_count" field += ":" + goal unless goal.nil? field end def set_prob_field(goal) field = "p_winner" field += ":" + goal unless goal.nil? field end def set_completed_count(count, goal = nil) field = set_field(goal) Split.redis.hset(key, field, count.to_i) end def increment_participation Split.redis.hincrby key, "participant_count", 1 end def increment_completion(goal = nil) field = set_field(goal) Split.redis.hincrby(key, field, 1) end def control? experiment.control.name == self.name end def conversion_rate(goal = nil) return 0 if participant_count.zero? (completed_count(goal).to_f)/participant_count.to_f end def experiment Split::ExperimentCatalog.find(experiment_name) end def z_score(goal = nil) # p_a = Pa = proportion of users who converted within the experiment split (conversion rate) # p_c = Pc = proportion of users who converted within the control split (conversion rate) # n_a = Na = the number of impressions within the experiment split # n_c = Nc = the number of impressions within the control split control = experiment.control alternative = self end def validate! #TODO unless String === @name || self.class.hash_with_correct_values?(@name) raise ArgumentError, 'Alternative must be a string' end end # can't calculate zscore for P(x) > 1 return "N/A" if p_a > 1 || p_c > 1 Split::Zscore.calculate(p_a, n_a, p_c, n_c) end def extra_info data = Split.redis.hget(key, "recorded_info") if data && data.length > 1 begin JSON.parse(data) Split.redis.del(key) end def self.valid?(name) String === name || hash_with_correct_values?(name) end def self.hash_with_correct_values?(name) Hash === name && String === name.keys.first && Float(name.values.first) rescue false end private def key "#{experiment_name}:#{name}" end @recorded_info[k] ||= 0 @recorded_info[k] += value else @recorded_info[k] = value end Split.redis.hset key, "recorded_info", (@recorded_info || {}).to_json end def save Split.redis.hsetnx key, "participant_count", 0 Split.redis.hsetnx key, "completed_count", 0 Split.redis.hsetnx key, "p_winner", p_winner Split.redis.hsetnx key, "recorded_info", (@recorded_info || {}).to_json end def validate! unless String === @name || hash_with_correct_values?(@name) raise ArgumentError, "Alternative must be a string" end end def reset Split.redis.hmset key, "participant_count", 0, "completed_count", 0, "recorded_info", "" unless goals.empty? 
goals.each do |g| field = "completed_count:#{g}" Split.redis.hset key, field, 0 end end end def delete Split.redis.del(key) end private def hash_with_correct_values?(name) Hash === name && String === name.keys.first && Float(name.values.first) rescue false end def key "#{experiment_name}:#{name}" end end end <MSG> Refactor Alternative a little <DFF> @@ -115,8 +115,7 @@ module Split end def validate! - #TODO - unless String === @name || self.class.hash_with_correct_values?(@name) + unless String === @name || hash_with_correct_values?(@name) raise ArgumentError, 'Alternative must be a string' end end @@ -135,16 +134,12 @@ module Split Split.redis.del(key) end - def self.valid?(name) - String === name || hash_with_correct_values?(name) - end + private - def self.hash_with_correct_values?(name) + def hash_with_correct_values?(name) Hash === name && String === name.keys.first && Float(name.values.first) rescue false end - private - def key "#{experiment_name}:#{name}" end
3
Refactor Alternative a little
8
.rb
rb
mit
splitrb/split
10069702
<NME> alternative.rb <BEF> ADDFILE <MSG> pseudo alternative class <DFF> @@ -0,0 +1,17 @@ +module Multivariate + class Alternative + attr_accessor :name + + def initialize(name) + @name = name + end + + def participant_count + # number of users who have been given this alternative + end + + def completed_count + # number of users who have finished an experiment with this alternative + end + end +end \ No newline at end of file
17
pseudo alternative class
0
.rb
rb
mit
splitrb/split
10069705
<NME> split.gemspec <BEF> # -*- encoding: utf-8 -*- # frozen_string_literal: true $:.push File.expand_path("../lib", __FILE__) require "split/version" Gem::Specification.new do |s| s.name = "split" s.version = Split::VERSION s.platform = Gem::Platform::RUBY s.authors = ["Andrew Nesbitt"] s.licenses = ["MIT"] s.email = ["[email protected]"] s.homepage = "https://github.com/splitrb/split" s.summary = "Rack based split testing framework" s.metadata = { "homepage_uri" => "https://github.com/splitrb/split", "changelog_uri" => "https://github.com/splitrb/split/blob/main/CHANGELOG.md", "source_code_uri" => "https://github.com/splitrb/split", "bug_tracker_uri" => "https://github.com/splitrb/split/issues", s.add_dependency 'redis-namespace', '~> 1.0.3' s.add_dependency 'sinatra', '~> 1.2.6' s.add_development_dependency 'rspec', '~> 2.6' s.add_development_dependency 'rack-test', '~> 0.6' end s.files = `git ls-files`.split("\n") s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") s.require_paths = ["lib"] s.add_dependency "redis", ">= 4.2" s.add_dependency "sinatra", ">= 1.2.6" s.add_dependency "rubystats", ">= 0.3.0" s.add_development_dependency "bundler", ">= 1.17" s.add_development_dependency "simplecov", "~> 0.15" s.add_development_dependency "rack-test", "~> 2.0" s.add_development_dependency "rake", "~> 13" s.add_development_dependency "rspec", "~> 3.7" s.add_development_dependency "pry", "~> 0.10" s.add_development_dependency "rails", ">= 5.0" end <MSG> Added bundler as a development dependency <DFF> @@ -22,6 +22,7 @@ Gem::Specification.new do |s| s.add_dependency 'redis-namespace', '~> 1.0.3' s.add_dependency 'sinatra', '~> 1.2.6' - s.add_development_dependency 'rspec', '~> 2.6' - s.add_development_dependency 'rack-test', '~> 0.6' + s.add_development_dependency 'bundler', '~> 1.0' + s.add_development_dependency 'rspec', '~> 2.6' + s.add_development_dependency 'rack-test', '~> 0.6' end
3
Added bundler as a development dependency
2
.gemspec
gemspec
mit
splitrb/split
10069708
<NME> transducers.js <BEF> // basic protocol helpers var symbolExists = typeof Symbol !== 'undefined'; var protocols = { iterator: symbolExists ? Symbol.iterator : '@@iterator' }; function throwProtocolError(name, coll) { throw new Error("don't know how to " + name + " collection: " + coll); return names.reduce(function(result, name) { if(symbolExists) { if(name === 'iterator') { // Accept ill-formed iterators that don'y conform to the // protocol by accepting just next() return result && (obj[Symbol.iterator] || obj.next); } } return obj[protocols[name]]; } function getProtocolProperty(obj, name) { return obj[protocols[name]]; } function iterator(coll) { var iter = getProtocolProperty(coll, 'iterator'); if(iter) { return iter.call(coll); } else if(coll.next) { // Basic duck typing to accept an ill-formed iterator that doesn't // conform to the iterator protocol (all iterators should have the // @@iterator method and return themselves, but some engines don't // have that on generators like older v8) return coll; } else if(isArray(coll)) { return new ArrayIterator(coll); } else if(isObject(coll)) { return new ObjectIterator(coll); } } function ArrayIterator(arr) { this.arr = arr; this.index = 0; } ArrayIterator.prototype.next = function() { if(this.index < this.arr.length) { return { value: this.arr[this.index++], done: false }; } return { done: true } function isFunction(x) { typeof x === 'function'; } function isObject(x) { } ObjectIterator.prototype.next = function() { if(this.index < this.keys.length) { var k = this.keys[this.index++]; return { value: [k, this.obj[k]], done: false }; } return { done: true } }; // helpers var toString = Object.prototype.toString; var isArray = typeof Array.isArray === 'function' ? Array.isArray : function(obj) { return toString.call(obj) == '[object Array]'; }; function isFunction(x) { return typeof x === 'function'; } function isObject(x) { return x instanceof Object && Object.getPrototypeOf(x) === Object.getPrototypeOf({}); } function isNumber(x) { return typeof x === 'number'; } function Reduced(value) { this['@@transducer/reduced'] = true; this['@@transducer/value'] = value; } function isReduced(x) { return (x instanceof Reduced) || (x && x['@@transducer/reduced']); } function deref(x) { return x['@@transducer/value']; } /** * This is for transforms that may call their nested transforms before * Reduced-wrapping the result (e.g. "take"), to avoid nested Reduced. */ function ensureReduced(val) { if(isReduced(val)) { return val; } else { return new Reduced(val); } } /** * This is for tranforms that call their nested transforms when * performing completion (like "partition"), to avoid signaling * termination after already completing. 
*/ function ensureUnreduced(v) { if(isReduced(v)) { return deref(v); } else { return v; } } function reduce(coll, xform, init) { if(isArray(coll)) { var result = init; var index = -1; var len = coll.length; while(++index < len) { result = xform['@@transducer/step'](result, coll[index]); if(isReduced(result)) { result = deref(result); break; } } return xform['@@transducer/result'](result); } else if(isObject(coll) || fulfillsProtocol(coll, 'iterator')) { var result = init; var iter = iterator(coll); var val = iter.next(); while(!val.done) { result = xform['@@transducer/step'](result, val.value); if(isReduced(result)) { result = deref(result); break; } val = iter.next(); } return xform['@@transducer/result'](result); } throwProtocolError('iterate', coll); } function transduce(coll, xform, reducer, init) { xform = xform(reducer); if(init === undefined) { init = xform['@@transducer/init'](); } return reduce(coll, xform, init); } function compose() { var funcs = Array.prototype.slice.call(arguments); return function(r) { var value = r; for(var i=funcs.length-1; i>=0; i--) { value = funcs[i](value); } return value; } } // transformations function transformer(f) { var t = {}; t['@@transducer/init'] = function() { throw new Error('init value unavailable'); }; t['@@transducer/result'] = function(v) { return v; }; t['@@transducer/step'] = f; return t; } function bound(f, ctx, count) { count = count != null ? count : 1; if(!ctx) { return f; } else { switch(count) { case 1: return function(x) { return f.call(ctx, x); } case 2: return function(x, y) { return f.call(ctx, x, y); } default: return f.bind(ctx); } } } function arrayMap(arr, f, ctx) { var index = -1; var length = arr.length; var result = Array(length); f = bound(f, ctx, 2); while (++index < length) { result[index] = f(arr[index], index); } return result; } function arrayFilter(arr, f, ctx) { var len = arr.length; var result = []; f = bound(f, ctx, 2); for(var i=0; i<len; i++) { if(f(arr[i], i)) { result.push(arr[i]); } } return result; } function Map(f, xform) { this.xform = xform; this.f = f; } Map.prototype['@@transducer/init'] = function() { return this.xform['@@transducer/init'](); }; Map.prototype['@@transducer/result'] = function(v) { return this.xform['@@transducer/result'](v); }; Map.prototype['@@transducer/step'] = function(res, input) { return this.xform['@@transducer/step'](res, this.f(input)); }; function map(coll, f, ctx) { if(isFunction(coll)) { ctx = f; f = coll; coll = null; } f = bound(f, ctx); if(coll) { if(isArray(coll)) { return arrayMap(coll, f, ctx); } return seq(coll, map(f)); } return function(xform) { return new Map(f, xform); } } function Filter(f, xform) { this.xform = xform; this.f = f; } Filter.prototype['@@transducer/init'] = function() { return this.xform['@@transducer/init'](); }; Filter.prototype['@@transducer/result'] = function(v) { return this.xform['@@transducer/result'](v); }; Filter.prototype['@@transducer/step'] = function(res, input) { if(this.f(input)) { return this.xform['@@transducer/step'](res, input); } return res; }; function filter(coll, f, ctx) { if(isFunction(coll)) { ctx = f; f = coll; coll = null; } f = bound(f, ctx); if(coll) { if(isArray(coll)) { return arrayFilter(coll, f, ctx); } return seq(coll, filter(f)); } return function(xform) { return new Filter(f, xform); }; } function remove(coll, f, ctx) { if(isFunction(coll)) { ctx = f; f = coll; coll = null; } f = bound(f, ctx); return filter(coll, function(x) { return !f(x); }); } function keep(coll) { return filter(coll, function(x) { 
return x != null }); } function Dedupe(xform) { this.xform = xform; this.last = undefined; } Dedupe.prototype['@@transducer/init'] = function() { return this.xform['@@transducer/init'](); }; Dedupe.prototype['@@transducer/result'] = function(v) { return this.xform['@@transducer/result'](v); }; Dedupe.prototype['@@transducer/step'] = function(result, input) { if(input !== this.last) { this.last = input; return this.xform['@@transducer/step'](result, input); } return result; }; function dedupe(coll) { if(coll) { return seq(coll, dedupe()); } return function(xform) { return new Dedupe(xform); } } function TakeWhile(f, xform) { this.xform = xform; this.f = f; } TakeWhile.prototype['@@transducer/init'] = function() { return this.xform['@@transducer/init'](); }; TakeWhile.prototype['@@transducer/result'] = function(v) { return this.xform['@@transducer/result'](v); }; TakeWhile.prototype['@@transducer/step'] = function(result, input) { if(this.f(input)) { return this.xform['@@transducer/step'](result, input); } return new Reduced(result); }; function takeWhile(coll, f, ctx) { if(isFunction(coll)) { ctx = f; f = coll; coll = null; } f = bound(f, ctx); if(coll) { return seq(coll, takeWhile(f)); } return function(xform) { return new TakeWhile(f, xform); } } function Take(n, xform) { this.n = n; this.i = 0; this.xform = xform; } Take.prototype['@@transducer/init'] = function() { return this.xform['@@transducer/init'](); }; Take.prototype['@@transducer/result'] = function(v) { return this.xform['@@transducer/result'](v); }; Take.prototype['@@transducer/step'] = function(result, input) { if (this.i < this.n) { result = this.xform['@@transducer/step'](result, input); if(this.i + 1 >= this.n) { // Finish reducing on the same step as the final value. TODO: // double-check that this doesn't break any semantics result = ensureReduced(result); } } this.i++; return result; }; function take(coll, n) { if(isNumber(coll)) { n = coll; coll = null } if(coll) { return seq(coll, take(n)); } return function(xform) { return new Take(n, xform); } } function Drop(n, xform) { this.n = n; this.i = 0; this.xform = xform; } Drop.prototype['@@transducer/init'] = function() { return this.xform['@@transducer/init'](); }; Drop.prototype['@@transducer/result'] = function(v) { return this.xform['@@transducer/result'](v); }; Drop.prototype['@@transducer/step'] = function(result, input) { if(this.i++ < this.n) { return result; } return this.xform['@@transducer/step'](result, input); }; function drop(coll, n) { if(isNumber(coll)) { n = coll; coll = null } if(coll) { return seq(coll, drop(n)); } return function(xform) { return new Drop(n, xform); } } function DropWhile(f, xform) { this.xform = xform; this.f = f; this.dropping = true; } DropWhile.prototype['@@transducer/init'] = function() { return this.xform['@@transducer/init'](); }; DropWhile.prototype['@@transducer/result'] = function(v) { return this.xform['@@transducer/result'](v); }; DropWhile.prototype['@@transducer/step'] = function(result, input) { if(this.dropping) { if(this.f(input)) { return result; } else { this.dropping = false; } } return this.xform['@@transducer/step'](result, input); }; function dropWhile(coll, f, ctx) { if(isFunction(coll)) { ctx = f; f = coll; coll = null; } f = bound(f, ctx); if(coll) { return seq(coll, dropWhile(f)); } return function(xform) { return new DropWhile(f, xform); } } function Partition(n, xform) { this.n = n; this.i = 0; this.xform = xform; this.part = new Array(n); } Partition.prototype['@@transducer/init'] = function() { return 
this.xform['@@transducer/init'](); }; Partition.prototype['@@transducer/result'] = function(v) { if (this.i > 0) { return ensureUnreduced(this.xform['@@transducer/step'](v, this.part.slice(0, this.i))); } return this.xform['@@transducer/result'](v); }; Partition.prototype['@@transducer/step'] = function(result, input) { this.part[this.i] = input; this.i += 1; if (this.i === this.n) { var out = this.part.slice(0, this.n); this.part = new Array(this.n); this.i = 0; return this.xform['@@transducer/step'](result, out); } return result; }; function partition(coll, n) { if (isNumber(coll)) { n = coll; coll = null; } if (coll) { return seq(coll, partition(n)); } return function(xform) { return new Partition(n, xform); }; } var NOTHING = {}; function PartitionBy(f, xform) { // TODO: take an "opts" object that allows the user to specify // equality this.f = f; this.xform = xform; this.part = []; this.last = NOTHING; } PartitionBy.prototype['@@transducer/init'] = function() { return this.xform['@@transducer/init'](); }; PartitionBy.prototype['@@transducer/result'] = function(v) { var l = this.part.length; if (l > 0) { return ensureUnreduced(this.xform['@@transducer/step'](v, this.part.slice(0, l))); } return this.xform['@@transducer/result'](v); }; PartitionBy.prototype['@@transducer/step'] = function(result, input) { var current = this.f(input); if (current === this.last || this.last === NOTHING) { this.part.push(input); } else { result = this.xform['@@transducer/step'](result, this.part); this.part = [input]; } this.last = current; return result; }; function partitionBy(coll, f, ctx) { if (isFunction(coll)) { ctx = f; f = coll; coll = null; } f = bound(f, ctx); if (coll) { return seq(coll, partitionBy(f)); } return function(xform) { return new PartitionBy(f, xform); }; } function Interpose(sep, xform) { this.sep = sep; this.xform = xform; this.started = false; } Interpose.prototype['@@transducer/init'] = function() { return this.xform['@@transducer/init'](); }; Interpose.prototype['@@transducer/result'] = function(v) { return this.xform['@@transducer/result'](v); }; Interpose.prototype['@@transducer/step'] = function(result, input) { if (this.started) { var withSep = this.xform['@@transducer/step'](result, this.sep); if (isReduced(withSep)) { return withSep; } else { return this.xform['@@transducer/step'](withSep, input); } } else { this.started = true; return this.xform['@@transducer/step'](result, input); } }; /** * Returns a new collection containing elements of the given * collection, separated by the specified separator. Returns a * transducer if a collection is not provided. */ function interpose(coll, separator) { if (arguments.length === 1) { separator = coll; return function(xform) { return new Interpose(separator, xform); }; } return seq(coll, interpose(separator)); } function Repeat(n, xform) { this.xform = xform; this.n = n; } Repeat.prototype['@@transducer/init'] = function() { return this.xform['@@transducer/init'](); }; Repeat.prototype['@@transducer/result'] = function(v) { return this.xform['@@transducer/result'](v); }; Repeat.prototype['@@transducer/step'] = function(result, input) { var n = this.n; var r = result; for (var i = 0; i < n; i++) { r = this.xform['@@transducer/step'](r, input); if (isReduced(r)) { break; } } return r; }; /** * Returns a new collection containing elements of the given * collection, each repeated n times. Returns a transducer if a * collection is not provided. 
*/ function repeat(coll, n) { if (arguments.length === 1) { n = coll; return function(xform) { return new Repeat(n, xform); }; } return seq(coll, repeat(n)); } function TakeNth(n, xform) { this.xform = xform; this.n = n; this.i = -1; } TakeNth.prototype['@@transducer/init'] = function() { return this.xform['@@transducer/init'](); }; TakeNth.prototype['@@transducer/result'] = function(v) { return this.xform['@@transducer/result'](v); }; TakeNth.prototype['@@transducer/step'] = function(result, input) { this.i += 1; if (this.i % this.n === 0) { return this.xform['@@transducer/step'](result, input); } return result; }; /** * Returns a new collection of every nth element of the given * collection. Returns a transducer if a collection is not provided. */ function takeNth(coll, nth) { if (arguments.length === 1) { nth = coll; return function(xform) { return new TakeNth(nth, xform); }; } return seq(coll, takeNth(nth)); } // pure transducers (cannot take collections) function Cat(xform) { this.xform = xform; } Cat.prototype['@@transducer/init'] = function() { return this.xform['@@transducer/init'](); }; Cat.prototype['@@transducer/result'] = function(v) { return this.xform['@@transducer/result'](v); }; Cat.prototype['@@transducer/step'] = function(result, input) { var xform = this.xform; var newxform = {}; newxform['@@transducer/init'] = function() { return xform['@@transducer/init'](); }; newxform['@@transducer/result'] = function(v) { return v; }; newxform['@@transducer/step'] = function(result, input) { var val = xform['@@transducer/step'](result, input); return isReduced(val) ? deref(val) : val; }; return reduce(input, newxform, result); }; function cat(xform) { return new Cat(xform); } function mapcat(f, ctx) { f = bound(f, ctx); return compose(map(f), cat); } // collection helpers function push(arr, x) { arr.push(x); return arr; } function merge(obj, x) { if(isArray(x) && x.length === 2) { obj[x[0]] = x[1]; } else { var keys = Object.keys(x); var len = keys.length; for(var i=0; i<len; i++) { obj[keys[i]] = x[keys[i]]; } } return obj; } var arrayReducer = {}; arrayReducer['@@transducer/init'] = function() { return []; }; arrayReducer['@@transducer/result'] = function(v) { return v; }; arrayReducer['@@transducer/step'] = push; var objReducer = {}; objReducer['@@transducer/init'] = function() { return {}; }; objReducer['@@transducer/result'] = function(v) { return v; }; objReducer['@@transducer/step'] = merge; // building new collections function toArray(coll, xform) { if(!xform) { return reduce(coll, arrayReducer, []); } return transduce(coll, xform, arrayReducer, []); } function toObj(coll, xform) { if(!xform) { return reduce(coll, objReducer, {}); } return transduce(coll, xform, objReducer, {}); } function toIter(coll, xform) { if(!xform) { return iterator(coll); } return new LazyTransformer(xform, coll); } function seq(coll, xform) { if(isArray(coll)) { return transduce(coll, xform, arrayReducer, []); } else if(isObject(coll)) { return transduce(coll, xform, objReducer, {}); } else if(coll['@@transducer/step']) { var init; if(coll['@@transducer/init']) { init = coll['@@transducer/init'](); } else { init = new coll.constructor(); } return transduce(coll, xform, coll, init); } else if(fulfillsProtocol(coll, 'iterator')) { return new LazyTransformer(xform, coll); } throwProtocolError('sequence', coll); } function into(to, xform, from) { if(isArray(to)) { return transduce(from, xform, arrayReducer, to); } else if(isObject(to)) { return transduce(from, xform, objReducer, to); } else 
if(to['@@transducer/step']) { return transduce(from, xform, to, to); } throwProtocolError('into', to); } // laziness var stepper = {}; stepper['@@transducer/result'] = function(v) { return isReduced(v) ? deref(v) : v; }; stepper['@@transducer/step'] = function(lt, x) { lt.items.push(x); return lt.rest; }; function Stepper(xform, iter) { this.xform = xform(stepper); this.iter = iter; } Stepper.prototype['@@transducer/step'] = function(lt) { var len = lt.items.length; while(lt.items.length === len) { var n = this.iter.next(); if(n.done || isReduced(n.value)) { // finalize this.xform['@@transducer/result'](this); break; } // step this.xform['@@transducer/step'](lt, n.value); } } function LazyTransformer(xform, coll) { this.iter = iterator(coll); this.items = []; this.stepper = new Stepper(xform, iterator(coll)); } LazyTransformer.prototype[protocols.iterator] = function() { return this; } LazyTransformer.prototype.next = function() { this['@@transducer/step'](); if(this.items.length) { return { value: this.items.pop(), done: false } } else { return { done: true }; } }; LazyTransformer.prototype['@@transducer/step'] = function() { if(!this.items.length) { this.stepper['@@transducer/step'](this); } } // util function range(n) { var arr = new Array(n); for(var i=0; i<arr.length; i++) { arr[i] = i; } return arr; } module.exports = { reduce: reduce, transformer: transformer, Reduced: Reduced, isReduced: isReduced, iterator: iterator, push: push, merge: merge, transduce: transduce, seq: seq, toArray: toArray, toObj: toObj, toIter: toIter, into: into, compose: compose, map: map, filter: filter, remove: remove, cat: cat, mapcat: mapcat, keep: keep, dedupe: dedupe, take: take, takeWhile: takeWhile, takeNth: takeNth, drop: drop, dropWhile: dropWhile, partition: partition, partitionBy: partitionBy, interpose: interpose, repeat: repeat, range: range, LazyTransformer: LazyTransformer }; <MSG> isFunction should return the boolean value Typo? <DFF> @@ -13,7 +13,7 @@ function fullfillsProtocols(obj /* names ... */) { return names.reduce(function(result, name) { if(symbolExists) { if(name === 'iterator') { - // Accept ill-formed iterators that don'y conform to the + // Accept ill-formed iterators that don't conform to the // protocol by accepting just next() return result && (obj[Symbol.iterator] || obj.next); } @@ -63,7 +63,7 @@ function isArray(x) { } function isFunction(x) { - typeof x === 'function'; + return typeof x === 'function'; } function isObject(x) {
2
isFunction should return the boolean value
2
.js
js
bsd-2-clause
jlongster/transducers.js
10069709
<NME> forms.py <BEF> import os from django import forms from django.conf import settings from djangopypi.models import Project, Classifier, Release from django.utils.translation import ugettext_lazy as _ class ProjectForm(forms.ModelForm): class Meta: model = Project exclude = ['owner', 'classifiers'] class ReleaseForm(forms.ModelForm): class Meta: model = Release exclude = ['project'] # filename, however with .tar.gz files django does the "wrong" thing # and saves it as project-0.1.2.tar_.gz. So remove it before # django sees anything. try: previous_entry = Release.objects.get(version=version, platform=platform, project=project) except Release.DoesNotExist: pass release, created = Release.objects.get_or_create(version=version, platform=platform, project=project) if file: if not created: if os.path.exists(release.distribution.path): os.remove(release.distribution.path) release.distribution.save(file.name, file, save=True) release.save() <MSG> have to delete previous file before creating the new release. <DFF> @@ -62,18 +62,19 @@ class ProjectRegisterForm(forms.Form): # filename, however with .tar.gz files django does the "wrong" thing # and saves it as project-0.1.2.tar_.gz. So remove it before # django sees anything. - try: - previous_entry = Release.objects.get(version=version, - platform=platform, project=project) - except Release.DoesNotExist: - pass + if file: + try: + previous_entry = Release.objects.get(version=version, + platform=platform, project=project) + if os.path.exists(previous_entry.distribution.path): + os.remove(previous_entry.distribution.path) + except Release.DoesNotExist: + pass + release, created = Release.objects.get_or_create(version=version, platform=platform, project=project) if file: - if not created: - if os.path.exists(release.distribution.path): - os.remove(release.distribution.path) release.distribution.save(file.name, file, save=True) release.save()
9
have to delete previous file before creating the new release.
8
.py
py
bsd-3-clause
ask/chishop
10069710
<NME> redis_adapter.rb <BEF> # frozen_string_literal: true module Split module Persistence class RedisAdapter DEFAULT_CONFIG = { namespace: "persistence" }.freeze attr_reader :redis_key def initialize(context, key = nil) if key @redis_key = "#{self.class.config[:namespace]}:#{key}" elsif lookup_by = self.class.config[:lookup_by] if lookup_by.respond_to?(:call) key_frag = lookup_by.call(context) else key_frag = context.send(lookup_by) end @redis_key = "#{self.class.config[:namespace]}:#{key_frag}" else raise "Please configure lookup_by" end end def [](field) Split.redis.hget(redis_key, field) def []=(field, value) Split.redis.hset(redis_key, field, value) end def delete(field) def delete(field) Split.redis.hdel(redis_key, field) end def keys Split.redis.hkeys(redis_key) end def self.find(user_id) new(nil, user_id) end def self.with_config(options = {}) self.config.merge!(options) self end def self.config @config ||= DEFAULT_CONFIG.dup end def self.reset_config! @config = DEFAULT_CONFIG.dup end end end end <MSG> Add `expire_seconds:` TTL option to RedisAdapter <DFF> @@ -27,6 +27,8 @@ module Split def []=(field, value) Split.redis.hset(redis_key, field, value) + expire_seconds = self.class.config[:expire_seconds] + Split.redis.expire(redis_key, expire_seconds) if expire_seconds end def delete(field)
2
Add `expire_seconds:` TTL option to RedisAdapter
0
.rb
rb
mit
splitrb/split
10069713
<NME> experiment_catalog.rb <BEF> # frozen_string_literal: true module Split class ExperimentCatalog # Return all experiments def self.all # Call compact to prevent nil experiments from being returned -- seems to happen during gem upgrades Split.redis.smembers(:experiments).map { |e| find(e) }.compact end # Return experiments without a winner (considered "active") first def self.all_active_first all.partition { |e| not e.winner }.map { |es| es.sort_by(&:name) }.flatten end def self.find(name) Experiment.find(name) end def self.find_or_initialize(metric_descriptor, control = nil, *alternatives) # Check if array is passed to ab_test # e.g. ab_test('name', ['Alt 1', 'Alt 2', 'Alt 3']) if control.is_a?(Array) && alternatives.length.zero? control, alternatives = control.first, control[1..-1] end experiment_name_with_version, goals = normalize_experiment(metric_descriptor) experiment_name = experiment_name_with_version.to_s.split(":")[0] Split::Experiment.new(experiment_name, alternatives: [control].compact + alternatives, goals: goals) end def self.find_or_create(metric_descriptor, control = nil, *alternatives) experiment = find_or_initialize(metric_descriptor, control, *alternatives) experiment.save end def self.normalize_experiment(metric_descriptor) if Hash === metric_descriptor experiment_name = metric_descriptor.keys.first goals = Array(metric_descriptor.values.first) else experiment_name = metric_descriptor goals = [] end return experiment_name, goals end private_class_method :normalize_experiment return experiment_name, goals end private_class_method :normalize_experiment def self.clear_cache @cache = {} end end end <MSG> Remove obsolete cache clear left over from before refactoring <DFF> @@ -49,9 +49,5 @@ module Split return experiment_name, goals end private_class_method :normalize_experiment - - def self.clear_cache - @cache = {} - end end end
0
Remove obsolete cache clear left over from before refactoring
4
.rb
rb
mit
splitrb/split
10069716
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.0. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. #### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). 
## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. 
### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. ### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. 
To address this, setting the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). ```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. ```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. 
For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? } config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. 
This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. 
THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. #### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ```` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. 
```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose! # get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. 
[[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img 
src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img 
src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). <MSG> Remove unused Gemfiles from tests <DFF> @@ -19,7 +19,7 @@ Split is designed to be hacker friendly, allowing for maximum customisation and ### Requirements -Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.0. 
+Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)
1
Remove unused Gemfiles from tests
1
.md
md
mit
splitrb/split
10069717
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.0. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. #### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). 
## Statistical Validity

Split gives you two options for determining which alternative is the best.

The first option (default on the dashboard) uses a z-test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.

As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).

[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.

The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.

Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).

```ruby
Split.configure do |config|
  config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```

## Extras

### Weighted alternatives

Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.

To do this you can pass a weight with each alternative in the following ways:

```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})
ab_test(:homepage_design, 'Old', {'New' => 1.0/9})
ab_test(:homepage_design, {'Old' => 9}, 'New')
```

This will only show the new alternative to visitors 1 in 10 times; the default weight for an alternative is 1.

### Overriding alternatives

For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the URL.

If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a URL such as:

    http://myawesomesite.com?ab_test[button_color]=red

will always have red buttons. This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.

In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.

    http://myawesomesite.com?SPLIT_DISABLE=true

It is not required to send `SPLIT_DISABLE=false` to activate Split.
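If you do want overridden alternatives to be persisted and counted towards the results, the `store_override` option mentioned above can be switched on in the configure block. A minimal sketch, assuming it lives in an initializer such as `config/initializers/split.rb` (the file path is just an example):

```ruby
# Hypothetical initializer, e.g. config/initializers/split.rb.
# Persist overridden alternatives (?ab_test[...]=...) into the user's
# session/cookie so they count towards the experiment results.
Split.configure do |config|
  config.store_override = true
end
```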
### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. ### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. 
To address this, setting the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). ```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. ```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. 
For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? } config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. 
This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. 
THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. #### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ```` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. 
```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose! # get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. 
[[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img 
src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img 
src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). <MSG> Remove unused Gemfiles from tests <DFF> @@ -19,7 +19,7 @@ Split is designed to be hacker friendly, allowing for maximum customisation and ### Requirements -Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.0. 
+Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)
1
Remove unused Gemfiles from tests
1
.md
md
mit
splitrb/split
10069718
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.0. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. #### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). 
## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. 
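The URL override is also convenient in request or system specs when you want a deterministic branch without stubbing Split itself. A hedged sketch, assuming Capybara and a hypothetical `button_color` experiment whose alternative ends up as a CSS class:

```ruby
it "renders the red button when forced via the override param" do
  visit "/?ab_test[button_color]=red"
  expect(page).to have_css("button.red")
end

it "falls back to the control when Split is disabled for the request" do
  visit "/?SPLIT_DISABLE=true"
  expect(page).to have_css("button.blue") # the control is the first alternative
end
```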
### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. ### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. 
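As a hedged illustration of that trade-off (the experiment names are made up), a visitor enrolled while `allow_multiple_experiments` is `true` holds alternatives in several experiments at once, so their results overlap:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = true
end

layout  = ab_test(:homepage_layout, "classic", "wide")
wording = ab_test(:cta_wording, "Buy now", "Start trial")

# Both assignments are stored for the same visitor, so conversions recorded
# for :cta_wording are partly confounded by whichever :homepage_layout they saw.
```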
To address this, setting the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). ```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. ```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. 
For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? } config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. 
This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. 
THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. #### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ```` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. 
```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose! # get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. 
[[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img 
src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img 
src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). <MSG> Remove unused Gemfiles from tests <DFF> @@ -19,7 +19,7 @@ Split is designed to be hacker friendly, allowing for maximum customisation and ### Requirements -Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.0. 
+Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)
1
Remove unused Gemfiles from tests
1
.md
md
mit
splitrb/split
10069719
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" 
do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") @params = { "ab_test" => { "link_color" => "red" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| 
config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", "two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } 
} end it "should be passed to helper block" do expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it "should increment the counter for the completed alternative" do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should not increment the counter if reset is false and the experiment has been already finished" do 2.times { ab_finished(@experiment_name, { reset: false }) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" a = ab_test("button_size", "small", "big") expect(a).to eq("small") expect { ab_finished("button_size") }.not_to change { Split::Alternative.new(a, "button_size").completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end context "for an experiment that the user is excluded from" do before do alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") 
expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do # So, user should be participating in the link_color experiment and # receive the control for button_size. As the user is not participating in # the button size experiment, finishing it should not increase the # completion count for that alternative. expect { ab_finished("button_size") }.not_to change { Split::Alternative.new("small", "button_size").completed_count } end end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, } should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes 
through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], end end end end describe 'finished' do ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", "5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to 
eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it "should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", 
"blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end end context "and db_failover config option is turned on" do before(:each) do Split.configure do |config| config.db_failover = true end end describe "ab_test" do it "should not raise an exception" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do 
expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on 
alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Merge pull request #115 from iangreenleaf/control_failover Fails when Redis is down and 
loading from config <DFF> @@ -549,6 +549,18 @@ describe Split::Helper do end end end + + context 'and preloaded config given' do + before do + Split.configuration.experiments[:link_color] = { + :alternatives => [ "blue", "red" ], + } + end + + it "uses first alternative" do + ab_test(:link_color).should eq("blue") + end + end end describe 'finished' do
12
Merge pull request #115 from iangreenleaf/control_failover
0
.rb
rb
mit
splitrb/split
10069720
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" 
do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") @params = { "ab_test" => { "link_color" => "red" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| 
config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", "two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } 
} end it "should be passed to helper block" do expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it "should increment the counter for the completed alternative" do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should not increment the counter if reset is false and the experiment has been already finished" do 2.times { ab_finished(@experiment_name, { reset: false }) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" a = ab_test("button_size", "small", "big") expect(a).to eq("small") expect { ab_finished("button_size") }.not_to change { Split::Alternative.new(a, "button_size").completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end context "for an experiment that the user is excluded from" do before do alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") 
expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do # So, user should be participating in the link_color experiment and # receive the control for button_size. As the user is not participating in # the button size experiment, finishing it should not increase the # completion count for that alternative. expect { ab_finished("button_size") }.not_to change { Split::Alternative.new("small", "button_size").completed_count } end end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, } should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes 
through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], end end end end describe 'finished' do ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", "5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to 
eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it "should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", 
"blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end end context "and db_failover config option is turned on" do before(:each) do Split.configure do |config| config.db_failover = true end end describe "ab_test" do it "should not raise an exception" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do 
expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on 
alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Merge pull request #115 from iangreenleaf/control_failover Fails when Redis is down and 
loading from config <DFF> @@ -549,6 +549,18 @@ describe Split::Helper do end end end + + context 'and preloaded config given' do + before do + Split.configuration.experiments[:link_color] = { + :alternatives => [ "blue", "red" ], + } + end + + it "uses first alternative" do + ab_test(:link_color).should eq("blue") + end + end end describe 'finished' do
12
Merge pull request #115 from iangreenleaf/control_failover
0
.rb
rb
mit
splitrb/split
10069721
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" 
do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") @params = { "ab_test" => { "link_color" => "red" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| 
config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", "two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } 
} end it "should be passed to helper block" do expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it "should increment the counter for the completed alternative" do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should not increment the counter if reset is false and the experiment has been already finished" do 2.times { ab_finished(@experiment_name, { reset: false }) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" a = ab_test("button_size", "small", "big") expect(a).to eq("small") expect { ab_finished("button_size") }.not_to change { Split::Alternative.new(a, "button_size").completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end context "for an experiment that the user is excluded from" do before do alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") 
expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do # So, user should be participating in the link_color experiment and # receive the control for button_size. As the user is not participating in # the button size experiment, finishing it should not increase the # completion count for that alternative. expect { ab_finished("button_size") }.not_to change { Split::Alternative.new("small", "button_size").completed_count } end end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, } should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes 
through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], end end end end describe 'finished' do ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", "5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to 
eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it "should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", 
"blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end end context "and db_failover config option is turned on" do before(:each) do Split.configure do |config| config.db_failover = true end end describe "ab_test" do it "should not raise an exception" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do 
expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on 
alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Merge pull request #115 from iangreenleaf/control_failover Fails when Redis is down and 
loading from config <DFF> @@ -549,6 +549,18 @@ describe Split::Helper do end end end + + context 'and preloaded config given' do + before do + Split.configuration.experiments[:link_color] = { + :alternatives => [ "blue", "red" ], + } + end + + it "uses first alternative" do + ab_test(:link_color).should eq("blue") + end + end end describe 'finished' do
12
Merge pull request #115 from iangreenleaf/control_failover
0
.rb
rb
mit
splitrb/split
10069722
<NME> AUTHORS <BEF> Ask Solem <[email protected]> Rune Halvorsen <[email protected]> Russell Sim <[email protected]> Brian Rosner <[email protected]> Hugo Lopes Tavares <[email protected]> Sverre Johansen <[email protected]> Bo Shi <[email protected]> Carl Meyer <[email protected]> Vinícius das Chagas Silva <[email protected]> Vanderson Mota dos Santos <[email protected]> Stefan Foulis <[email protected]> Halldór Rúnarsson <[email protected]> Brent Tubbs <[email protected]> David Cramer <[email protected]> <MSG> Addded Michael Richardson to AUTHORS <DFF> @@ -9,3 +9,4 @@ Carl Meyer <[email protected]> Vinícius das Chagas Silva <[email protected]> Vanderson Mota dos Santos <[email protected]> Stefan Foulis <[email protected]> +Michael Richardson <[email protected]>
1
Addded Michael Richardson to AUTHORS
0
AUTHORS
bsd-3-clause
ask/chishop
10069723
<NME> alternative.rb <BEF> # frozen_string_literal: true module Split class Alternative attr_accessor :name attr_accessor :experiment_name attr_accessor :weight attr_accessor :recorded_info def initialize(name, experiment_name) @experiment_name = experiment_name if Hash === name @name = name.keys.first @weight = name.values.first else @name = name @weight = 1 end @p_winner = 0.0 end def to_s name end def goals self.experiment.goals end def p_winner(goal = nil) field = set_prob_field(goal) @p_winner = Split.redis.hget(key, field).to_f end def set_p_winner(prob, goal = nil) field = set_prob_field(goal) Split.redis.hset(key, field, prob.to_f) end def participant_count Split.redis.hget(key, "participant_count").to_i end def participant_count=(count) Split.redis.hset(key, "participant_count", count.to_i) end def completed_count(goal = nil) field = set_field(goal) Split.redis.hget(key, field).to_i end def all_completed_count if goals.empty? completed_count else goals.inject(completed_count) do |sum, g| sum + completed_count(g) end end end def unfinished_count participant_count - all_completed_count end def set_field(goal) field = "completed_count" field += ":" + goal unless goal.nil? field end def set_prob_field(goal) field = "p_winner" field += ":" + goal unless goal.nil? field end def set_completed_count(count, goal = nil) field = set_field(goal) Split.redis.hset(key, field, count.to_i) end def increment_participation Split.redis.hincrby key, "participant_count", 1 end def increment_completion(goal = nil) field = set_field(goal) Split.redis.hincrby(key, field, 1) end def control? experiment.control.name == self.name end def conversion_rate(goal = nil) return 0 if participant_count.zero? (completed_count(goal).to_f)/participant_count.to_f end def experiment Split::ExperimentCatalog.find(experiment_name) end def z_score(goal = nil) # p_a = Pa = proportion of users who converted within the experiment split (conversion rate) # p_c = Pc = proportion of users who converted within the control split (conversion rate) # n_a = Na = the number of impressions within the experiment split # n_c = Nc = the number of impressions within the control split control = experiment.control alternative = self return "N/A" if control.name == alternative.name p_a = alternative.conversion_rate(goal) p_c = control.conversion_rate(goal) n_a = alternative.participant_count n_c = control.participant_count # can't calculate zscore for P(x) > 1 return "N/A" if p_a > 1 || p_c > 1 Split::Zscore.calculate(p_a, n_a, p_c, n_c) end def extra_info data = Split.redis.hget(key, "recorded_info") if data && data.length > 1 begin JSON.parse(data) rescue {} end else {} end end def record_extra_info(k, value = 1) @recorded_info = self.extra_info || {} if value.kind_of?(Numeric) @recorded_info[k] ||= 0 @recorded_info[k] += value else @recorded_info[k] = value end Split.redis.hset key, "recorded_info", (@recorded_info || {}).to_json end def save Split.redis.hsetnx key, "participant_count", 0 Split.redis.hsetnx key, "completed_count", 0 Split.redis.hsetnx key, "p_winner", p_winner Split.redis.hsetnx key, "recorded_info", (@recorded_info || {}).to_json end def validate! unless String === @name || hash_with_correct_values?(@name) raise ArgumentError, "Alternative must be a string" end end def reset Split.redis.hmset key, "participant_count", 0, "completed_count", 0, "recorded_info", nil.to_s unless goals.empty? 
goals.each do |g| field = "completed_count:#{g}" Split.redis.hset key, field, 0 end end end def delete Split.redis.del(key) end private def hash_with_correct_values?(name) Hash === name && String === name.keys.first && Float(name.values.first) rescue false end def key "#{experiment_name}:#{name}" end end end <MSG> Use empty string directly instead of casting nil Co-authored-by: André Luis Leal Cardoso Junior <[email protected]> <DFF> @@ -166,7 +166,7 @@ module Split end def reset - Split.redis.hmset key, "participant_count", 0, "completed_count", 0, "recorded_info", nil.to_s + Split.redis.hmset key, "participant_count", 0, "completed_count", 0, "recorded_info", "" unless goals.empty? goals.each do |g| field = "completed_count:#{g}"
1
Use empty string directly instead of casting nil
1
.rb
rb
mit
splitrb/split
10069726
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" 
do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") lambda { finished('button_size') }.should_not change { Split::Alternative.new('small', 'button_size').completed_count } end it "should let a user participate in many experiment with allow_multiple_experiments option" do it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to 
eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", "two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } } end it "should be passed to helper block" do expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| 
config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it "should increment the counter for the completed alternative" do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should not increment the counter if reset is false and the experiment has been already finished" do 2.times { ab_finished(@experiment_name, { reset: false }) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" a = ab_test("button_size", "small", "big") expect(a).to eq("small") expect { ab_finished("button_size") }.not_to change { Split::Alternative.new(a, "button_size").completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end context "for an experiment that the user is excluded from" do before do alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do # So, user should be participating in the link_color experiment and # receive the control for button_size. 
As the user is not participating in # the button size experiment, finishing it should not increase the # completion count for that alternative. expect { ab_finished("button_size") }.not_to change { Split::Alternative.new("small", "button_size").completed_count } end end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, } should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric, reset: false 
expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end end describe "conversions" do it "should return a conversion rate for an alternative" do alternative_name = ab_test("link_color", "blue", "red") previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(previous_convertion_rate).to eq(0.0) ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", "5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") 
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = 
ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it "should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end end context "and db_failover config option is turned on" do before(:each) do Split.configure do |config| config.db_failover = true end end describe "ab_test" do it "should not raise an exception" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| 
config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do 
Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to 
change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Failing test for extraneous experiment completions <DFF> @@ -114,7 +114,16 @@ describe Split::Helper do lambda { finished('button_size') }.should_not change { Split::Alternative.new('small', 'button_size').completed_count } + end + it 'should not raise the completion rate of an already-finished experiment' do + e = Split::Experiment.find_or_create('button_size', 'small', 'big') + e.winner = 'small' + a = ab_test('button_size', 'small', 'big') + a.should eq('small') + lambda { + finished('button_size') + }.should_not change { Split::Alternative.new(a, 'button_size').completed_count } end it "should let a user participate in many experiment with allow_multiple_experiments option" do
9
Failing test for extraneous experiment completions
0
.rb
rb
mit
splitrb/split
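The <DFF> hunk above adds its regression spec in the older should/lambda RSpec style (`finished`, `Split::Experiment.find_or_create`, `a.should eq(...)`). As a rough illustration only, the same check written with the expect-style helpers used throughout the current version of this spec file might look like the sketch below; the helper and class names are taken from the surrounding specs, not from the recorded commit.

# frozen_string_literal: true

# Sketch only: the regression test from the <DFF> hunk above, rewritten with
# the expect-style matchers used in the current spec file. Helper names
# (ab_test, ab_finished, Split::ExperimentCatalog) follow the surrounding
# specs; the diff itself used the older Split::Experiment.find_or_create,
# finished(...) and should-style syntax.
require "spec_helper"

describe Split::Helper do
  include Split::Helper

  it "should not raise the completion rate of an already-finished experiment" do
    e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
    e.winner = "small"

    a = ab_test("button_size", "small", "big")
    expect(a).to eq("small")

    # Finishing an experiment that already has a winner must not bump the
    # completed counter of the alternative that was returned.
    expect {
      ab_finished("button_size")
    }.not_to change { Split::Alternative.new(a, "button_size").completed_count }
  end
end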
10069728
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" 
do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to
eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", "two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } } end it "should be passed to helper block" do expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| 
config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it "should increment the counter for the completed alternative" do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should not increment the counter if reset is false and the experiment has been already finished" do 2.times { ab_finished(@experiment_name, { reset: false }) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" a = ab_test("button_size", "small", "big") expect(a).to eq("small") expect { ab_finished("button_size") }.not_to change { Split::Alternative.new(a, "button_size").completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end context "for an experiment that the user is excluded from" do before do alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do # So, user should be participating in the link_color experiment and # receive the control for button_size. 
As the user is not participating in # the button size experiment, finishing it should not increase the # completion count for that alternative. expect { ab_finished("button_size") }.not_to change { Split::Alternative.new("small", "button_size").completed_count } end end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, } should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric, reset: false 
expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end end describe "conversions" do it "should return a conversion rate for an alternative" do alternative_name = ab_test("link_color", "blue", "red") previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(previous_convertion_rate).to eq(0.0) ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", "5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") 
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = 
ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it "should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end end context "and db_failover config option is turned on" do before(:each) do Split.configure do |config| config.db_failover = true end end describe "ab_test" do it "should not raise an exception" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| 
config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do 
Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to 
change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Failing test for extraneous experiment completions <DFF> @@ -114,7 +114,16 @@ describe Split::Helper do lambda { finished('button_size') }.should_not change { Split::Alternative.new('small', 'button_size').completed_count } + end + it 'should not raise the completion rate of an already-finished experiment' do + e = Split::Experiment.find_or_create('button_size', 'small', 'big') + e.winner = 'small' + a = ab_test('button_size', 'small', 'big') + a.should eq('small') + lambda { + finished('button_size') + }.should_not change { Split::Alternative.new(a, 'button_size').completed_count } end it "should let a user participate in many experiment with allow_multiple_experiments option" do
9
Failing test for extraneous experiment completions
0
.rb
rb
mit
splitrb/split
10069729
<NME> dashboard_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" require "rack/test" require "split/dashboard" describe Split::Dashboard do include Rack::Test::Methods class TestDashboard < Split::Dashboard include Split::Helper get "/my_experiment" do ab_test(params[:experiment], "blue", "red") end end def app @app ||= TestDashboard end def link(color) Split::Alternative.new(color, experiment.name) end let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } let(:experiment_with_goals) { Split::ExperimentCatalog.find_or_create({ "link_color" => ["goal_1", "goal_2"] }, "blue", "red") } let(:metric) { Split::Metric.find_or_create(name: "testmetric", experiments: [experiment, experiment_with_goals]) } let(:red_link) { link("red") } let(:blue_link) { link("blue") } before(:each) do Split.configuration.beta_probability_simulations = 1 end it "should respond to /" do get "/" expect(last_response).to be_ok end context "start experiment manually" do before do Split.configuration.start_manually = true end context "experiment without goals" do it "should display a Start button" do experiment get "/" expect(last_response.body).to include("Start") post "/start?experiment=#{experiment.name}" get "/" expect(last_response.body).to include("Reset Data") expect(last_response.body).not_to include("Metrics:") end end context "experiment with metrics" do it "should display the names of associated metrics" do metric get "/" expect(last_response.body).to include("Metrics:testmetric") end end context "with goals" do it "should display a Start button" do experiment_with_goals get "/" expect(last_response.body).to include("Start") post "/start?experiment=#{experiment.name}" get "/" expect(last_response.body).to include("Reset Data") end end end describe "force alternative" do context "initial version" do let!(:user) do Split::User.new(@app, { experiment.name => "red" }) end before do allow(Split::User).to receive(:new).and_return(user) end it "should set current user's alternative" do blue_link.participant_count = 7 post "/force_alternative?experiment=#{experiment.name}", alternative: "blue" get "/my_experiment?experiment=#{experiment.name}" expect(last_response.body).to include("blue") end it "should not modify an existing user" do blue_link.participant_count = 7 post "/force_alternative?experiment=#{experiment.name}", alternative: "blue" expect(user[experiment.key]).to eq("red") expect(blue_link.participant_count).to eq(7) end end context "incremented version" do let!(:user) do experiment.increment_version Split::User.new(@app, { "#{experiment.name}:#{experiment.version}" => "red" }) end before do allow(Split::User).to receive(:new).and_return(user) end it "should set current user's alternative" do blue_link.participant_count = 7 post "/force_alternative?experiment=#{experiment.name}", alternative: "blue" get "/my_experiment?experiment=#{experiment.name}" expect(last_response.body).to include("blue") end end end describe "index page" do context "with winner" do before { experiment.winner = "red" } it "displays `Reopen Experiment` button" do get "/" expect(last_response.body).to include("Reopen Experiment") end end context "without winner" do it "should not display `Reopen Experiment` button" do get "/" expect(last_response.body).to_not include("Reopen Experiment") end end end describe "reopen experiment" do before { experiment.winner = "red" } it "redirects" do post "/reopen?experiment=#{experiment.name}" expect(last_response).to be_redirect end it "removes 
winner" do post "/reopen?experiment=#{experiment.name}" expect(Split::ExperimentCatalog.find(experiment.name)).to_not have_winner end it "keeps existing stats" do red_link.participant_count = 5 blue_link.participant_count = 7 experiment.winner = "blue" post "/reopen?experiment=#{experiment.name}" expect(red_link.participant_count).to eq(5) expect(blue_link.participant_count).to eq(7) end end describe "update cohorting" do it "calls enable of cohorting when action is enable" do post "/update_cohorting?experiment=#{experiment.name}", { "cohorting_action": "enable" } expect(experiment.cohorting_disabled?).to eq false end it "calls disable of cohorting when action is disable" do post "/update_cohorting?experiment=#{experiment.name}", { "cohorting_action": "disable" } expect(experiment.cohorting_disabled?).to eq true end it "calls neither enable or disable cohorting when passed invalid action" do previous_value = experiment.cohorting_disabled? post "/update_cohorting?experiment=#{experiment.name}", { "cohorting_action": "other" } end end it "should reset an experiment" do red_link.participant_count = 5 blue_link.participant_count = 7 blue_link.participant_count = 7 experiment.winner = "blue" post "/reset?experiment=#{experiment.name}" expect(last_response).to be_redirect new_red_count = red_link.participant_count new_blue_count = blue_link.participant_count expect(new_blue_count).to eq(0) expect(new_red_count).to eq(0) expect(experiment.winner).to be_nil end it "should delete an experiment" do delete "/experiment?experiment=#{experiment.name}" expect(last_response).to be_redirect expect(Split::ExperimentCatalog.find(experiment.name)).to be_nil end it "should mark an alternative as the winner" do expect(experiment.winner).to be_nil post "/experiment?experiment=#{experiment.name}", alternative: "red" expect(last_response).to be_redirect expect(experiment.winner.name).to eq("red") end it "should display the start date" do experiment.start get "/" expect(last_response.body).to include("<small>#{experiment.start_time.strftime('%Y-%m-%d')}</small>") end it "should handle experiments without a start date" do Split.redis.hdel(:experiment_start_times, experiment.name) get "/" expect(last_response.body).to include("<small>Unknown</small>") end end <MSG> Add ability to initialize experiments <DFF> @@ -201,6 +201,37 @@ describe Split::Dashboard do end end + describe "initialize experiment" do + before do + Split.configuration.experiments = { + :my_experiment => { + :alternatives => [ "control", "alternative" ], + } + } + end + + it "initializes the experiment when the experiment is given" do + expect(Split::ExperimentCatalog.find("my_experiment")).to be nil + + post "/initialize_experiment", { experiment: "my_experiment"} + + experiment = Split::ExperimentCatalog.find("my_experiment") + expect(experiment).to be_a(Split::Experiment) + end + + it "does not attempt to intialize the experiment when empty experiment is given" do + post "/initialize_experiment", { experiment: ""} + + expect(Split::ExperimentCatalog).to_not receive(:find_or_create) + end + + it "does not attempt to intialize the experiment when no experiment is given" do + post "/initialize_experiment" + + expect(Split::ExperimentCatalog).to_not receive(:find_or_create) + end + end + it "should reset an experiment" do red_link.participant_count = 5 blue_link.participant_count = 7
31
Add ability to initialize experiments
0
.rb
rb
mit
splitrb/split
10069732
<NME> README.md <BEF> # transducers.js A small library for generalized transformation of data. This provides a bunch of transformation functions that can be applied to any data structure. It is a direct port of Clojure's [transducers](http://blog.cognitect.com/blog/2014/8/6/transducers-are-coming) in JavaScript. Read more in [this post](http://jlongster.com/Transducers.js--A-JavaScript-Library-for-Transformation-of-Data). The algorithm behind this, explained in the above post, not only allows for it to work with any data structure (arrays, objects, iterators, immutable data structures, you name it) but it also provides better performance than other alternatives such as underscore or lodash. This is because there are no intermediate collections. See [this post](http://jlongster.com/Transducers.js-Round-2-with-Benchmarks) for benchmarks. ``` npm install transducers.js ``` For browsers, grab the file `dist/transducers.js`. When writing programs, we frequently write methods that take in collections, do something with them, and return a result. The problem is that we frequently only write these functions to work with a specific data structure, so if we ever change our data type or want to reuse that functionality, you can't. We need to decouple these kinds of concerns. A transducer is a function that takes a reducing function and returns a new one. It can perform the necessary work and call the original reducing function to move on to the next "step". In this library, a transducer is a little more than that (it's actually an object that also supports init and finalizer methods) but generally you don't have to worry about these internal details. Read [my post](http://jlongster.com/Transducers.js--A-JavaScript-Library-for-Transformation-of-Data) if you want to learn more about the algorithm. ```js var transform = compose( map(x => x * 3), filter(x => x % 2 === 0), take(2) ); seq([1, 2, 3, 4, 5], transform); // -> [ 6, 12 ] function* nums() { var i = 1; while(true) { yield i++; } } into([], transform, nums()); // -> [ 6, 12 ] into([], transform, Immutable.List.of(1, 2, 3, 4, 5)) // -> [ 6, 12 ] ``` All of these work with arrays, objects, and any iterable data structure (like [immutable-js](https://github.com/facebook/immutable-js)) and you get all the high performance guarantees for free. The above code always only performs 2 transformations because of `take(2)`, no matter how large the array. This is done without laziness or any overhead of intermediate structures. ## Transformations The following transformations are available, and there are more to come (like `partition`). 
* `map(coll?, f, ctx?)` &mdash; call `f` on each item * `filter(coll?, f, ctx?)` &mdash; only include the items where the result of calling `f` with the item is truthy * `remove(coll?, f, ctx?)` &mdash; only include the items where the result of calling `f` with the item is falsy * `keep(coll?)` &mdash; remove all items that are `null` or `undefined` * `take(coll?, n)` &mdash; grab only the first `n` items * `takeWhile(coll?, f, ctx?)` &mdash; grab only the first items where the result of calling `f` with the item is truthy * `drop(coll?, n)` &mdash; drop the first `n` items and only include the rest * `dropWhile(coll?, f, ctx?)` &mdash; drop the first items where the result of calling `f` with the item is truthy * `dedupe(coll?)` &mdash; remove consecutive duplicates (equality compared with ===) The above functions optionally take a collection to immediately perform the transformation on, and a context to bind `this` to when calling `f`. That means you can call them in four ways: * Immediately perform a map: `map([1, 2, 3], x => x + 1)` * Same as above but call the function with `this` as `ctx`: `map([1, 2, 3], function(x) { return x + 1; }, ctx)` * Make a map transducer: `map(x => x + 1)` * Same as above but with `this` as `ctx`: `map(function(x) { return x + 1; }, ctx)` (I will be using the ES6 fat arrow syntax, but if that's not available just use `function` instead) The signature of running an immediate map is the same familiar one as seen in lodash and underscore, but now you can drop the collection to make a transducer and run multiple transformations with good performance: ```js var transform = compose( map(x => x + 1), filter(x => x % 2 === 0), take(2) ); ``` `compose` is a provided function that simply turns `compose(f, g)` into `x => f(g(x))`. You use it to build up transformations. The above transformation would always run the map and filter **only twice** because only two items are needed, and it short-circuits once it gets two items. Again, this is done without laziness, read more [here](http://jlongster.com/Transducers.js--A-JavaScript-Library-for-Transformation-of-Data). There are also 2 transducers available for taking collections and "catting" them into the transformation stream: * `cat` &mdash; take collections and forward each item individually, essentially flattening it * `mapcat(f)` &mdash; same as `cat`, but first apply `f` to each collection Just pass `cat` straight through like so: `compose(filter(x => x.length < 10), cat)`. That would take all arrays with a length less than 10 and flatten them out into a single array. ## Applying Transformations Building data structure-agnostic transformations is cool, but how do you actually use them? `transducers.js` provides several integration points. To use a transformation, we need to know how to iterate over the source data structure and how to build up a new one. The former is easy; we can work with arrays, objects, and anything that uses the ES6 iterator protocol (Maps, Sets, generators, etc). All the below functions work with them. For the latter, you need to specify what you want back. The following functions allow you to make a new data structure and possibly apply a transformation: * `toArray(coll, xform?)` &mdash; Turn `coll` into an array, applying the transformation `xform` to each item if provided. The transform is optional in case you want to do something like turn an iterator into an array. * `toObj(coll, xform?)` &mdash; Turn `coll` into an object if possible, applying the transformation `xform` if provided. 
When an object is iterated it produces two-element arrays `[key, value]`, and `toObj` will turn these back into an object.
* `toIter(coll, xform?)` &mdash; Make an iterator over `coll`, and apply the transformation `xform` to each value if specified. Note that `coll` can just be another iterator. **Transformations will be applied lazily**.
* `seq(coll, xform)` &mdash; A generalized method that will return the same data type that was passed in as `coll`, with `xform` applied. You will usually use this unless you know you want an array, object, or iterator. If `coll` is an iterator, another iterator will be returned and transformations will be applied lazily.
* `into(to, xform, from)` &mdash; Apply `xform` to each item in `from` and append it to `to`. This has the effect of "pouring" elements into `to`. You will commonly use this when converting one type of object to another.
* `transduce(coll, xform, reducer, init?)` &mdash; Like `reduce`, but apply `xform` to each value before passing to `reducer`. If `init` is not specified, it will attempt to get it from `reducer`.

The possibilities are endless:

```js
// Map an object
seq({ foo: 1, bar: 2 }, map(kv => [kv[0], kv[1] + 1]));
// -> { foo: 2, bar: 3 }

// Make an array from an object
toArray({ foo: 1, bar: 2 });
// -> [ [ 'foo', 1 ], [ 'bar', 2 ] ]

// Make an array from an iterable
function* nums() {
  var i = 1;
  while(true) {
    yield i++;
  }
}
into([], take(3), nums());
// -> [ 1, 2, 3 ]

// Lazily transform an iterable
var iter = seq(nums(), compose(map(x => x * 2), filter(x => x > 4)));
iter.next().value; // -> 6
iter.next().value; // -> 8
iter.next().value; // -> 10
```

## Laziness

Transducers remove the requirement of being lazy to optimize for things like `take(10)`. However, it can still be useful to "bind" a collection to a set of transformations and pass it around, without actually evaluating the transformations. As noted above, whenever you apply transformations to an iterator it does so lazily. It's easy to convert array transformations into a lazy operation; just use the utility function `iterator` to grab an iterator of the array instead:

```js
seq(iterator([1, 2, 3]),
    compose(
      map(x => x + 1),
      filter(x => x % 2 === 0)))
// -> <Iterator>
```

Our transformations are completely blind to the fact that they may or may not be applied lazily.

## Utility Functions

This library provides a few small utility functions:

* `iterator(coll)` &mdash; Get an iterator for `coll`, which can be any type like array, object, iterator, or custom data type
* `push(arr, value)` &mdash; Push `value` onto `arr` and return `arr`
* `merge(obj, value)` &mdash; Merge `value` into `obj`. `value` can be another object or a two-element array of `[key, value]`
* `range(n)` &mdash; Make an array of size `n` filled with numbers from `0..n`.

## immutable-js

We've talked about how this can be applied to any data structure &mdash; let's see that in action. Here's how you could use this with [immutable-js](https://github.com/facebook/immutable-js).

```js
Immutable.fromJS(
  seq(Immutable.Vector(1, 2, 3, 4, 5),
      compose(
        map(function(x) { return x + 10; }),
        map(function(x) { return x * 2; }),
        filter(function(x) { return x % 5 === 0; }),
        filter(function(x) { return x % 2 === 0; })))
)
```

We can use our familiar `seq` function because `Immutable.Vector` implements the iterator protocol, so we can iterate over it. Because `seq` is working with an iterator, it returns a new iterator that will *lazily transform each value*.
We can simply pass this iterator into `Immutable.Vector.from` to construct a new one, and we have a new transformed immutable vector with no intermediate collections except for one lazy transformer! The builtin transformations perform well because they minimize allocations, but since we don't have any intermediate structures or laziness machinery, this performs slightly better. The point is not to beat it, but to show that both are high-performance but we can apply our performance to any data structure. ## CSP Channels This not only works with all the JavaScript data structures you can think of, but it even works for things like streams. Soon channels from [js-csp](https://github.com/ubolonton/js-csp) will be able to take a transformation and you get all of this for channels for free: ```js var ch = chan(1, compose( cat, map(x => x + 1), dedupe(), drop(3) )); ``` ## The `transducer` protocol While it's great that you can apply transducers to custom data structures, it's a bit annoying to always have to use constructor functions like `Immutable.fromJS`. One option is to define a new protocol complementary to `iterator`. This conforms to the [official transducer spec](https://github.com/cognitect-labs/transducers-js/issues/20) so if you implement this, you can use it with all transducer libraries that conform to it. To implement the transducer protocol, you add methods to the prototype of your data structure. A transformer is an object with three methods: `init`, `result`, and `step`. `init` returns a new empty object, `result`, can perform any finalization steps on the resulting collection, and `step` performs a reduce. These methods are namespaced and in the future could be symbols. Here's what it looks like for `Immutable.List`: ```js Immutable.List.prototype['@@transducer/init'] = function() { return Immutable.List().asMutable(); }; Immutable.List.prototype['@@transducer/result'] = function(lst) { return lst.asImmutable(); }; Immutable.List.prototype['@@transducer/step'] = function(lst, x) { return lst.push(x); }; ``` If you implement the transducer protocol, now your data structure will work with *all* of the builtin functions. You can just use `seq` like normal and you get back an immutable vector! ```js t.seq(Immutable.List.of(1, 2, 3, 4, 5), t.compose( t.map(function(x) { return x + 10; }), t.map(function(x) { return x * 2; }), t.filter(function(x) { return x % 5 === 0; }), t.filter(function(x) { return x % 2 === 0; }))); // -> Vector [ 30 ] ``` ## Running Tests ``` npm install gulp mocha build/tests ``` [BSD LICENSE](https://github.com/jlongster/transducers.js/blob/master/LICENSE) <MSG> fix typo <DFF> @@ -217,7 +217,7 @@ t.seq(Immutable.List.of(1, 2, 3, 4, 5), t.map(function(x) { return x * 2; }), t.filter(function(x) { return x % 5 === 0; }), t.filter(function(x) { return x % 2 === 0; }))); -// -> Vector [ 30 ] +// -> List [ 30 ] ``` ## Running Tests
1
fix typo
1
.md
md
bsd-2-clause
jlongster/transducers.js
10069733
<NME> version.rb <BEF> # frozen_string_literal: true module Split MAJOR = 1 MINOR = 4 PATCH = 0 VERSION = [MAJOR, MINOR, PATCH].join('.') end <MSG> v1.4.1 <DFF> @@ -2,6 +2,6 @@ module Split MAJOR = 1 MINOR = 4 - PATCH = 0 + PATCH = 1 VERSION = [MAJOR, MINOR, PATCH].join('.') end
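For reference, a minimal sketch (not taken from the gem itself) of what the version constants evaluate to once this diff is applied:

```ruby
# Illustrative only: mirrors the constants above after the PATCH bump.
MAJOR = 1
MINOR = 4
PATCH = 1

VERSION = [MAJOR, MINOR, PATCH].join(".")
puts VERSION # => "1.4.1"
```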
1
v1.4.1
1
.rb
rb
mit
splitrb/split
10069736
<NME> models.py <BEF> import os from django.conf import settings from django.db import models from django.utils.translation import ugettext_lazy as _ from django.contrib.auth.models import User OS_NAMES = ( ("aix", "AIX"), ("beos", "BeOS"), ("debian", "Debian Linux"), ("dos", "DOS"), ("freebsd", "FreeBSD"), ("hpux", "HP/UX"), ("mac", "Mac System x."), ("macos", "MacOS X"), ("mandrake", "Mandrake Linux"), ("netbsd", "NetBSD"), ("openbsd", "OpenBSD"), ("qnx", "QNX"), ("redhat", "RedHat Linux"), ("solaris", "SUN Solaris"), ("suse", "SuSE Linux"), ("yellowdog", "Yellow Dog Linux"), ) ARCHITECTURES = ( ("alpha", "Alpha"), ("hppa", "HPPA"), ("ix86", "Intel"), ("powerpc", "PowerPC"), ("sparc", "Sparc"), ("ultrasparc", "UltraSparc"), ) UPLOAD_TO = getattr(settings, "DJANGOPYPI_RELEASE_UPLOAD_TO", 'dist') class Classifier(models.Model): name = models.CharField(max_length=255, unique=True) class Meta: verbose_name = _(u"classifier") verbose_name_plural = _(u"classifiers") def __unicode__(self): return self.name class Project(models.Model): name = models.CharField(max_length=255, unique=True) license = models.TextField(blank=True) metadata_version = models.CharField(max_length=64, default=1.0) author = models.CharField(max_length=128, blank=True) home_page = models.URLField(verify_exists=False, blank=True, null=True) download_url = models.CharField(max_length=200, blank=True, null=True) summary = models.TextField(blank=True) description = models.TextField(blank=True) author_email = models.CharField(max_length=255, blank=True) classifiers = models.ManyToManyField(Classifier) owner = models.ForeignKey(User, related_name="projects") updated = models.DateTimeField(auto_now=True) class Meta: verbose_name = _(u"project") verbose_name_plural = _(u"projects") def __unicode__(self): return self.name @models.permalink def get_absolute_url(self): return ('djangopypi-show_links', (), {'dist_name': self.name}) @models.permalink def get_pypi_absolute_url(self): return ('djangopypi-pypi_show_links', (), {'dist_name': self.name}) def get_release(self, version): """Return the release object for version, or None""" try: return self.releases.get(version=version) except Release.DoesNotExist: return None class Release(models.Model): version = models.CharField(max_length=32) distribution = models.FileField(upload_to=UPLOAD_TO) md5_digest = models.CharField(max_length=255, blank=True) platform = models.CharField(max_length=128, blank=True) signature = models.CharField(max_length=128, blank=True) filetype = models.CharField(max_length=255, blank=True) pyversion = models.CharField(max_length=32, blank=True) project = models.ForeignKey(Project, related_name="releases") upload_time = models.DateTimeField(auto_now=True) class Meta: verbose_name = _(u"release") verbose_name_plural = _(u"releases") unique_together = ("project", "version", "platform", "distribution", "pyversion") def __unicode__(self): return u"%s (%s)" % (self.release_name, self.platform) @property def type(self): dist_file_types = { 'sdist':'Source', verbose_name_plural = _(u"releases") def __unicode__(self): return u"%s %s (%s)" (self.project.name, self.version, self.platform) @property def filename(self): @property def filename(self): return os.path.basename(self.distribution.name) @property def release_name(self): return u"%s-%s" % (self.project.name, self.version) @property def path(self): return self.distribution.name @models.permalink def get_absolute_url(self): return ('djangopypi-show_version', (), {'dist_name': self.project, 'version': self.version}) 
def get_dl_url(self): return "%s#md5=%s" % (self.distribution.url, self.md5_digest) <MSG> Fix syntax error <DFF> @@ -108,7 +108,7 @@ class Release(models.Model): verbose_name_plural = _(u"releases") def __unicode__(self): - return u"%s %s (%s)" (self.project.name, self.version, self.platform) + return u"%s %s (%s)" % (self.project.name, self.version, self.platform) @property def filename(self):
1
Fix syntax error
1
.py
py
bsd-3-clause
ask/chishop
10069737
<NME> helper.rb <BEF> # frozen_string_literal: true module Split module Helper OVERRIDE_PARAM_NAME = "ab_test" module_function def ab_test(metric_descriptor, control = nil, *alternatives) begin experiment = ExperimentCatalog.find_or_initialize(metric_descriptor, control, *alternatives) alternative = if Split.configuration.enabled && !exclude_visitor? experiment.save raise(Split::InvalidExperimentsFormatError) unless (Split.configuration.experiments || {}).fetch(experiment.name.to_sym, {})[:combined_experiments].nil? trial = Trial.new(user: ab_user, experiment: experiment, override: override_alternative(experiment.name), exclude: exclude_visitor?, disabled: split_generically_disabled?) alt = trial.choose!(self) alt ? alt.name : nil else control_variable(experiment.control) end rescue Errno::ECONNREFUSED, Redis::BaseError, SocketError => e raise(e) unless Split.configuration.db_failover Split.configuration.db_failover_on_db_error.call(e) if Split.configuration.db_failover_allow_parameter_override alternative = override_alternative(experiment.name) if override_present?(experiment.name) end if block_given? if defined?(capture) # a block in a rails view block = Proc.new { yield(alternative, (trial.metadata if trial)) } concat(capture(alternative, &block)) false else yield(alternative, (trial.metadata if trial)) end else alternative end metadata = experiment.metadata[alternative] if experiment.metadata yield(alternative, metadata || {}) else alternative end end def reset!(experiment) ab_user.delete(experiment.key) end def finish_experiment(experiment, options = { reset: true }) return false if active_experiments[experiment.name].nil? return true if experiment.has_winner? should_reset = experiment.resettable? && options[:reset] if ab_user[experiment.finished_key] && !should_reset true else alternative_name = ab_user[experiment.key] trial = Trial.new( user: ab_user, experiment: experiment, alternative: alternative_name, goals: options[:goals], ) trial.complete!(self) if should_reset reset!(experiment) else ab_user[experiment.finished_key] = true end end end def ab_finished(metric_descriptor, options = { reset: true }) return if exclude_visitor? || Split.configuration.disabled? metric_descriptor, goals = normalize_metric(metric_descriptor) experiments = Metric.possible_experiments(metric_descriptor) if experiments.any? experiments.each do |experiment| next if override_present?(experiment.key) finish_experiment(experiment, options.merge(goals: goals)) end end rescue => e raise unless Split.configuration.db_failover Split.configuration.db_failover_on_db_error.call(e) end def ab_record_extra_info(metric_descriptor, key, value = 1) return if exclude_visitor? || Split.configuration.disabled? metric_descriptor, _ = normalize_metric(metric_descriptor) experiments = Metric.possible_experiments(metric_descriptor) if experiments.any? 
experiments.each do |experiment| alternative_name = ab_user[experiment.key] if alternative_name alternative = experiment.alternatives.find { |alt| alt.name == alternative_name } alternative.record_extra_info(key, value) if alternative end end end rescue => e raise unless Split.configuration.db_failover Split.configuration.db_failover_on_db_error.call(e) end def ab_active_experiments ab_user.active_experiments rescue => e raise unless Split.configuration.db_failover Split.configuration.db_failover_on_db_error.call(e) end def override_present?(experiment_name) override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name) end def override_alternative(experiment_name) override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name) end def override_alternative_by_params(experiment_name) defined?(params) && params[OVERRIDE_PARAM_NAME] && params[OVERRIDE_PARAM_NAME][experiment_name] end def override_alternative_by_cookies(experiment_name) return unless defined?(request) if request.cookies && request.cookies.key?("split_override") experiments = JSON.parse(request.cookies["split_override"]) rescue {} experiments[experiment_name] end end def split_generically_disabled? defined?(params) && params["SPLIT_DISABLE"] end def ab_user @ab_user ||= User.new(self) end def exclude_visitor? defined?(request) && (instance_exec(request, &Split.configuration.ignore_filter) || is_ignored_ip_address? || is_robot? || is_preview?) end def is_robot? defined?(request) && request.user_agent =~ Split.configuration.robot_regex end def is_preview? defined?(request) && defined?(request.headers) && request.headers["x-purpose"] == "preview" end def is_ignored_ip_address? return false if Split.configuration.ignore_ip_addresses.empty? Split.configuration.ignore_ip_addresses.each do |ip| return true if defined?(request) && (request.ip == ip || (ip.class == Regexp && request.ip =~ ip)) end false end def active_experiments ab_user.active_experiments end def normalize_metric(metric_descriptor) if Hash === metric_descriptor experiment_name = metric_descriptor.keys.first goals = Array(metric_descriptor.values.first) else experiment_name = metric_descriptor goals = [] end return experiment_name, goals end def control_variable(control) Hash === control ? control.keys.first.to_s : control.to_s end end end <MSG> Merge pull request #312 from splitrb/dont-use-capture-helper Don't check for capture presence in ab_test <DFF> @@ -29,13 +29,7 @@ module Split end if block_given? - if defined?(capture) # a block in a rails view - block = Proc.new { yield(alternative, (trial.metadata if trial)) } - concat(capture(alternative, &block)) - false - else - yield(alternative, (trial.metadata if trial)) - end + yield(alternative, (trial.metadata if trial)) else alternative end
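To make the effect of this change concrete, here is a hedged usage sketch (the experiment name and alternatives are illustrative, not taken from the repository) of the block form of `ab_test` that the simplified branch now handles directly:

```ruby
# Illustrative usage only; :signup_button and its alternatives are made up.
# After this change the block is always yielded the chosen alternative and
# the trial metadata (which may be nil), with no special casing for views.
ab_test(:signup_button, "red", "green") do |alternative, metadata|
  puts "showing #{alternative} signup button (#{metadata.inspect})"
end
```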
1
Merge pull request #312 from splitrb/dont-use-capture-helper
7
.rb
rb
mit
splitrb/split
10069740
<NME> experiment_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" require "time" describe Split::Experiment do def new_experiment(goals = []) Split::Experiment.new("link_color", alternatives: ["blue", "red", "green"], goals: goals) end def alternative(color) Split::Alternative.new(color, "link_color") end let(:experiment) { new_experiment } let(:blue) { alternative("blue") } let(:green) { alternative("green") } context "with an experiment" do let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"]) } it "should have a name" do expect(experiment.name).to eq("basket_text") end Split::Experiment.find('basket_text').name.should eql('basket_text') end describe 'winner' do it "should have no winner initially" do experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red') experiment.save expect(Split.redis.exists?("basket_text")).to be true end it "should save the start time to redis" do experiment_start_time = Time.at(1372167761) expect(Time).to receive(:now).and_return(experiment_start_time) experiment.save expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time) end it "should not save the start time to redis when start_manually is enabled" do expect(Split.configuration).to receive(:start_manually).and_return(true) experiment.save expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil end it "should save the selected algorithm to redis" do experiment_algorithm = Split::Algorithms::Whiplash experiment.algorithm = experiment_algorithm experiment.save expect(Split::ExperimentCatalog.find("basket_text").algorithm).to eq(experiment_algorithm) end it "should handle having a start time stored as a string" do experiment_start_time = Time.parse("Sat Mar 03 14:01:03") expect(Time).to receive(:now).twice.and_return(experiment_start_time) experiment.save Split.redis.hset(:experiment_start_times, experiment.name, experiment_start_time.to_s) expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time) end it "should handle not having a start time" do experiment_start_time = Time.parse("Sat Mar 03 14:01:03") expect(Time).to receive(:now).and_return(experiment_start_time) experiment.save Split.redis.hdel(:experiment_start_times, experiment.name) expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil end it "should not create duplicates when saving multiple times" do experiment.save experiment.save expect(Split.redis.exists?("basket_text")).to be true expect(Split.redis.lrange("basket_text", 0, -1)).to eq(['{"Basket":1}', '{"Cart":1}']) end describe "new record?" 
do it "should know if it hasn't been saved yet" do expect(experiment.new_record?).to be_truthy end it "should know if it has been saved yet" do experiment.save expect(experiment.new_record?).to be_falsey end end describe "control" do it "should be the first alternative" do experiment.save expect(experiment.control.name).to eq("Basket") end end end describe "initialization" do it "should set the algorithm when passed as an option to the initializer" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash) expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash) end it "should be possible to make an experiment not resettable" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false) expect(experiment.resettable).to be_falsey end context "from configuration" do let(:experiment_name) { :my_experiment } let(:experiments) do { experiment_name => { alternatives: ["Control Opt", "Alt one"] } } end before { Split.configuration.experiments = experiments } it "assigns default values to the experiment" do expect(Split::Experiment.new(experiment_name).resettable).to eq(true) end end end describe "persistent configuration" do it "should persist resettable in redis" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false) experiment.save e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.resettable).to be_falsey end describe "#metadata" do let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash, metadata: meta) } let(:meta) { { a: "b" } } before do experiment.save end it "should delete the key when metadata is removed" do experiment.metadata = nil experiment.save expect(Split.redis.exists?(experiment.metadata_key)).to be_falsey end context "simple hash" do let(:meta) { { "basket" => "a", "cart" => "b" } } it "should persist metadata in redis" do e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.metadata).to eq(meta) end end context "nested hash" do let(:meta) { { "basket" => { "one" => "two" }, "cart" => "b" } } it "should persist metadata in redis" do e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.metadata).to eq(meta) end end end it "should persist algorithm in redis" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash) experiment.save e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.algorithm).to eq(Split::Algorithms::Whiplash) end it "should persist a new experiment in redis, that does not exist in the configuration file" do experiment = Split::Experiment.new("foobar", alternatives: ["tra", "la"], algorithm: Split::Algorithms::Whiplash) experiment.save e = Split::ExperimentCatalog.find("foobar") expect(e).to eq(experiment) expect(e.alternatives.collect { |a| a.name }).to eq(["tra", "la"]) end end describe "deleting" do it "should delete itself" do experiment = Split::Experiment.new("basket_text", alternatives: [ "Basket", "Cart"]) experiment.save experiment.delete expect(Split.redis.exists?("link_color")).to be false expect(Split::ExperimentCatalog.find("link_color")).to be_nil end it "should increment the version" do expect(experiment.version).to eq(0) experiment.delete expect(experiment.version).to eq(1) end it "should call the 
on_experiment_delete hook" do expect(Split.configuration.on_experiment_delete).to receive(:call) experiment.delete end it "should call the on_before_experiment_delete hook" do expect(Split.configuration.on_before_experiment_delete).to receive(:call) experiment.delete end it "should reset the start time if the experiment should be manually started" do Split.configuration.start_manually = true experiment.start experiment.delete expect(experiment.start_time).to be_nil end it "should default cohorting back to false" do experiment.disable_cohorting expect(experiment.cohorting_disabled?).to eq(true) experiment.delete expect(experiment.cohorting_disabled?).to eq(false) end end describe "winner" do it "should have no winner initially" do expect(experiment.winner).to be_nil end end describe "winner=" do it "should allow you to specify a winner" do experiment.save experiment.winner = "red" expect(experiment.winner.name).to eq("red") end it "should call the on_experiment_winner_choose hook" do expect(Split.configuration.on_experiment_winner_choose).to receive(:call) experiment.winner = "green" end context "when has_winner state is memoized" do before { expect(experiment).to_not have_winner } it "should keep has_winner state consistent" do experiment.winner = "red" expect(experiment).to have_winner end end end describe "reset_winner" do before { experiment.winner = "green" } it "should reset the winner" do experiment.reset_winner expect(experiment.winner).to be_nil end context "when has_winner state is memoized" do before { expect(experiment).to have_winner } it "should keep has_winner state consistent" do experiment.reset_winner expect(experiment).to_not have_winner end end end describe "has_winner?" do context "with winner" do before { experiment.winner = "red" } it "returns true" do expect(experiment).to have_winner end end context "without winner" do it "returns false" do expect(experiment).to_not have_winner end end it "memoizes has_winner state" do expect(experiment).to receive(:winner).once expect(experiment).to_not have_winner expect(experiment).to_not have_winner end end describe "reset" do let(:reset_manually) { false } before do allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually) experiment.save green.increment_participation green.increment_participation end it "should reset all alternatives" do experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation experiment.reset expect(green.participant_count).to eq(0) expect(green.completed_count).to eq(0) end it "should reset the winner" do experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation experiment.reset expect(experiment.winner).to be_nil end it "should increment the version" do expect(experiment.version).to eq(0) experiment.reset expect(experiment.version).to eq(1) end it "should call the on_experiment_reset hook" do expect(Split.configuration.on_experiment_reset).to receive(:call) experiment.reset end it "should call the on_before_experiment_reset hook" do expect(Split.configuration.on_before_experiment_reset).to receive(:call) experiment.reset end end describe "algorithm" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") } it "should use the default algorithm if none is specified" do expect(experiment.algorithm).to eq(Split.configuration.algorithm) end it "should use the user specified algorithm for this experiment if specified" do 
experiment.algorithm = Split::Algorithms::Whiplash expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash) end end describe "#next_alternative" do context "with multiple alternatives" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") } context "with winner" do it "should always return the winner" do green = Split::Alternative.new("green", "link_color") experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation expect(experiment.next_alternative.name).to eq("green") end end context "without winner" do it "should use the specified algorithm" do experiment.algorithm = Split::Algorithms::Whiplash expect(experiment.algorithm).to receive(:choose_alternative).and_return(Split::Alternative.new("green", "link_color")) expect(experiment.next_alternative.name).to eq("green") end end end context "with single alternative" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue") } it "should always return the only alternative" do expect(experiment.next_alternative.name).to eq("blue") expect(experiment.next_alternative.name).to eq("blue") end end end describe "#cohorting_disabled?" do it "returns false when nothing has been configured" do expect(experiment.cohorting_disabled?).to eq false end it "returns true when enable_cohorting is performed" do experiment.enable_cohorting expect(experiment.cohorting_disabled?).to eq false end it "returns false when nothing has been configured" do experiment.disable_cohorting expect(experiment.cohorting_disabled?).to eq true end end describe "changing an existing experiment" do def same_but_different_alternative Split::ExperimentCatalog.find_or_create("link_color", "blue", "yellow", "orange") end it "should reset an experiment if it is loaded with different alternatives" do experiment.save blue.participant_count = 5 same_experiment = same_but_different_alternative expect(same_experiment.alternatives.map(&:name)).to eq(["blue", "yellow", "orange"]) expect(blue.participant_count).to eq(0) end it "should only reset once" do experiment.save expect(experiment.version).to eq(0) same_experiment = same_but_different_alternative expect(same_experiment.version).to eq(1) same_experiment_again = same_but_different_alternative expect(same_experiment_again.version).to eq(1) end context "when metadata is changed" do it "should increase version" do experiment.save experiment.metadata = { "foo" => "bar" } expect { experiment.save }.to change { experiment.version }.by(1) end it "does not increase version" do experiment.metadata = nil experiment.save expect { experiment.save }.to change { experiment.version }.by(0) end end context "when experiment configuration is changed" do let(:reset_manually) { false } before do experiment.save allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually) green.increment_participation green.increment_participation experiment.set_alternatives_and_options(alternatives: %w(blue red green zip), goals: %w(purchase)) experiment.save end it "resets all alternatives" do expect(green.participant_count).to eq(0) expect(green.completed_count).to eq(0) end context "when reset_manually is set" do let(:reset_manually) { true } it "does not reset alternatives" do expect(green.participant_count).to eq(2) expect(green.completed_count).to eq(0) end end end end describe "alternatives passed as non-strings" do it "should throw an exception if an alternative is passed that is not a string" do expect { 
Split::ExperimentCatalog.find_or_create("link_color", :blue, :red) }.to raise_error(ArgumentError) expect { Split::ExperimentCatalog.find_or_create("link_enabled", true, false) }.to raise_error(ArgumentError) end end describe "specifying weights" do let(:experiment_with_weight) { Split::ExperimentCatalog.find_or_create("link_color", { "blue" => 1 }, { "red" => 2 }) } it "should work for a new experiment" do expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2]) end it "should work for an existing experiment" do experiment.save expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2]) end end describe "specifying goals" do let(:experiment) { new_experiment(["purchase"]) } context "saving experiment" do let(:same_but_different_goals) { Split::ExperimentCatalog.find_or_create({ "link_color" => ["purchase", "refund"] }, "blue", "red", "green") } before { experiment.save } it "can find existing experiment" do expect(Split::ExperimentCatalog.find("link_color").name).to eq("link_color") end it "should reset an experiment if it is loaded with different goals" do same_but_different_goals expect(Split::ExperimentCatalog.find("link_color").goals).to eq(["purchase", "refund"]) end end it "should have goals" do expect(experiment.goals).to eq(["purchase"]) end context "find or create experiment" do it "should have correct goals" do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") expect(experiment.goals).to eq(["purchase", "refund"]) experiment = Split::ExperimentCatalog.find_or_create("link_color3", "blue", "red", "green") expect(experiment.goals).to eq([]) end end end describe "beta probability calculation" do it "should return a hash with the probability of each alternative being the best" do experiment = Split::ExperimentCatalog.find_or_create("mathematicians", "bernoulli", "poisson", "lagrange") experiment.calc_winning_alternatives expect(experiment.alternative_probabilities).not_to be_nil end it "should return between 46% and 54% probability for an experiment with 2 alternatives and no data" do experiment = Split::ExperimentCatalog.find_or_create("scientists", "einstein", "bohr") experiment.calc_winning_alternatives expect(experiment.alternatives[0].p_winner).to be_within(0.04).of(0.50) end it "should calculate the probability of being the winning alternative separately for each goal", skip: true do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") goal1 = experiment.goals[0] goal2 = experiment.goals[1] experiment.alternatives.each do |alternative| alternative.participant_count = 50 alternative.set_completed_count(10, goal1) alternative.set_completed_count(15+rand(30), goal2) end experiment.calc_winning_alternatives alt = experiment.alternatives[0] p_goal1 = alt.p_winner(goal1) p_goal2 = alt.p_winner(goal2) expect(p_goal1).not_to be_within(0.04).of(p_goal2) end it "should return nil and not re-calculate probabilities if they have already been calculated today" do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") expect(experiment.calc_winning_alternatives).not_to be nil expect(experiment.calc_winning_alternatives).to be nil end end end <MSG> Experiment control test <DFF> @@ -26,6 +26,14 @@ describe Split::Experiment do Split::Experiment.find('basket_text').name.should eql('basket_text') end + describe 'control' do + it 'should be the first alternative' 
do + experiment = Split::Experiment.new('basket_text', 'Basket', "Cart") + experiment.save + experiment.control.name.should eql('Basket') + end + end + describe 'winner' do it "should have no winner initially" do experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
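As a quick illustration of the behaviour the added spec pins down, here is a sketch that mirrors the constructor call used in the diff above (the first alternative passed in acts as the control):

```ruby
# Sketch mirroring the new spec: the first alternative is the control.
experiment = Split::Experiment.new("basket_text", "Basket", "Cart")
experiment.save
experiment.control.name # => "Basket"
```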
8
Experiment control test
0
.rb
rb
mit
splitrb/split
10069741
<NME> experiment_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" require "time" describe Split::Experiment do def new_experiment(goals = []) Split::Experiment.new("link_color", alternatives: ["blue", "red", "green"], goals: goals) end def alternative(color) Split::Alternative.new(color, "link_color") end let(:experiment) { new_experiment } let(:blue) { alternative("blue") } let(:green) { alternative("green") } context "with an experiment" do let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"]) } it "should have a name" do expect(experiment.name).to eq("basket_text") end Split::Experiment.find('basket_text').name.should eql('basket_text') end describe 'winner' do it "should have no winner initially" do experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red') experiment.save expect(Split.redis.exists?("basket_text")).to be true end it "should save the start time to redis" do experiment_start_time = Time.at(1372167761) expect(Time).to receive(:now).and_return(experiment_start_time) experiment.save expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time) end it "should not save the start time to redis when start_manually is enabled" do expect(Split.configuration).to receive(:start_manually).and_return(true) experiment.save expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil end it "should save the selected algorithm to redis" do experiment_algorithm = Split::Algorithms::Whiplash experiment.algorithm = experiment_algorithm experiment.save expect(Split::ExperimentCatalog.find("basket_text").algorithm).to eq(experiment_algorithm) end it "should handle having a start time stored as a string" do experiment_start_time = Time.parse("Sat Mar 03 14:01:03") expect(Time).to receive(:now).twice.and_return(experiment_start_time) experiment.save Split.redis.hset(:experiment_start_times, experiment.name, experiment_start_time.to_s) expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time) end it "should handle not having a start time" do experiment_start_time = Time.parse("Sat Mar 03 14:01:03") expect(Time).to receive(:now).and_return(experiment_start_time) experiment.save Split.redis.hdel(:experiment_start_times, experiment.name) expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil end it "should not create duplicates when saving multiple times" do experiment.save experiment.save expect(Split.redis.exists?("basket_text")).to be true expect(Split.redis.lrange("basket_text", 0, -1)).to eq(['{"Basket":1}', '{"Cart":1}']) end describe "new record?" 
do it "should know if it hasn't been saved yet" do expect(experiment.new_record?).to be_truthy end it "should know if it has been saved yet" do experiment.save expect(experiment.new_record?).to be_falsey end end describe "control" do it "should be the first alternative" do experiment.save expect(experiment.control.name).to eq("Basket") end end end describe "initialization" do it "should set the algorithm when passed as an option to the initializer" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash) expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash) end it "should be possible to make an experiment not resettable" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false) expect(experiment.resettable).to be_falsey end context "from configuration" do let(:experiment_name) { :my_experiment } let(:experiments) do { experiment_name => { alternatives: ["Control Opt", "Alt one"] } } end before { Split.configuration.experiments = experiments } it "assigns default values to the experiment" do expect(Split::Experiment.new(experiment_name).resettable).to eq(true) end end end describe "persistent configuration" do it "should persist resettable in redis" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false) experiment.save e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.resettable).to be_falsey end describe "#metadata" do let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash, metadata: meta) } let(:meta) { { a: "b" } } before do experiment.save end it "should delete the key when metadata is removed" do experiment.metadata = nil experiment.save expect(Split.redis.exists?(experiment.metadata_key)).to be_falsey end context "simple hash" do let(:meta) { { "basket" => "a", "cart" => "b" } } it "should persist metadata in redis" do e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.metadata).to eq(meta) end end context "nested hash" do let(:meta) { { "basket" => { "one" => "two" }, "cart" => "b" } } it "should persist metadata in redis" do e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.metadata).to eq(meta) end end end it "should persist algorithm in redis" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash) experiment.save e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.algorithm).to eq(Split::Algorithms::Whiplash) end it "should persist a new experiment in redis, that does not exist in the configuration file" do experiment = Split::Experiment.new("foobar", alternatives: ["tra", "la"], algorithm: Split::Algorithms::Whiplash) experiment.save e = Split::ExperimentCatalog.find("foobar") expect(e).to eq(experiment) expect(e.alternatives.collect { |a| a.name }).to eq(["tra", "la"]) end end describe "deleting" do it "should delete itself" do experiment = Split::Experiment.new("basket_text", alternatives: [ "Basket", "Cart"]) experiment.save experiment.delete expect(Split.redis.exists?("link_color")).to be false expect(Split::ExperimentCatalog.find("link_color")).to be_nil end it "should increment the version" do expect(experiment.version).to eq(0) experiment.delete expect(experiment.version).to eq(1) end it "should call the 
on_experiment_delete hook" do expect(Split.configuration.on_experiment_delete).to receive(:call) experiment.delete end it "should call the on_before_experiment_delete hook" do expect(Split.configuration.on_before_experiment_delete).to receive(:call) experiment.delete end it "should reset the start time if the experiment should be manually started" do Split.configuration.start_manually = true experiment.start experiment.delete expect(experiment.start_time).to be_nil end it "should default cohorting back to false" do experiment.disable_cohorting expect(experiment.cohorting_disabled?).to eq(true) experiment.delete expect(experiment.cohorting_disabled?).to eq(false) end end describe "winner" do it "should have no winner initially" do expect(experiment.winner).to be_nil end end describe "winner=" do it "should allow you to specify a winner" do experiment.save experiment.winner = "red" expect(experiment.winner.name).to eq("red") end it "should call the on_experiment_winner_choose hook" do expect(Split.configuration.on_experiment_winner_choose).to receive(:call) experiment.winner = "green" end context "when has_winner state is memoized" do before { expect(experiment).to_not have_winner } it "should keep has_winner state consistent" do experiment.winner = "red" expect(experiment).to have_winner end end end describe "reset_winner" do before { experiment.winner = "green" } it "should reset the winner" do experiment.reset_winner expect(experiment.winner).to be_nil end context "when has_winner state is memoized" do before { expect(experiment).to have_winner } it "should keep has_winner state consistent" do experiment.reset_winner expect(experiment).to_not have_winner end end end describe "has_winner?" do context "with winner" do before { experiment.winner = "red" } it "returns true" do expect(experiment).to have_winner end end context "without winner" do it "returns false" do expect(experiment).to_not have_winner end end it "memoizes has_winner state" do expect(experiment).to receive(:winner).once expect(experiment).to_not have_winner expect(experiment).to_not have_winner end end describe "reset" do let(:reset_manually) { false } before do allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually) experiment.save green.increment_participation green.increment_participation end it "should reset all alternatives" do experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation experiment.reset expect(green.participant_count).to eq(0) expect(green.completed_count).to eq(0) end it "should reset the winner" do experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation experiment.reset expect(experiment.winner).to be_nil end it "should increment the version" do expect(experiment.version).to eq(0) experiment.reset expect(experiment.version).to eq(1) end it "should call the on_experiment_reset hook" do expect(Split.configuration.on_experiment_reset).to receive(:call) experiment.reset end it "should call the on_before_experiment_reset hook" do expect(Split.configuration.on_before_experiment_reset).to receive(:call) experiment.reset end end describe "algorithm" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") } it "should use the default algorithm if none is specified" do expect(experiment.algorithm).to eq(Split.configuration.algorithm) end it "should use the user specified algorithm for this experiment if specified" do 
experiment.algorithm = Split::Algorithms::Whiplash expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash) end end describe "#next_alternative" do context "with multiple alternatives" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") } context "with winner" do it "should always return the winner" do green = Split::Alternative.new("green", "link_color") experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation expect(experiment.next_alternative.name).to eq("green") end end context "without winner" do it "should use the specified algorithm" do experiment.algorithm = Split::Algorithms::Whiplash expect(experiment.algorithm).to receive(:choose_alternative).and_return(Split::Alternative.new("green", "link_color")) expect(experiment.next_alternative.name).to eq("green") end end end context "with single alternative" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue") } it "should always return the only alternative" do expect(experiment.next_alternative.name).to eq("blue") expect(experiment.next_alternative.name).to eq("blue") end end end describe "#cohorting_disabled?" do it "returns false when nothing has been configured" do expect(experiment.cohorting_disabled?).to eq false end it "returns true when enable_cohorting is performed" do experiment.enable_cohorting expect(experiment.cohorting_disabled?).to eq false end it "returns false when nothing has been configured" do experiment.disable_cohorting expect(experiment.cohorting_disabled?).to eq true end end describe "changing an existing experiment" do def same_but_different_alternative Split::ExperimentCatalog.find_or_create("link_color", "blue", "yellow", "orange") end it "should reset an experiment if it is loaded with different alternatives" do experiment.save blue.participant_count = 5 same_experiment = same_but_different_alternative expect(same_experiment.alternatives.map(&:name)).to eq(["blue", "yellow", "orange"]) expect(blue.participant_count).to eq(0) end it "should only reset once" do experiment.save expect(experiment.version).to eq(0) same_experiment = same_but_different_alternative expect(same_experiment.version).to eq(1) same_experiment_again = same_but_different_alternative expect(same_experiment_again.version).to eq(1) end context "when metadata is changed" do it "should increase version" do experiment.save experiment.metadata = { "foo" => "bar" } expect { experiment.save }.to change { experiment.version }.by(1) end it "does not increase version" do experiment.metadata = nil experiment.save expect { experiment.save }.to change { experiment.version }.by(0) end end context "when experiment configuration is changed" do let(:reset_manually) { false } before do experiment.save allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually) green.increment_participation green.increment_participation experiment.set_alternatives_and_options(alternatives: %w(blue red green zip), goals: %w(purchase)) experiment.save end it "resets all alternatives" do expect(green.participant_count).to eq(0) expect(green.completed_count).to eq(0) end context "when reset_manually is set" do let(:reset_manually) { true } it "does not reset alternatives" do expect(green.participant_count).to eq(2) expect(green.completed_count).to eq(0) end end end end describe "alternatives passed as non-strings" do it "should throw an exception if an alternative is passed that is not a string" do expect { 
Split::ExperimentCatalog.find_or_create("link_color", :blue, :red) }.to raise_error(ArgumentError) expect { Split::ExperimentCatalog.find_or_create("link_enabled", true, false) }.to raise_error(ArgumentError) end end describe "specifying weights" do let(:experiment_with_weight) { Split::ExperimentCatalog.find_or_create("link_color", { "blue" => 1 }, { "red" => 2 }) } it "should work for a new experiment" do expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2]) end it "should work for an existing experiment" do experiment.save expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2]) end end describe "specifying goals" do let(:experiment) { new_experiment(["purchase"]) } context "saving experiment" do let(:same_but_different_goals) { Split::ExperimentCatalog.find_or_create({ "link_color" => ["purchase", "refund"] }, "blue", "red", "green") } before { experiment.save } it "can find existing experiment" do expect(Split::ExperimentCatalog.find("link_color").name).to eq("link_color") end it "should reset an experiment if it is loaded with different goals" do same_but_different_goals expect(Split::ExperimentCatalog.find("link_color").goals).to eq(["purchase", "refund"]) end end it "should have goals" do expect(experiment.goals).to eq(["purchase"]) end context "find or create experiment" do it "should have correct goals" do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") expect(experiment.goals).to eq(["purchase", "refund"]) experiment = Split::ExperimentCatalog.find_or_create("link_color3", "blue", "red", "green") expect(experiment.goals).to eq([]) end end end describe "beta probability calculation" do it "should return a hash with the probability of each alternative being the best" do experiment = Split::ExperimentCatalog.find_or_create("mathematicians", "bernoulli", "poisson", "lagrange") experiment.calc_winning_alternatives expect(experiment.alternative_probabilities).not_to be_nil end it "should return between 46% and 54% probability for an experiment with 2 alternatives and no data" do experiment = Split::ExperimentCatalog.find_or_create("scientists", "einstein", "bohr") experiment.calc_winning_alternatives expect(experiment.alternatives[0].p_winner).to be_within(0.04).of(0.50) end it "should calculate the probability of being the winning alternative separately for each goal", skip: true do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") goal1 = experiment.goals[0] goal2 = experiment.goals[1] experiment.alternatives.each do |alternative| alternative.participant_count = 50 alternative.set_completed_count(10, goal1) alternative.set_completed_count(15+rand(30), goal2) end experiment.calc_winning_alternatives alt = experiment.alternatives[0] p_goal1 = alt.p_winner(goal1) p_goal2 = alt.p_winner(goal2) expect(p_goal1).not_to be_within(0.04).of(p_goal2) end it "should return nil and not re-calculate probabilities if they have already been calculated today" do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") expect(experiment.calc_winning_alternatives).not_to be nil expect(experiment.calc_winning_alternatives).to be nil end end end <MSG> Experiment control test <DFF> @@ -26,6 +26,14 @@ describe Split::Experiment do Split::Experiment.find('basket_text').name.should eql('basket_text') end + describe 'control' do + it 'should be the first alternative' 
do + experiment = Split::Experiment.new('basket_text', 'Basket', "Cart") + experiment.save + experiment.control.name.should eql('Basket') + end + end + describe 'winner' do it "should have no winner initially" do experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
8
Experiment control test
0
.rb
rb
mit
splitrb/split
10069743
<NME> zscore.rb <BEF> # frozen_string_literal: true module Split class Zscore include Math def self.calculate(p1, n1, p2, n2) # p_1 = Pa = proportion of users who converted within the experiment split (conversion rate) # p_2 = Pc = proportion of users who converted within the control split (conversion rate) # n_1 = Na = the number of impressions within the experiment split # n_2 = Nc = the number of impressions within the control split # s_1 = SEa = standard error of p_1, the estiamte of the mean # s_2 = SEc = standard error of p_2, the estimate of the control # s_p = SEp = standard error of p_1 - p_2, assuming a pooled variance # s_unp = SEunp = standard error of p_1 - p_2, assuming unpooled variance p_1 = p1.to_f p_2 = p2.to_f n_1 = n1.to_f n_2 = n2.to_f # Perform checks on data to make sure we can validly run our confidence tests if n_1 < 30 || n_2 < 30 error = "Needs 30+ participants." return error elsif p_1 * n_1 < 5 || p_2 * n_2 < 5 error = "Needs 5+ conversions." return error end # Formula for standard error: root(pq/n) = root(p(1-p)/n) s_1 = Math.sqrt((p_1)*(1-p_1)/(n_1)) s_2 = Math.sqrt((p_2)*(1-p_2)/(n_2)) # Formula for pooled error of the difference of the means: root(π*(1-π)*(1/na+1/nc) # π = (xa + xc) / (na + nc) pi = (p_1*n_1 + p_2*n_2)/(n_1 + n_2) s_p = Math.sqrt(pi*(1-pi)*(1/n_1 + 1/n_2)) # Formula for unpooled error of the difference of the means: root(sa**2/na + sc**2/nc) s_unp = Math.sqrt(s_1**2 + s_2**2) # Boolean variable decides whether we can pool our variances pooled = s_1/s_2 < 2 && s_2/s_1 < 2 # Assign standard error either the pooled or unpooled variance se = pooled ? s_p : s_unp # Calculate z-score z_score = (p_1 - p_2)/(se) return z_score end end end <MSG> Fix Layout/EmptyLinesAroundMethodBody <DFF> @@ -51,7 +51,6 @@ module Split z_score = (p_1 - p_2)/(se) return z_score - end end end
0
Fix Layout/EmptyLinesAroundMethodBody
1
.rb
rb
mit
splitrb/split
10069746
<NME> configuration_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" describe Split::Configuration do before(:each) { @config = Split::Configuration.new } it "should provide a default value for ignore_ip_addresses" do expect(@config.ignore_ip_addresses).to eq([]) end it "should provide default values for db failover" do expect(@config.db_failover).to be_falsey expect(@config.db_failover_on_db_error).to be_a Proc end it "should not allow multiple experiments by default" do expect(@config.allow_multiple_experiments).to be_falsey end it "should be enabled by default" do expect(@config.enabled).to be_truthy end it "disabled is the opposite of enabled" do @config.enabled = false expect(@config.disabled?).to be_truthy end it "should not store the overridden test group per default" do expect(@config.store_override).to be_falsey end it "should provide a default pattern for robots" do %w[Baidu Gigabot Googlebot libwww-perl lwp-trivial msnbot SiteUptime Slurp WordPress ZIBB ZyBorg YandexBot AdsBot-Google Wget curl bitlybot facebookexternalhit spider].each do |robot| expect(@config.robot_regex).to match(robot) end expect(@config.robot_regex).to match("EventMachine HttpClient") expect(@config.robot_regex).to match("libwww-perl/5.836") expect(@config.robot_regex).to match("Pingdom.com_bot_version_1.4_(http://www.pingdom.com)") expect(@config.robot_regex).to match(" - ") end it "should accept real UAs with the robot regexp" do expect(@config.robot_regex).not_to match("Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.4) Gecko/20091017 SeaMonkey/2.0") expect(@config.robot_regex).not_to match("Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; F-6.0SP2-20041109; .NET CLR 2.0.50727; .NET CLR 3.0.04506.648; .NET CLR 3.5.21022; .NET CLR 1.1.4322; InfoPath.3)") end it "should allow adding a bot to the bot list" do @config.bots["newbot"] = "An amazing test bot" expect(@config.robot_regex).to match("newbot") end it "should use the session adapter for persistence by default" do expect(@config.persistence).to eq(Split::Persistence::SessionAdapter) end it "should load a metric" do @config.experiments = { my_experiment: { alternatives: ["control_opt", "other_opt"], metric: :my_metric } } expect(@config.metrics).not_to be_nil expect(@config.metrics.keys).to eq([:my_metric]) end it "should allow loading of experiment using experment_for" do @config.experiments = { my_experiment: { alternatives: ["control_opt", "other_opt"], metric: :my_metric } } expect(@config.experiment_for(:my_experiment)).to eq({ alternatives: ["control_opt", ["other_opt"]] }) end context "when experiments are defined via YAML" do context "as strings" do context "in a basic configuration" do before do experiments_yaml = <<-eos my_experiment: alternatives: - Control Opt - Alt One - Alt Two resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: ["Control Opt", ["Alt One", "Alt Two"]] } }) end end context "in a configuration with metadata" do before do experiments_yaml = <<-eos my_experiment: alternatives: - name: Control Opt percent: 67 - name: Alt One percent: 10 - name: Alt Two percent: 23 metadata: Control Opt: text: 'Control Option' Alt One: text: 'Alternative One' Alt Two: text: 'Alternative Two' resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should have metadata on the experiment" do meta = 
@config.normalized_experiments[:my_experiment][:metadata] expect(meta).to_not be nil expect(meta["Control Opt"]["text"]).to eq("Control Option") end end context "in a complex configuration" do before do experiments_yaml = <<-eos my_experiment: alternatives: - name: Control Opt percent: 67 - name: Alt One percent: 10 - name: Alt Two percent: 23 resettable: false metric: my_metric another_experiment: alternatives: - a - b eos @config.experiments = YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: [{ "Control Opt"=>0.67 }, [{ "Alt One"=>0.1 }, { "Alt Two"=>0.23 }]] }, another_experiment: { alternatives: ["a", ["b"]] } }) end it "should recognize metrics" do expect(@config.metrics).not_to be_nil expect(@config.metrics.keys).to eq([:my_metric]) end end end context "as symbols" do context "with valid YAML" do before do experiments_yaml = <<-eos :my_experiment: :alternatives: - Control Opt - Alt One - Alt Two :resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: ["Control Opt", ["Alt One", "Alt Two"]] } }) end end context "with invalid YAML" do let(:yaml) { YAML.load(input) } context "with an empty string" do let(:input) { "" } it "should raise an error" do expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError) end end context "with just the YAML header" do let(:input) { "---" } it "should raise an error" do expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError) end end end end end it "should normalize experiments" do @config.experiments = { my_experiment: { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } } expect(@config.normalized_experiments).to eq({ my_experiment: { alternatives: [{ "control_opt"=>0.67 }, [{ "second_opt"=>0.1 }, { "third_opt"=>0.23 }]] } }) end expect(@config.normalized_experiments).to eq({:my_experiment=>{:alternatives=>[{"control_opt"=>0.67}, [{"second_opt"=>0.1}, {"third_opt"=>0.23}]]}}) end end context "persistence cookie length" do it "should default to 1 year" do expect(@config.persistence_cookie_length).to eq(31536000) end it "should allow the persistence cookie length to be configured" do @config.persistence_cookie_length = 2592000 expect(@config.persistence_cookie_length).to eq(2592000) end end context "persistence cookie domain" do it "should default to nil" do expect(@config.persistence_cookie_domain).to eq(nil) end it "should allow the persistence cookie domain to be configured" do @config.persistence_cookie_domain = ".acme.com" expect(@config.persistence_cookie_domain).to eq(".acme.com") end end end <MSG> Merge pull request #323 from davidgrieser/allow_env_in_config allow for custom redis_url different from ENV variable <DFF> @@ -210,4 +210,22 @@ describe Split::Configuration do expect(@config.normalized_experiments).to eq({:my_experiment=>{:alternatives=>[{"control_opt"=>0.67}, [{"second_opt"=>0.1}, {"third_opt"=>0.23}]]}}) end + + context "configuration URL" do + it "should default to local redis server" do + expect(@config.redis_url).to eq("localhost:6379") + end + + it "should allow for redis url to be configured" do + @config.redis_url = "custom_redis_url" + expect(@config.redis_url).to eq("custom_redis_url") + end + + context "provided 
REDIS_URL environment variable" do + it "should use the ENV variable" do + ENV['REDIS_URL'] = "env_redis_url" + expect(Split::Configuration.new.redis_url).to eq("env_redis_url") + end + end + end end
18
Merge pull request #323 from davidgrieser/allow_env_in_config
0
.rb
rb
mit
splitrb/split
10069749
<NME> alternative_spec.rb <BEF> ADDFILE <MSG> Added alternative tests <DFF> @@ -0,0 +1,95 @@ +require 'spec_helper' +require 'split/alternative' + +describe Split::Alternative do + before(:each) { Split.redis.flushall } + + it "should have a name" do + experiment = Split::Experiment.new('basket_text', 'Basket', "Cart") + alternative = Split::Alternative.new('Basket', 'basket_text') + alternative.name.should eql('Basket') + end + + it "should have a default participation count of 0" do + alternative = Split::Alternative.new('Basket', 'basket_text') + alternative.participant_count.should eql(0) + end + + it "should have a default completed count of 0" do + alternative = Split::Alternative.new('Basket', 'basket_text') + alternative.completed_count.should eql(0) + end + + it "should belong to an experiment" do + experiment = Split::Experiment.new('basket_text', 'Basket', "Cart") + experiment.save + alternative = Split::Alternative.find('Basket', 'basket_text') + alternative.experiment.name.should eql(experiment.name) + end + + it "should save to redis" do + alternative = Split::Alternative.new('Basket', 'basket_text') + alternative.save + Split.redis.exists('basket_text:Basket').should be true + end + + it "should increment participation count" do + experiment = Split::Experiment.new('basket_text', 'Basket', "Cart") + experiment.save + alternative = Split::Alternative.find('Basket', 'basket_text') + old_participant_count = alternative.participant_count + alternative.increment_participation + alternative.participant_count.should eql(old_participant_count+1) + end + + it "should increment completed count" do + experiment = Split::Experiment.new('basket_text', 'Basket', "Cart") + experiment.save + alternative = Split::Alternative.find('Basket', 'basket_text') + old_completed_count = alternative.participant_count + alternative.increment_completion + alternative.completed_count.should eql(old_completed_count+1) + end + + it "can be reset" do + alternative = Split::Alternative.new('Basket', 'basket_text', {'participant_count' => 10, 'completed_count' =>4}) + alternative.save + alternative.reset + alternative.participant_count.should eql(0) + alternative.completed_count.should eql(0) + end + + describe 'conversion rate' do + it "should be 0 if there are no conversions" do + alternative = Split::Alternative.new('Basket', 'basket_text') + alternative.completed_count.should eql(0) + alternative.conversion_rate.should eql(0) + end + + it "does something" do + alternative = Split::Alternative.new('Basket', 'basket_text', {'participant_count' => 10, 'completed_count' =>4}) + alternative.conversion_rate.should eql(0.4) + end + end + + it "should return an existing alternative" do + alternative = Split::Alternative.create('Basket', 'basket_text') + Split::Alternative.find('Basket', 'basket_text').name.should eql('Basket') + end + + describe 'z score' do + it 'should be zero when the control has no conversions' do + experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red') + + alternative = Split::Alternative.find('red', 'link_color') + alternative.z_score.should eql(0) + end + + it "should be N/A for the control" do + experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red') + + control = experiment.control + control.z_score.should eql('N/A') + end + end +end \ No newline at end of file
95
Added alternative tests
0
.rb
rb
mit
splitrb/split