Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Discussions
class CaptureDiffNotePositionService
def initialize(merge_request, paths)
@project = merge_request.project
@tracer = build_tracer(merge_request, paths)
end
def execute(discussion)
# The service has been implemented for text only
# We don't need to capture positions for images
return unless discussion.on_text?
result = tracer&.trace(discussion.position)
return unless result
position = result[:position]
return unless position
line_code = position.line_code(project.repository)
return unless line_code
# Currently position data is copied across all notes of a discussion
# It makes sense to store a position only for the first note instead
# Within the newly introduced table we can start doing just that
DiffNotePosition.create_or_update_for(discussion.notes.first,
diff_type: :head,
position: position,
line_code: line_code)
end
private
attr_reader :tracer, :project
def build_tracer(merge_request, paths)
return if paths.blank?
old_diff_refs, new_diff_refs = build_diff_refs(merge_request)
return unless old_diff_refs && new_diff_refs
Gitlab::Diff::PositionTracer.new(
project: project,
old_diff_refs: old_diff_refs,
new_diff_refs: new_diff_refs,
paths: paths.uniq)
end
def build_diff_refs(merge_request)
merge_ref_head = merge_request.merge_ref_head
return unless merge_ref_head
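# The merge ref head is a merge commit; its first parent (the target
# branch head) is used as both base and start SHA for the new diff refs.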
start_sha, _ = merge_ref_head.parent_ids
new_diff_refs = Gitlab::Diff::DiffRefs.new(
base_sha: start_sha,
start_sha: start_sha,
head_sha: merge_ref_head.id)
old_diff_refs = merge_request.diff_refs
return if new_diff_refs == old_diff_refs
[old_diff_refs, new_diff_refs]
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Discussions::CaptureDiffNotePositionService, feature_category: :code_review_workflow do
subject { described_class.new(note.noteable, paths) }
context 'image note on diff' do
let!(:note) { create(:image_diff_note_on_merge_request) }
let(:paths) { ['files/images/any_image.png'] }
it 'is not affected by the service' do
expect(Gitlab::Diff::PositionTracer).not_to receive(:new)
expect(subject.execute(note.discussion)).to eq(nil)
expect(note.diff_note_positions).to be_empty
end
end
context 'when empty paths are passed as a param' do
let!(:note) { create(:diff_note_on_merge_request) }
let(:paths) { [] }
it 'does not calculate positions' do
expect(Gitlab::Diff::PositionTracer).not_to receive(:new)
expect(subject.execute(note.discussion)).to eq(nil)
expect(note.diff_note_positions).to be_empty
end
end
context 'when position tracer returned position' do
let!(:note) { create(:diff_note_on_merge_request) }
let(:paths) { ['files/any_file.txt'] }
before do
expect(note.noteable).to receive(:merge_ref_head).and_return(double.as_null_object)
expect_next_instance_of(Gitlab::Diff::PositionTracer) do |tracer|
expect(tracer).to receive(:trace).and_return({ position: position })
end
end
context 'which is nil' do
let(:position) { nil }
it 'does not create diff note position' do
expect(subject.execute(note.discussion)).to eq(nil)
expect(note.diff_note_positions).to be_empty
end
end
context 'which does not have a corresponding line' do
let(:position) { double(line_code: nil) }
it 'does not create diff note position' do
expect(subject.execute(note.discussion)).to eq(nil)
expect(note.diff_note_positions).to be_empty
end
end
end
end
```

Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Achievements
class AwardService
attr_reader :current_user, :achievement_id, :recipient_id
def initialize(current_user, achievement_id, recipient_id)
@current_user = current_user
@achievement_id = achievement_id
@recipient_id = recipient_id
end
def execute
achievement = Achievements::Achievement.find(achievement_id)
return error_no_permissions unless allowed?(achievement)
recipient = User.find(recipient_id)
user_achievement = Achievements::UserAchievement.create(
achievement: achievement,
user: recipient,
awarded_by_user: current_user)
return error_awarding(user_achievement) unless user_achievement.persisted?
NotificationService.new.new_achievement_email(recipient, achievement).deliver_later
ServiceResponse.success(payload: user_achievement)
rescue ActiveRecord::RecordNotFound => e
error(e.message)
end
private
def allowed?(achievement)
current_user&.can?(:award_achievement, achievement)
end
def error_no_permissions
error('You have insufficient permissions to award this achievement')
end
def error_awarding(user_achievement)
error(user_achievement&.errors&.full_messages || 'Failed to award achievement')
end
def error(message)
ServiceResponse.error(message: Array(message))
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Achievements::AwardService, feature_category: :user_profile do
describe '#execute' do
let_it_be(:developer) { create(:user) }
let_it_be(:maintainer) { create(:user) }
let_it_be(:group) { create(:group) }
let_it_be(:achievement) { create(:achievement, namespace: group) }
let_it_be(:recipient) { create(:user) }
let(:achievement_id) { achievement.id }
let(:recipient_id) { recipient.id }
subject(:response) { described_class.new(current_user, achievement_id, recipient_id).execute }
before_all do
group.add_developer(developer)
group.add_maintainer(maintainer)
end
context 'when user does not have permission' do
let(:current_user) { developer }
it 'returns an error' do
expect(response).to be_error
expect(response.message).to match_array(
['You have insufficient permissions to award this achievement'])
end
end
context 'when user has permission' do
let(:current_user) { maintainer }
let(:notification_service) { instance_double(NotificationService) }
let(:mail_message) { instance_double(ActionMailer::MessageDelivery) }
it 'creates an achievement and sends an e-mail' do
allow(NotificationService).to receive(:new).and_return(notification_service)
expect(notification_service).to receive(:new_achievement_email).with(recipient, achievement)
.and_return(mail_message)
expect(mail_message).to receive(:deliver_later)
expect(response).to be_success
end
context 'when the achievement is not persisted' do
let(:user_achievement) { instance_double('Achievements::UserAchievement') }
it 'returns the correct error' do
allow(user_achievement).to receive(:persisted?).and_return(false)
allow(user_achievement).to receive(:errors).and_return(nil)
allow(Achievements::UserAchievement).to receive(:create).and_return(user_achievement)
expect(response).to be_error
expect(response.message).to match_array(["Failed to award achievement"])
end
end
context 'when the achievement does not exist' do
let(:achievement_id) { non_existing_record_id }
it 'returns the correct error' do
expect(response).to be_error
expect(response.message)
.to contain_exactly("Couldn't find Achievements::Achievement with 'id'=#{non_existing_record_id}")
end
end
context 'when the recipient does not exist' do
let(:recipient_id) { non_existing_record_id }
it 'returns the correct error' do
expect(response).to be_error
expect(response.message).to contain_exactly("Couldn't find User with 'id'=#{non_existing_record_id}")
end
end
end
end
end
```

Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Achievements
class UpdateUserAchievementPrioritiesService
attr_reader :current_user, :user_achievements
def initialize(current_user, user_achievements)
@current_user = current_user
@user_achievements = user_achievements
end
def execute
return error_no_permissions unless allowed?
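# Map each user achievement ID to its index in the given array; that
# index becomes the achievement's new priority value.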
prioritized_user_achievements_map = Hash[user_achievements.map.with_index { |ua, idx| [ua.id, idx] }]
user_achievements_priorities_mapping = current_user.user_achievements.each_with_object({}) do |ua, result|
next if ua.priority.nil? && !prioritized_user_achievements_map.key?(ua.id)
result[ua] = { priority: prioritized_user_achievements_map.fetch(ua.id, nil) }
end
return ServiceResponse.success(payload: []) if user_achievements_priorities_mapping.empty?
::Gitlab::Database::BulkUpdate.execute(%i[priority], user_achievements_priorities_mapping)
ServiceResponse.success(payload: user_achievements_priorities_mapping.keys.map(&:reload))
end
private
def allowed?
user_achievements.all? { |user_achievement| current_user&.can?(:update_owned_user_achievement, user_achievement) }
end
def error(message)
ServiceResponse.error(payload: user_achievements, message: Array(message))
end
def error_no_permissions
error("You can't update at least one of the given user achievements.")
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Achievements::UpdateUserAchievementPrioritiesService, feature_category: :user_profile do
describe '#execute' do
let_it_be(:achievement_owner) { create(:user) }
let_it_be(:group) { create(:group) }
let_it_be(:achievement) { create(:achievement, namespace: group) }
let!(:user_achievement1) do
create(:user_achievement, achievement: achievement, user: achievement_owner, priority: 0)
end
let_it_be(:user_achievement2) { create(:user_achievement, achievement: achievement, user: achievement_owner) }
let_it_be(:user_achievement3) { create(:user_achievement, achievement: achievement, user: achievement_owner) }
subject(:response) { described_class.new(current_user, user_achievements).execute }
context 'when user does not have permission' do
let(:current_user) { create(:user) }
let(:user_achievements) { [user_achievement1] }
it 'returns an error', :aggregate_failures do
expect(response).to be_error
expect(response.message).to match_array(["You can't update at least one of the given user achievements."])
end
end
context 'when user has permission' do
let_it_be_with_reload(:current_user) { achievement_owner }
context 'with empty input' do
let(:user_achievements) { [] }
it 'removes all priorities', :aggregate_failures do
expect(response).to be_success
[user_achievement1, user_achievement2, user_achievement3].each do |ua|
expect(ua.reload.priority).to be_nil
end
end
end
context 'with prioritised achievements' do
let(:user_achievements) { [user_achievement3, user_achievement1] }
it 're-orders the achievements correctly', :aggregate_failures do
expect(response).to be_success
expect(user_achievement1.reload.priority).to eq(1)
expect(user_achievement2.reload.priority).to be_nil
expect(user_achievement3.reload.priority).to be_zero
end
end
context 'when no achievement is prioritized and no prioritizations are made' do
let!(:user_achievement1) { create(:user_achievement, achievement: achievement, user: achievement_owner) }
let(:user_achievements) { [] }
it 'works without errors', :aggregate_failures do
expect(response).to be_success
[user_achievement1, user_achievement2, user_achievement3].each do |ua|
expect(ua.reload.priority).to be_nil
end
end
end
end
end
end
```

Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Achievements
class DestroyUserAchievementService
attr_reader :current_user, :user_achievement
def initialize(current_user, user_achievement)
@current_user = current_user
@user_achievement = user_achievement
end
def execute
return error_no_permissions unless allowed?
user_achievement.delete
ServiceResponse.success(payload: user_achievement)
end
private
def allowed?
current_user&.can?(:destroy_user_achievement, user_achievement)
end
def error_no_permissions
error('You have insufficient permissions to delete this user achievement')
end
def error(message)
ServiceResponse.error(message: Array(message))
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Achievements::DestroyUserAchievementService, feature_category: :user_profile do
describe '#execute' do
let_it_be(:maintainer) { create(:user) }
let_it_be(:owner) { create(:user) }
let_it_be(:group) { create(:group) }
let_it_be(:achievement) { create(:achievement, namespace: group) }
let_it_be(:user_achievement) { create(:user_achievement, achievement: achievement) }
subject(:response) { described_class.new(current_user, user_achievement).execute }
before_all do
group.add_maintainer(maintainer)
group.add_owner(owner)
end
context 'when user does not have permission' do
let(:current_user) { maintainer }
it 'returns an error' do
expect(response).to be_error
expect(response.message).to match_array(
['You have insufficient permissions to delete this user achievement'])
end
end
context 'when user has permission' do
let(:current_user) { owner }
it 'deletes the achievement' do
expect(response).to be_success
expect(Achievements::UserAchievement.find_by(id: user_achievement.id)).to be_nil
end
end
end
end
```

Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Achievements
class RevokeService
attr_reader :current_user, :user_achievement
def initialize(current_user, user_achievement)
@current_user = current_user
@user_achievement = user_achievement
end
def execute
return error_no_permissions unless allowed?(user_achievement.achievement)
return error_already_revoked if user_achievement.revoked?
user_achievement.assign_attributes({
revoked_by_user_id: current_user.id,
revoked_at: Time.zone.now
})
return error_awarding unless user_achievement.save
ServiceResponse.success(payload: user_achievement)
end
private
def allowed?(achievement)
current_user&.can?(:award_achievement, achievement)
end
def error_no_permissions
error('You have insufficient permissions to revoke this achievement')
end
def error_already_revoked
error('This achievement has already been revoked')
end
def error_awarding
error(user_achievement&.errors&.full_messages || 'Failed to revoke achievement')
end
def error(message)
ServiceResponse.error(message: Array(message))
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Achievements::RevokeService, feature_category: :user_profile do
describe '#execute' do
let_it_be(:developer) { create(:user) }
let_it_be(:maintainer) { create(:user) }
let_it_be(:group) { create(:group) }
let_it_be(:achievement) { create(:achievement, namespace: group) }
let_it_be(:user_achievement) { create(:user_achievement, achievement: achievement) }
let(:user_achievement_param) { user_achievement }
subject(:response) { described_class.new(current_user, user_achievement_param).execute }
before_all do
group.add_developer(developer)
group.add_maintainer(maintainer)
end
context 'when user does not have permission' do
let(:current_user) { developer }
it 'returns an error' do
expect(response).to be_error
expect(response.message).to match_array(
['You have insufficient permissions to revoke this achievement'])
end
end
context 'when user has permission' do
let(:current_user) { maintainer }
it 'revokes an achievement' do
expect(response).to be_success
end
context 'when the achievement has already been revoked' do
let_it_be(:revoked_achievement) { create(:user_achievement, :revoked, achievement: achievement) }
let(:user_achievement_param) { revoked_achievement }
it 'returns the correct error' do
expect(response).to be_error
expect(response.message)
.to contain_exactly('This achievement has already been revoked')
end
end
context 'when the user achievement fails to save' do
let(:user_achievement_param) { instance_double('Achievements::UserAchievement') }
it 'returns the correct error' do
allow(user_achievement_param).to receive(:save).and_return(false)
allow(user_achievement_param).to receive(:achievement).and_return(achievement)
allow(user_achievement_param).to receive(:revoked?).and_return(false)
allow(user_achievement_param).to receive(:errors).and_return(nil)
expect(user_achievement_param).to receive(:assign_attributes)
expect(response).to be_error
expect(response.message).to match_array(["Failed to revoke achievement"])
end
end
end
end
end
```

Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Achievements
class DestroyService
attr_reader :current_user, :achievement
def initialize(current_user, achievement)
@current_user = current_user
@achievement = achievement
end
def execute
return error_no_permissions unless allowed?
achievement.delete
ServiceResponse.success(payload: achievement)
end
private
def allowed?
current_user&.can?(:admin_achievement, achievement)
end
def error_no_permissions
error('You have insufficient permissions to delete this achievement')
end
def error(message)
ServiceResponse.error(message: Array(message))
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Achievements::DestroyService, feature_category: :user_profile do
describe '#execute' do
let_it_be(:developer) { create(:user) }
let_it_be(:maintainer) { create(:user) }
let_it_be(:group) { create(:group) }
let(:achievement) { create(:achievement, namespace: group) }
subject(:response) { described_class.new(current_user, achievement).execute }
before_all do
group.add_developer(developer)
group.add_maintainer(maintainer)
end
context 'when user does not have permission' do
let(:current_user) { developer }
it 'returns an error' do
expect(response).to be_error
expect(response.message).to match_array(
['You have insufficient permissions to delete this achievement'])
end
end
context 'when user has permission' do
let(:current_user) { maintainer }
it 'deletes the achievement' do
expect(response).to be_success
expect(Achievements::Achievement.find_by(id: achievement.id)).to be_nil
end
end
end
end
```

Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Achievements
class UpdateService
attr_reader :current_user, :achievement, :params
def initialize(current_user, achievement, params)
@current_user = current_user
@achievement = achievement
@params = params
end
def execute
return error_no_permissions unless allowed?
if achievement.update(params)
ServiceResponse.success(payload: achievement)
else
error_updating
end
end
private
def allowed?
current_user&.can?(:admin_achievement, achievement)
end
def error_no_permissions
error('You have insufficient permission to update this achievement')
end
def error(message)
ServiceResponse.error(payload: achievement, message: Array(message))
end
def error_updating
error(achievement&.errors&.full_messages || 'Failed to update achievement')
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Achievements::UpdateService, feature_category: :user_profile do
describe '#execute' do
let_it_be(:user) { create(:user) }
let(:params) { attributes_for(:achievement, namespace: group) }
subject(:response) { described_class.new(user, achievement, params).execute }
context 'when user does not have permission' do
let_it_be(:group) { create(:group) }
let_it_be(:achievement) { create(:achievement, namespace: group) }
before_all do
group.add_developer(user)
end
it 'returns an error' do
expect(response).to be_error
expect(response.message).to match_array(
['You have insufficient permission to update this achievement'])
end
end
context 'when user has permission' do
let_it_be(:group) { create(:group) }
let_it_be(:achievement) { create(:achievement, namespace: group) }
before_all do
group.add_maintainer(user)
end
it 'updates an achievement' do
expect(response).to be_success
end
it 'returns an error when the achievement cannot be updated' do
params[:name] = nil
expect(response).to be_error
expect(response.message).to include("Name can't be blank")
end
end
end
end
```

Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Achievements
class CreateService < BaseService
def execute
return error_no_permissions unless allowed?
achievement = Achievements::Achievement.create(params.merge(namespace_id: @namespace.id))
return error_creating(achievement) unless achievement.persisted?
ServiceResponse.success(payload: achievement)
end
private
def error_no_permissions
error('You have insufficient permissions to create achievements for this namespace')
end
def error_creating(achievement)
error(achievement&.errors&.full_messages || 'Failed to create achievement')
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Achievements::CreateService, feature_category: :user_profile do
describe '#execute' do
let_it_be(:user) { create(:user) }
let(:params) { attributes_for(:achievement, namespace: group) }
subject(:response) { described_class.new(namespace: group, current_user: user, params: params).execute }
context 'when user does not have permission' do
let_it_be(:group) { create(:group) }
before_all do
group.add_developer(user)
end
it 'returns an error' do
expect(response).to be_error
expect(response.message).to match_array(
['You have insufficient permissions to create achievements for this namespace'])
end
end
context 'when user has permission' do
let_it_be(:group) { create(:group) }
before_all do
group.add_maintainer(user)
end
it 'creates an achievement' do
expect(response).to be_success
end
it 'returns an error when the achievement is not persisted' do
params[:name] = nil
expect(response).to be_error
expect(response.message).to match_array(["Name can't be blank"])
end
end
end
end
```

Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Security
class MergeReportsService
attr_reader :source_reports
def initialize(*source_reports)
@source_reports = source_reports
end
def execute
copy_resources_to_target_report
copy_findings_to_target
target_report
end
private
def target_report
@target_report ||= ::Gitlab::Ci::Reports::Security::Report.new(
source_reports.first.type,
source_reports.first.pipeline,
source_reports.first.created_at
).tap do |report|
report.errors = source_reports.flat_map(&:errors)
report.warnings = source_reports.flat_map(&:warnings)
end
end
def copy_resources_to_target_report
sorted_source_reports.each do |source_report|
copy_scanners_to_target(source_report)
copy_identifiers_to_target(source_report)
copy_scanned_resources_to_target(source_report)
end
end
def sorted_source_reports
source_reports.sort { |a, b| a.primary_scanner_order_to(b) }
end
def copy_scanners_to_target(source_report)
# no need for de-duping: it's done by Report internally
source_report.scanners.values.each { |scanner| target_report.add_scanner(scanner) }
end
def copy_identifiers_to_target(source_report)
# no need for de-duping: it's done by Report internally
source_report.identifiers.values.each { |identifier| target_report.add_identifier(identifier) }
end
def copy_scanned_resources_to_target(source_report)
target_report.scanned_resources.concat(source_report.scanned_resources).uniq!
end
def copy_findings_to_target
deduplicated_findings.sort.each { |finding| target_report.add_finding(finding) }
end
def deduplicated_findings
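# Findings are walked in scanner-priority order; a finding is dropped
# when any of its identifier keys has already been seen, so only the
# highest-priority finding per identifier set survives.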
prioritized_findings.each_with_object([[], Set.new]) do |finding, (deduplicated, seen_identifiers)|
next if seen_identifiers.intersect?(finding.keys.to_set)
seen_identifiers.merge(finding.keys)
deduplicated << finding
end.first
end
def prioritized_findings
source_reports.flat_map(&:findings).sort { |a, b| a.scanner_order_to(b) }
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
# rubocop: disable RSpec/MultipleMemoizedHelpers
RSpec.describe Security::MergeReportsService, '#execute', feature_category: :code_review_workflow do
let(:scanner_1) { build(:ci_reports_security_scanner, external_id: 'scanner-1', name: 'Scanner 1') }
let(:scanner_2) { build(:ci_reports_security_scanner, external_id: 'scanner-2', name: 'Scanner 2') }
let(:scanner_3) { build(:ci_reports_security_scanner, external_id: 'scanner-3', name: 'Scanner 3') }
let(:identifier_1_primary) { build(:ci_reports_security_identifier, external_id: 'VULN-1', external_type: 'scanner-1') }
let(:identifier_1_cve) { build(:ci_reports_security_identifier, external_id: 'CVE-2019-123', external_type: 'cve') }
let(:identifier_2_primary) { build(:ci_reports_security_identifier, external_id: 'VULN-2', external_type: 'scanner-2') }
let(:identifier_2_cve) { build(:ci_reports_security_identifier, external_id: 'CVE-2019-456', external_type: 'cve') }
let(:identifier_cwe) { build(:ci_reports_security_identifier, external_id: '789', external_type: 'cwe') }
let(:identifier_wasc) { build(:ci_reports_security_identifier, external_id: '13', external_type: 'wasc') }
let(:finding_id_1) do
build(
:ci_reports_security_finding,
identifiers: [identifier_1_primary, identifier_1_cve],
scanner: scanner_1,
severity: :low
)
end
let(:finding_id_1_extra) do
build(
:ci_reports_security_finding,
identifiers: [identifier_1_primary, identifier_1_cve],
scanner: scanner_1,
severity: :low
)
end
let(:finding_id_2_loc_1) do
build(
:ci_reports_security_finding,
identifiers: [identifier_2_primary, identifier_2_cve],
location: build(:ci_reports_security_locations_sast, start_line: 32, end_line: 34),
scanner: scanner_2,
severity: :medium
)
end
let(:finding_id_2_loc_1_extra) do
build(
:ci_reports_security_finding,
identifiers: [identifier_2_primary, identifier_2_cve],
location: build(:ci_reports_security_locations_sast, start_line: 32, end_line: 34),
scanner: scanner_2,
severity: :medium
)
end
let(:finding_id_2_loc_2) do
build(
:ci_reports_security_finding,
identifiers: [identifier_2_primary, identifier_2_cve],
location: build(:ci_reports_security_locations_sast, start_line: 42, end_line: 44),
scanner: scanner_2,
severity: :medium
)
end
let(:finding_cwe_1) do
build(
:ci_reports_security_finding,
identifiers: [identifier_cwe],
scanner: scanner_3,
severity: :high
)
end
let(:finding_cwe_2) do
build(
:ci_reports_security_finding,
identifiers: [identifier_cwe],
scanner: scanner_1,
severity: :critical
)
end
let(:finding_wasc_1) do
build(
:ci_reports_security_finding,
identifiers: [identifier_wasc],
scanner: scanner_1,
severity: :medium
)
end
let(:finding_wasc_2) do
build(
:ci_reports_security_finding,
identifiers: [identifier_wasc],
scanner: scanner_2,
severity: :critical
)
end
let(:report_1_findings) { [finding_id_1, finding_id_2_loc_1, finding_id_2_loc_1_extra, finding_cwe_2, finding_wasc_1] }
let(:scanned_resource) do
::Gitlab::Ci::Reports::Security::ScannedResource.new(URI.parse('example.com'), 'GET')
end
let(:scanned_resource_1) do
::Gitlab::Ci::Reports::Security::ScannedResource.new(URI.parse('example.com'), 'POST')
end
let(:scanned_resource_2) do
::Gitlab::Ci::Reports::Security::ScannedResource.new(URI.parse('example.com/2'), 'GET')
end
let(:scanned_resource_3) do
::Gitlab::Ci::Reports::Security::ScannedResource.new(URI.parse('example.com/3'), 'GET')
end
let(:report_1) do
build(
:ci_reports_security_report,
scanners: [scanner_1, scanner_2],
findings: report_1_findings,
identifiers: report_1_findings.flat_map(&:identifiers),
scanned_resources: [scanned_resource, scanned_resource_1, scanned_resource_2]
)
end
let(:report_2_findings) { [finding_id_2_loc_2, finding_wasc_2] }
let(:report_2) do
build(
:ci_reports_security_report,
scanners: [scanner_2],
findings: report_2_findings,
identifiers: finding_id_2_loc_2.identifiers,
scanned_resources: [scanned_resource, scanned_resource_1, scanned_resource_3]
)
end
let(:report_3_findings) { [finding_id_1_extra, finding_cwe_1] }
let(:report_3) do
build(
:ci_reports_security_report,
scanners: [scanner_1, scanner_3],
findings: report_3_findings,
identifiers: report_3_findings.flat_map(&:identifiers)
)
end
let(:merge_service) { described_class.new(report_1, report_2, report_3) }
subject(:merged_report) { merge_service.execute }
describe 'errors on target report' do
subject { merged_report.errors }
before do
report_1.add_error('foo', 'bar')
report_2.add_error('zoo', 'baz')
end
it { is_expected.to match_array([{ type: 'foo', message: 'bar' }, { type: 'zoo', message: 'baz' }]) }
end
describe 'warnings on target report' do
subject { merged_report.warnings }
before do
report_1.add_warning('foo', 'bar')
report_2.add_warning('zoo', 'baz')
end
it { is_expected.to match_array([{ type: 'foo', message: 'bar' }, { type: 'zoo', message: 'baz' }]) }
end
it 'copies scanners into target report and eliminates duplicates' do
expect(merged_report.scanners.values).to contain_exactly(scanner_1, scanner_2, scanner_3)
end
it 'copies identifiers into target report and eliminates duplicates' do
expect(merged_report.identifiers.values).to(
contain_exactly(
identifier_1_primary,
identifier_1_cve,
identifier_2_primary,
identifier_2_cve,
identifier_cwe,
identifier_wasc
)
)
end
it 'deduplicates (except cwe and wasc) and sorts the vulnerabilities by severity (desc) then by compare key' do
expect(merged_report.findings).to(
eq([
finding_cwe_2,
finding_wasc_2,
finding_cwe_1,
finding_id_2_loc_2,
finding_id_2_loc_1,
finding_wasc_1,
finding_id_1
])
)
end
it 'deduplicates scanned resources' do
expect(merged_report.scanned_resources).to(
eq([
scanned_resource,
scanned_resource_1,
scanned_resource_2,
scanned_resource_3
])
)
end
context 'ordering reports for sast analyzers' do
let(:bandit_scanner) { build(:ci_reports_security_scanner, external_id: 'bandit', name: 'Bandit') }
let(:semgrep_scanner) { build(:ci_reports_security_scanner, external_id: 'semgrep', name: 'Semgrep') }
let(:identifier_bandit) { build(:ci_reports_security_identifier, external_id: 'B403', external_type: 'bandit_test_id') }
let(:identifier_cve) { build(:ci_reports_security_identifier, external_id: 'CVE-2019-123', external_type: 'cve') }
let(:identifier_semgrep) { build(:ci_reports_security_identifier, external_id: 'rules.bandit.B105', external_type: 'semgrep_id') }
let(:finding_id_1) { build(:ci_reports_security_finding, identifiers: [identifier_bandit, identifier_cve], scanner: bandit_scanner, report_type: :sast) }
let(:finding_id_2) { build(:ci_reports_security_finding, identifiers: [identifier_cve], scanner: semgrep_scanner, report_type: :sast) }
let(:finding_id_3) { build(:ci_reports_security_finding, identifiers: [identifier_semgrep], scanner: semgrep_scanner, report_type: :sast) }
let(:bandit_report) do
build(:ci_reports_security_report,
type: :sast,
scanners: [bandit_scanner],
findings: [finding_id_1],
identifiers: finding_id_1.identifiers
)
end
let(:semgrep_report) do
build(
:ci_reports_security_report,
type: :sast,
scanners: [semgrep_scanner],
findings: [finding_id_2, finding_id_3],
identifiers: finding_id_2.identifiers + finding_id_3.identifiers
)
end
let(:custom_analyzer_report) do
build(
:ci_reports_security_report,
type: :sast,
scanners: [scanner_2],
findings: [finding_id_2_loc_1],
identifiers: finding_id_2_loc_1.identifiers
)
end
context 'when reports are gathered in an unprioritized order' do
subject(:sast_merged_report) { described_class.new(semgrep_report, bandit_report).execute }
specify { expect(sast_merged_report.scanners.values).to eql([bandit_scanner, semgrep_scanner]) }
specify { expect(sast_merged_report.findings.count).to eq(2) }
specify { expect(sast_merged_report.findings.first.identifiers).to eql([identifier_bandit, identifier_cve]) }
specify { expect(sast_merged_report.findings.last.identifiers).to contain_exactly(identifier_semgrep) }
end
context 'when a custom analyzer is completed before the known analyzers' do
subject(:sast_merged_report) { described_class.new(custom_analyzer_report, semgrep_report, bandit_report).execute }
specify { expect(sast_merged_report.scanners.values).to eql([bandit_scanner, semgrep_scanner, scanner_2]) }
specify { expect(sast_merged_report.findings.count).to eq(3) }
specify { expect(sast_merged_report.findings.last.identifiers).to match_array(finding_id_2_loc_1.identifiers) }
end
end
end
# rubocop: enable RSpec/MultipleMemoizedHelpers
```

Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Security
module CiConfiguration
class DependencyScanningCreateService < ::Security::CiConfiguration::BaseCreateService
private
def action
Security::CiConfiguration::DependencyScanningBuildAction.new(project.auto_devops_enabled?, existing_gitlab_ci_content,
project.ci_config_path).generate
end
def next_branch
'set-dependency-scanning-config'
end
def message
_('Configure Dependency Scanning in `.gitlab-ci.yml`, creating this file if it does not already exist')
end
def description
_('Configure Dependency Scanning in `.gitlab-ci.yml` using the GitLab managed template. You can [add variable overrides](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/#customizing-the-dependency-scanning-settings) to customize Dependency Scanning settings.')
end
def name
'Dependency Scanning'
end
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Security::CiConfiguration::DependencyScanningCreateService, :snowplow,
feature_category: :software_composition_analysis do
subject(:result) { described_class.new(project, user).execute }
let(:branch_name) { 'set-dependency-scanning-config-1' }
let(:snowplow_event) do
{
category: 'Security::CiConfiguration::DependencyScanningCreateService',
action: 'create',
label: ''
}
end
include_examples 'services security ci configuration create service', true
end
```

Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Security
module CiConfiguration
class SastCreateService < ::Security::CiConfiguration::BaseCreateService
attr_reader :params
def initialize(project, current_user, params, commit_on_default: false)
super(project, current_user)
@params = params
@commit_on_default = commit_on_default
@branch_name = project.default_branch if @commit_on_default
end
private
def remove_branch_on_exception
super unless @commit_on_default
end
def action
Security::CiConfiguration::SastBuildAction.new(project.auto_devops_enabled?, params, existing_gitlab_ci_content, project.ci_config_path).generate
end
def next_branch
'set-sast-config'
end
def message
_('Configure SAST in `.gitlab-ci.yml`, creating this file if it does not already exist')
end
def description
_('Configure SAST in `.gitlab-ci.yml` using the GitLab managed template. You can [add variable overrides](https://docs.gitlab.com/ee/user/application_security/sast/#customizing-the-sast-settings) to customize SAST settings.')
end
def name
'SAST'
end
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Security::CiConfiguration::SastCreateService, :snowplow,
feature_category: :static_application_security_testing do
subject(:result) { described_class.new(project, user, params).execute }
let(:branch_name) { 'set-sast-config-1' }
let(:non_empty_params) do
{ 'stage' => 'security',
'SEARCH_MAX_DEPTH' => 1,
'SECURE_ANALYZERS_PREFIX' => 'new_registry',
'SAST_EXCLUDED_PATHS' => 'spec,docs' }
end
let(:snowplow_event) do
{
category: 'Security::CiConfiguration::SastCreateService',
action: 'create',
label: 'false'
}
end
include_examples 'services security ci configuration create service'
RSpec.shared_examples_for 'commits directly to the default branch' do
it 'commits directly to the default branch' do
expect(project).to receive(:default_branch).twice.and_return('master')
expect(result.status).to eq(:success)
expect(result.payload[:success_path]).to match(/#{Gitlab::Routing.url_helpers.project_new_merge_request_url(project, {})}(.*)description(.*)source_branch/)
expect(result.payload[:branch]).to eq('master')
end
end
context 'when the repository is empty' do
let_it_be(:project) { create(:project_empty_repo) }
context 'when initialize_with_sast is false' do
before do
project.add_developer(user)
end
let(:params) { { initialize_with_sast: false } }
it 'returns a ServiceResponse error' do
expect(result).to be_kind_of(ServiceResponse)
expect(result.status).to eq(:error)
expect(result.message).to eq('You must <a target="_blank" rel="noopener noreferrer" ' \
'href="http://localhost/help/user/project/repository/index.md#' \
'add-files-to-a-repository">add at least one file to the ' \
'repository</a> before using Security features.')
end
end
context 'when initialize_with_sast is true' do
let(:params) { { initialize_with_sast: true } }
subject(:result) { described_class.new(project, user, params, commit_on_default: true).execute }
before do
project.add_maintainer(user)
end
it_behaves_like 'commits directly to the default branch'
end
end
context 'when committing to the default branch', :aggregate_failures do
subject(:result) { described_class.new(project, user, params, commit_on_default: true).execute }
let(:params) { {} }
before do
project.add_developer(user)
end
it 'does not try to remove that branch on raised exceptions' do
expect(Files::MultiService).to receive(:new).and_raise(StandardError, '_exception_')
expect(project.repository).not_to receive(:rm_branch)
expect { result }.to raise_error(StandardError, '_exception_')
end
it_behaves_like 'commits directly to the default branch'
end
end
```

Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Security
module CiConfiguration
# This class parses SAST template file and .gitlab-ci.yml to populate default and current values into the JSON
# read from app/validators/json_schemas/security_ci_configuration_schemas/sast_ui_schema.json
class SastParserService < ::BaseService
include Gitlab::Utils::StrongMemoize
SAST_UI_SCHEMA_PATH = 'app/validators/json_schemas/security_ci_configuration_schemas/sast_ui_schema.json'
def initialize(project)
@project = project
end
def configuration
result = Gitlab::Json.parse(File.read(Rails.root.join(SAST_UI_SCHEMA_PATH))).with_indifferent_access
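# Populate the schema in order: template defaults first, then seed the
# current values from those defaults, then override them with values
# from the project's CI configuration.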
populate_default_value_for(result, :global)
populate_default_value_for(result, :pipeline)
fill_current_value_with_default_for(result, :global)
fill_current_value_with_default_for(result, :pipeline)
populate_current_value_for(result, :global)
populate_current_value_for(result, :pipeline)
fill_current_value_with_default_for_analyzers(result)
populate_current_value_for_analyzers(result)
result
end
private
def sast_template_content
Gitlab::Template::GitlabCiYmlTemplate.find('SAST').content
end
def populate_default_value_for(config, level)
set_each(config[level], key: :default_value, with: sast_template_attributes)
end
def populate_current_value_for(config, level)
set_each(config[level], key: :value, with: gitlab_ci_yml_attributes)
end
def fill_current_value_with_default_for(config, level)
set_each(config[level], key: :value, with: sast_template_attributes)
end
def set_each(config_attributes, key:, with:)
config_attributes.each do |entity|
entity[key] = with[entity[:field]] if with[entity[:field]]
end
end
def fill_current_value_with_default_for_analyzers(result)
result[:analyzers].each do |analyzer|
analyzer[:variables].each do |entity|
entity[:value] = entity[:default_value] if entity[:default_value]
end
end
end
def populate_current_value_for_analyzers(result)
result[:analyzers].each do |analyzer|
analyzer[:enabled] = analyzer_enabled?(analyzer[:name])
populate_current_value_for(analyzer, :variables)
end
end
def analyzer_enabled?(analyzer_name)
# Unless explicitly listed in the excluded analyzers, consider it enabled
sast_excluded_analyzers.exclude?(analyzer_name)
end
def sast_excluded_analyzers
strong_memoize(:sast_excluded_analyzers) do
excluded_analyzers = gitlab_ci_yml_attributes["SAST_EXCLUDED_ANALYZERS"] || sast_template_attributes["SAST_EXCLUDED_ANALYZERS"]
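# excluded_analyzers may be nil when neither source defines the
# variable; the rescue below falls back to an empty list.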
begin
excluded_analyzers.split(',').map(&:strip)
rescue StandardError
[]
end
end
end
def sast_template_attributes
@sast_template_attributes ||= build_sast_attributes(sast_template_content)
end
def gitlab_ci_yml_attributes
@gitlab_ci_yml_attributes ||= begin
config_content = @project.repository.blob_data_at(
@project.repository.root_ref_sha, @project.ci_config_path_or_default
)
return {} unless config_content
build_sast_attributes(config_content)
end
end
def build_sast_attributes(content)
options = { project: @project, user: current_user, sha: @project.repository.commit.sha }
yaml_result = Gitlab::Ci::YamlProcessor.new(content, options).execute
return {} unless yaml_result.valid?
extract_required_attributes(yaml_result)
end
def extract_required_attributes(yaml_result)
result = {}
yaml_result.yaml_variables_for(:sast).each do |variable|
result[variable[:key]] = variable[:value]
end
result[:stage] = yaml_result.stage_for(:sast)
result.with_indifferent_access
end
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Security::CiConfiguration::SastParserService, feature_category: :static_application_security_testing do
describe '#configuration' do
include_context 'read ci configuration for sast enabled project'
let(:configuration) { described_class.new(project).configuration }
let(:secure_analyzers) { configuration['global'][0] }
let(:sast_excluded_paths) { configuration['global'][1] }
let(:sast_pipeline_stage) { configuration['pipeline'][0] }
let(:sast_search_max_depth) { configuration['pipeline'][1] }
let(:brakeman) { configuration['analyzers'][0] }
let(:sast_brakeman_level) { brakeman['variables'][0] }
let(:semgrep) { configuration['analyzers'][1] }
let(:secure_analyzers_prefix) { '$CI_TEMPLATE_REGISTRY_HOST/security-products' }
it 'parses the configuration for SAST' do
expect(secure_analyzers['default_value']).to eql(secure_analyzers_prefix)
expect(sast_excluded_paths['default_value']).to eql('spec, test, tests, tmp')
expect(sast_pipeline_stage['default_value']).to eql('test')
expect(sast_search_max_depth['default_value']).to eql('4')
expect(brakeman['enabled']).to be(true)
expect(sast_brakeman_level['default_value']).to eql('1')
end
context 'while populating current values of the entities' do
context 'when .gitlab-ci.yml is present' do
it 'populates the current values from the file' do
allow(project.repository).to receive(:blob_data_at).and_return(gitlab_ci_yml_content)
expect(secure_analyzers['value']).to eql("registry.gitlab.com/gitlab-org/security-products/analyzers2")
expect(sast_excluded_paths['value']).to eql('spec, executables')
expect(sast_pipeline_stage['value']).to eql('our_custom_security_stage')
expect(sast_search_max_depth['value']).to eql('8')
expect(brakeman['enabled']).to be(false)
expect(semgrep['enabled']).to be(true)
expect(sast_brakeman_level['value']).to eql('2')
end
context 'SAST_EXCLUDED_ANALYZERS is set' do
it 'enables analyzers correctly' do
allow(project.repository).to receive(:blob_data_at).and_return(gitlab_ci_yml_excluded_analyzers_content)
expect(brakeman['enabled']).to be(false)
expect(semgrep['enabled']).to be(true)
end
end
end
context 'when .gitlab-ci.yml is absent' do
it 'populates the current values with the default values' do
allow(project.repository).to receive(:blob_data_at).and_return(nil)
expect(secure_analyzers['value']).to eql(secure_analyzers_prefix)
expect(sast_excluded_paths['value']).to eql('spec, test, tests, tmp')
expect(sast_pipeline_stage['value']).to eql('test')
expect(sast_search_max_depth['value']).to eql('4')
expect(brakeman['enabled']).to be(true)
expect(sast_brakeman_level['value']).to eql('1')
end
end
context 'when .gitlab-ci.yml does not include the sast job' do
before do
allow(project.repository).to receive(:blob_data_at).and_return(
File.read(Rails.root.join('spec/support/gitlab_stubs/gitlab_ci.yml'))
)
end
it 'populates the current values with the default values' do
expect(secure_analyzers['value']).to eql(secure_analyzers_prefix)
expect(sast_excluded_paths['value']).to eql('spec, test, tests, tmp')
expect(sast_pipeline_stage['value']).to eql('test')
expect(sast_search_max_depth['value']).to eql('4')
expect(brakeman['enabled']).to be(true)
expect(sast_brakeman_level['value']).to eql('1')
end
end
end
end
end
```

Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Security
module CiConfiguration
class SecretDetectionCreateService < ::Security::CiConfiguration::BaseCreateService
private
def action
Security::CiConfiguration::SecretDetectionBuildAction.new(project.auto_devops_enabled?, existing_gitlab_ci_content,
project.ci_config_path).generate
end
def next_branch
'set-secret-detection-config'
end
def message
_('Configure Secret Detection in `.gitlab-ci.yml`, creating this file if it does not already exist')
end
def description
_('Configure Secret Detection in `.gitlab-ci.yml` using the GitLab managed template. You can [add variable overrides](https://docs.gitlab.com/ee/user/application_security/secret_detection/#customizing-settings) to customize Secret Detection settings.')
end
def name
'Secret Detection'
end
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Security::CiConfiguration::SecretDetectionCreateService, :snowplow, feature_category: :secret_detection do
subject(:result) { described_class.new(project, user).execute }
let(:branch_name) { 'set-secret-detection-config-1' }
let(:snowplow_event) do
{
category: 'Security::CiConfiguration::SecretDetectionCreateService',
action: 'create',
label: ''
}
end
include_examples 'services security ci configuration create service', true
end
```

Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Security
module CiConfiguration
class ContainerScanningCreateService < ::Security::CiConfiguration::BaseCreateService
private
def action
Security::CiConfiguration::ContainerScanningBuildAction.new(project.auto_devops_enabled?, existing_gitlab_ci_content,
project.ci_config_path).generate
end
def next_branch
'set-container-scanning-config'
end
def message
_('Configure Container Scanning in `.gitlab-ci.yml`, creating this file if it does not already exist')
end
def description
_('Configure Container Scanning in `.gitlab-ci.yml` using the GitLab managed template. You can [add variable overrides](https://docs.gitlab.com/ee/user/application_security/container_scanning/#customizing-the-container-scanning-settings) to customize Container Scanning settings.')
end
def name
'Container Scanning'
end
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Security::CiConfiguration::ContainerScanningCreateService, :snowplow, feature_category: :container_scanning do
subject(:result) { described_class.new(project, user).execute }
let(:branch_name) { 'set-container-scanning-config-1' }
let(:snowplow_event) do
{
category: 'Security::CiConfiguration::ContainerScanningCreateService',
action: 'create',
label: ''
}
end
include_examples 'services security ci configuration create service', true
end
```

Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Security
module CiConfiguration
class SastIacCreateService < ::Security::CiConfiguration::BaseCreateService
private
def action
Security::CiConfiguration::SastIacBuildAction.new(project.auto_devops_enabled?, existing_gitlab_ci_content,
project.ci_config_path).generate
end
def next_branch
'set-sast-iac-config'
end
def message
_('Configure SAST IaC in `.gitlab-ci.yml`, creating this file if it does not already exist')
end
def description
_('Configure SAST IaC in `.gitlab-ci.yml` using the GitLab managed template. You can [add variable overrides](https://docs.gitlab.com/ee/user/application_security/sast/#customizing-the-sast-settings) to customize SAST IaC settings.')
end
def name
'SAST IaC'
end
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Security::CiConfiguration::SastIacCreateService, :snowplow, feature_category: :static_application_security_testing do
subject(:result) { described_class.new(project, user).execute }
let(:branch_name) { 'set-sast-iac-config-1' }
let(:snowplow_event) do
{
category: 'Security::CiConfiguration::SastIacCreateService',
action: 'create',
label: ''
}
end
include_examples 'services security ci configuration create service', true
end
```

Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Auth
class DependencyProxyAuthenticationService < BaseService
AUDIENCE = 'dependency_proxy'
HMAC_KEY = 'gitlab-dependency-proxy'
DEFAULT_EXPIRE_TIME = 1.minute
def execute(authentication_abilities:)
return error('dependency proxy not enabled', 404) unless ::Gitlab.config.dependency_proxy.enabled
return error('access forbidden', 403) unless valid_user_actor?
{ token: authorized_token.encoded }
end
class << self
include ::Gitlab::Utils::StrongMemoize
def secret
strong_memoize(:secret) do
OpenSSL::HMAC.hexdigest(
'sha256',
::Settings.attr_encrypted_db_key_base,
HMAC_KEY
)
end
end
def token_expire_at
Time.current + Gitlab::CurrentSettings.container_registry_token_expire_delay.minutes
end
end
private
def valid_user_actor?
current_user || valid_deploy_token?
end
def valid_deploy_token?
deploy_token && deploy_token.valid_for_dependency_proxy?
end
def authorized_token
JSONWebToken::HMACToken.new(self.class.secret).tap do |token|
token['user_id'] = current_user.id if current_user
token['deploy_token'] = deploy_token.token if deploy_token
token.expire_time = self.class.token_expire_at
end
end
def deploy_token
params[:deploy_token]
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Auth::DependencyProxyAuthenticationService, feature_category: :dependency_proxy do
let_it_be(:user) { create(:user) }
let_it_be(:params) { {} }
let(:service) { described_class.new(nil, user, params) }
before do
stub_config(dependency_proxy: { enabled: true }, registry: { enabled: true })
end
describe '#execute' do
subject { service.execute(authentication_abilities: nil) }
shared_examples 'returning' do |status:, message:|
it "returns #{message}", :aggregate_failures do
expect(subject[:http_status]).to eq(status)
expect(subject[:message]).to eq(message)
end
end
shared_examples 'returning a token with an encoded field' do |field|
it 'returns a token with encoded field' do
token = subject[:token]
expect(token).not_to be_nil
decoded_token = decode(token)
expect(decoded_token[field]).not_to be_nil
end
end
context 'dependency proxy is not enabled' do
before do
stub_config(dependency_proxy: { enabled: false })
end
it_behaves_like 'returning', status: 404, message: 'dependency proxy not enabled'
end
context 'without a user' do
let(:user) { nil }
it_behaves_like 'returning', status: 403, message: 'access forbidden'
end
context 'with a deploy token' do
let_it_be(:deploy_token) { create(:deploy_token, :group, :dependency_proxy_scopes) }
let_it_be(:params) { { deploy_token: deploy_token } }
it_behaves_like 'returning a token with an encoded field', 'deploy_token'
end
context 'with a human user' do
it_behaves_like 'returning a token with an encoded field', 'user_id'
end
context 'all other user types' do
User::USER_TYPES.except(:human, :project_bot).each_value do |user_type|
context "with user_type #{user_type}" do
before do
user.update!(user_type: user_type)
end
it_behaves_like 'returning a token with an encoded field', 'user_id'
end
end
end
def decode(token)
DependencyProxy::AuthTokenService.new(token).execute
end
end
end
```

Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Auth
class ContainerRegistryAuthenticationService < BaseService
AUDIENCE = 'container_registry'
REGISTRY_LOGIN_ABILITIES = [
:read_container_image,
:create_container_image,
:destroy_container_image,
:update_container_image,
:admin_container_image,
:build_read_container_image,
:build_create_container_image,
:build_destroy_container_image
].freeze
FORBIDDEN_IMPORTING_SCOPES = %w[push delete *].freeze
ActiveImportError = Class.new(StandardError)
def execute(authentication_abilities:)
@authentication_abilities = authentication_abilities
return error('UNAVAILABLE', status: 404, message: 'registry not enabled') unless registry.enabled
return error('DENIED', status: 403, message: 'access forbidden') unless has_registry_ability?
unless scopes.any? || current_user || deploy_token || project
return error('DENIED', status: 403, message: 'access forbidden')
end
{ token: authorized_token(*scopes).encoded }
rescue ActiveImportError
error(
'DENIED',
status: 403,
message: 'Your repository is currently being migrated to a new platform and writes are temporarily disabled. Go to https://gitlab.com/groups/gitlab-org/-/epics/5523 to learn more.'
)
end
def self.full_access_token(*names)
names_and_actions = names.index_with { %w[*] }
access_token(names_and_actions)
end
def self.import_access_token
access_token({ 'import' => %w[*] }, 'registry')
end
def self.pull_access_token(*names)
names_and_actions = names.index_with { %w[pull] }
access_token(names_and_actions)
end
def self.pull_nested_repositories_access_token(name)
name = name.chomp('/')
access_token({
name => %w[pull],
"#{name}/*" => %w[pull]
})
end
def self.push_pull_nested_repositories_access_token(name)
name = name.chomp('/')
access_token(
{
name => %w[pull push],
"#{name}/*" => %w[pull]
},
override_project_path: name
)
end
def self.access_token(names_and_actions, type = 'repository', override_project_path: nil)
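# Builds an RSA-signed registry JWT granting the given actions on each
# named repository (or other resource type).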
registry = Gitlab.config.registry
token = JSONWebToken::RSAToken.new(registry.key)
token.issuer = registry.issuer
token.audience = AUDIENCE
token.expire_time = token_expire_at
token[:access] = names_and_actions.map do |name, actions|
{
type: type,
name: name,
actions: actions,
meta: access_metadata(path: name, override_project_path: override_project_path)
}.compact
end
token.encoded
end
def self.token_expire_at
Time.current + Gitlab::CurrentSettings.container_registry_token_expire_delay.minutes
end
def self.access_metadata(project: nil, path: nil, override_project_path: nil)
return { project_path: override_project_path.downcase } if override_project_path
# If the project is not given, try to infer it from the provided path
if project.nil?
return if path.nil? # If no path is given, return early
return if path == 'import' # Ignore the special 'import' path
# If the path ends with '/*', remove it so we can parse the actual repository path
path = path.chomp('/*')
# Parse the repository project from the path
begin
project = ContainerRegistry::Path.new(path).repository_project
rescue ContainerRegistry::Path::InvalidRegistryPathError
# If the path is invalid, gracefully handle the error
return
end
end
# Return the project path (lowercase) as metadata
{ project_path: project&.full_path&.downcase }
end
private
def authorized_token(*accesses)
JSONWebToken::RSAToken.new(registry.key).tap do |token|
token.issuer = registry.issuer
token.audience = params[:service]
token.subject = current_user.try(:username)
token.expire_time = self.class.token_expire_at
token[:auth_type] = params[:auth_type]
token[:access] = accesses.compact
token[:user] = user_info_token.encoded
end
end
def user_info_token
info =
if current_user
{
token_type: params[:auth_type],
username: current_user.username,
user_id: current_user.id
}
elsif deploy_token
{
token_type: params[:auth_type],
username: deploy_token.username,
deploy_token_id: deploy_token.id
}
end
JSONWebToken::RSAToken.new(registry.key).tap do |token|
token[:user_info] = info
end
end
def scopes
return [] unless params[:scopes]
@scopes ||= params[:scopes].map do |scope|
process_scope(scope)
end.compact
end
def process_scope(scope)
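# Scopes arrive in the Docker distribution scope format,
# e.g. "repository:group/project:pull,push".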
type, name, actions = scope.split(':', 3)
actions = actions.split(',')
case type
when 'registry'
process_registry_access(type, name, actions)
when 'repository'
path = ContainerRegistry::Path.new(name)
process_repository_access(type, path, actions)
end
end
def process_registry_access(type, name, actions)
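# Catalog browsing is restricted to instance administrators.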
return unless current_user&.admin?
return unless name == 'catalog'
return unless actions == ['*']
{ type: type, name: name, actions: ['*'] }
end
def process_repository_access(type, path, actions)
return unless path.valid?
raise ActiveImportError if actively_importing?(actions, path)
requested_project = path.repository_project
return unless requested_project
authorized_actions = actions.select do |action|
can_access?(requested_project, action)
end
log_if_actions_denied(type, requested_project, actions, authorized_actions)
return unless authorized_actions.present?
# At this point user/build is already authenticated.
#
ensure_container_repository!(path, authorized_actions)
{
type: type,
name: path.to_s,
actions: authorized_actions,
meta: self.class.access_metadata(project: requested_project)
}
end
def actively_importing?(actions, path)
return false if FORBIDDEN_IMPORTING_SCOPES.intersection(actions).empty?
container_repository = ContainerRepository.find_by_path(path)
return false unless container_repository
container_repository.migration_importing?
end
##
# Because we do not have two way communication with registry yet,
# we create a container repository image resource when push to the
# registry is successfully authorized.
#
def ensure_container_repository!(path, actions)
return if path.has_repository?
return unless actions.include?('push')
ContainerRepository.find_or_create_from_path(path)
end
# Overridden in EE
def can_access?(requested_project, requested_action)
return false unless requested_project.container_registry_enabled?
case requested_action
when 'pull'
build_can_pull?(requested_project) || user_can_pull?(requested_project) || deploy_token_can_pull?(requested_project)
when 'push'
build_can_push?(requested_project) || user_can_push?(requested_project) || deploy_token_can_push?(requested_project)
when 'delete'
build_can_delete?(requested_project) || user_can_admin?(requested_project)
when '*'
user_can_admin?(requested_project)
else
false
end
end
def build_can_delete?(requested_project)
# Build can delete only from the project from which it originates
has_authentication_ability?(:build_destroy_container_image) &&
requested_project == project
end
def registry
Gitlab.config.registry
end
def can_user?(ability, project)
can?(current_user, ability, project)
end
def build_can_pull?(requested_project)
# Build can:
# 1. pull from its own project (for ex. a build)
# 2. read images from dependent projects if creator of build is a team member
has_authentication_ability?(:build_read_container_image) &&
(requested_project == project || can_user?(:build_read_container_image, requested_project))
end
def user_can_admin?(requested_project)
has_authentication_ability?(:admin_container_image) &&
can_user?(:admin_container_image, requested_project)
end
def user_can_pull?(requested_project)
has_authentication_ability?(:read_container_image) &&
can_user?(:read_container_image, requested_project)
end
def deploy_token_can_pull?(requested_project)
has_authentication_ability?(:read_container_image) &&
deploy_token.present? &&
can?(deploy_token, :read_container_image, requested_project)
end
def deploy_token_can_push?(requested_project)
has_authentication_ability?(:create_container_image) &&
deploy_token.present? &&
can?(deploy_token, :create_container_image, requested_project)
end
##
# We still support legacy pipeline triggers which do not have an associated
# actor. The new permissions model and new triggers are always associated with
# an actor, so this should be improved once
# https://gitlab.com/gitlab-org/gitlab-foss/issues/37452 is resolved.
#
def build_can_push?(requested_project)
# Build can push only to the project from which it originates
has_authentication_ability?(:build_create_container_image) &&
requested_project == project
end
def user_can_push?(requested_project)
has_authentication_ability?(:create_container_image) &&
can_user?(:create_container_image, requested_project)
end
def error(code, status:, message: '')
{ errors: [{ code: code, message: message }], http_status: status }
end
def has_authentication_ability?(capability)
@authentication_abilities.to_a.include?(capability)
end
def has_registry_ability?
@authentication_abilities.any? do |ability|
REGISTRY_LOGIN_ABILITIES.include?(ability)
end
end
# Overridden in EE
def extra_info
{}
end
def deploy_token
params[:deploy_token]
end
def log_if_actions_denied(type, requested_project, requested_actions, authorized_actions)
return if requested_actions == authorized_actions
log_info = {
message: 'Denied container registry permissions',
scope_type: type,
requested_project_path: requested_project.full_path,
requested_actions: requested_actions,
authorized_actions: authorized_actions,
username: current_user&.username,
user_id: current_user&.id,
project_path: project&.full_path
}.merge!(extra_info).compact
Gitlab::AuthLogger.warn(log_info)
end
end
end
Auth::ContainerRegistryAuthenticationService.prepend_mod_with('Auth::ContainerRegistryAuthenticationService')
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Auth::ContainerRegistryAuthenticationService, feature_category: :container_registry do
include AdminModeHelper
it_behaves_like 'a container registry auth service'
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module ApplicationSettings
class UpdateService < ApplicationSettings::BaseService
include ValidatesClassificationLabel
attr_reader :params, :application_setting
MARKDOWN_CACHE_INVALIDATING_PARAMS = %w[asset_proxy_enabled asset_proxy_url asset_proxy_secret_key asset_proxy_whitelist].freeze
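# Example usage (a sketch based on the accompanying spec; names are illustrative):
#   ApplicationSettings::UpdateService.new(settings, admin_user, { terms: 'Be nice!' }).execute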
def execute
result = update_settings
auto_approve_blocked_users if result
result
end
private
def update_settings
validate_classification_label(application_setting, :external_authorization_service_default_label) unless bypass_external_auth?
if application_setting.errors.any?
return false
end
update_terms(@params.delete(:terms))
update_default_branch_protection_defaults(@params[:default_branch_protection])
add_to_outbound_local_requests_whitelist(@params.delete(:add_to_outbound_local_requests_whitelist))
if params.key?(:performance_bar_allowed_group_path)
group_id = process_performance_bar_allowed_group_id
return false if application_setting.errors.any?
params[:performance_bar_allowed_group_id] = group_id
end
if usage_stats_updated? && !params.delete(:skip_usage_stats_user)
params[:usage_stats_set_by_user_id] = current_user.id
end
@application_setting.assign_attributes(params)
if invalidate_markdown_cache?
@application_setting[:local_markdown_version] = @application_setting.local_markdown_version + 1
end
@application_setting.save
end
def usage_stats_updated?
params.key?(:usage_ping_enabled) || params.key?(:version_check_enabled)
end
def add_to_outbound_local_requests_whitelist(values)
values_array = Array(values).reject(&:empty?)
return if values_array.empty?
@application_setting.add_to_outbound_local_requests_whitelist(values_array)
end
def invalidate_markdown_cache?
!params.key?(:local_markdown_version) &&
(@application_setting.changes.keys & MARKDOWN_CACHE_INVALIDATING_PARAMS).any?
end
def update_terms(terms)
return unless terms.present?
# Avoid creating a new terms record if the text is exactly the same.
terms = terms.strip
return if terms == @application_setting.terms
ApplicationSetting::Term.create(terms: terms)
@application_setting.reset_memoized_terms
end
def update_default_branch_protection_defaults(default_branch_protection)
return unless default_branch_protection.present?
# We are migrating default_branch_protection from an integer
# column to a jsonb column. While completing the rest of the
# work, we want to start translating the updates sent to the
# existing column into the json. Eventually, we will be updating
# the jsonb column directly and deprecating the original update
# path. Until then, we want to sync up both columns.
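# For illustration (assumed mapping): an integer such as
# Gitlab::Access::PROTECTION_DEV_CAN_MERGE is expanded below into a hash
# describing who may push to and merge into the default branch, and that
# hash is what gets stored in the jsonb column.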
protection = Gitlab::Access::BranchProtection.new(default_branch_protection.to_i)
@application_setting.default_branch_protection_defaults = protection.to_hash
end
def process_performance_bar_allowed_group_id
group_full_path = params.delete(:performance_bar_allowed_group_path)
enable_param_on = Gitlab::Utils.to_boolean(params.delete(:performance_bar_enabled))
performance_bar_enabled = enable_param_on.nil? || enable_param_on # Default to true
return if group_full_path.blank?
return if enable_param_on == false # Explicitly disabling
unless performance_bar_enabled
application_setting.errors.add(:performance_bar_allowed_group_id, 'not allowed when performance bar is disabled')
return
end
group = Group.find_by_full_path(group_full_path.chomp('/'))
unless group
application_setting.errors.add(:performance_bar_allowed_group_id, 'not found')
return
end
group.id
end
def bypass_external_auth?
params.key?(:external_authorization_service_enabled) && !Gitlab::Utils.to_boolean(params[:external_authorization_service_enabled])
end
def auto_approve_blocked_users
return unless should_auto_approve_blocked_users?
ApproveBlockedPendingApprovalUsersWorker.perform_async(current_user.id)
end
def should_auto_approve_blocked_users?
return false unless application_setting.previous_changes.key?(:require_admin_approval_after_user_signup)
enabled_previous, enabled_current = application_setting.previous_changes[:require_admin_approval_after_user_signup]
enabled_previous && !enabled_current
end
end
end
ApplicationSettings::UpdateService.prepend_mod_with('ApplicationSettings::UpdateService')
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe ApplicationSettings::UpdateService, feature_category: :shared do
include ExternalAuthorizationServiceHelpers
let(:application_settings) { ::Gitlab::CurrentSettings.current_application_settings }
let(:admin) { create(:user, :admin) }
let(:params) { {} }
subject { described_class.new(application_settings, admin, params) }
before do
# So the caching behaves like it would in production
stub_env('IN_MEMORY_APPLICATION_SETTINGS', 'false')
# Creating these settings first ensures they're used by other factories
application_settings
end
describe 'updating terms' do
context 'when the passed terms are blank' do
let(:params) { { terms: '' } }
it 'does not create terms' do
expect { subject.execute }.not_to change { ApplicationSetting::Term.count }
end
end
context 'when passing terms' do
let(:params) { { terms: 'Be nice! ' } }
it 'creates the terms' do
expect { subject.execute }.to change { ApplicationSetting::Term.count }.by(1)
end
it 'does not create terms if they are the same as the existing ones' do
create(:term, terms: 'Be nice!')
expect { subject.execute }.not_to change { ApplicationSetting::Term.count }
end
it 'updates terms if they already existed' do
create(:term, terms: 'Other terms')
subject.execute
expect(application_settings.terms).to eq('Be nice!')
end
it 'only queries once when the terms are changed' do
create(:term, terms: 'Other terms')
expect(application_settings.terms).to eq('Other terms')
subject.execute
expect(application_settings.terms).to eq('Be nice!')
expect { 2.times { application_settings.terms } }
.not_to exceed_query_limit(0)
end
end
end
describe 'updating outbound_local_requests_whitelist' do
context 'when params is blank' do
let(:params) { {} }
it 'does not add to allowlist' do
expect { subject.execute }.not_to change {
application_settings.outbound_local_requests_whitelist
}
end
end
context 'when param add_to_outbound_local_requests_whitelist contains values' do
before do
application_settings.outbound_local_requests_whitelist = ['127.0.0.1']
end
let(:params) { { add_to_outbound_local_requests_whitelist: ['example.com', ''] } }
it 'adds to allowlist' do
expect { subject.execute }.to change {
application_settings.outbound_local_requests_whitelist
}
expect(application_settings.outbound_local_requests_whitelist).to contain_exactly(
'127.0.0.1', 'example.com'
)
end
end
context 'when param outbound_local_requests_allowlist_raw is passed' do
before do
application_settings.outbound_local_requests_whitelist = ['127.0.0.1']
end
let(:params) { { outbound_local_requests_allowlist_raw: 'example.com;gitlab.com' } }
it 'overwrites the existing allowlist' do
expect { subject.execute }.to change {
application_settings.outbound_local_requests_whitelist
}
expect(application_settings.outbound_local_requests_whitelist).to contain_exactly(
'example.com', 'gitlab.com'
)
end
end
end
describe 'markdown cache invalidators', feature_category: :team_planning do
shared_examples 'invalidates markdown cache' do |attribute|
let(:params) { attribute }
it 'increments cache' do
expect { subject.execute }.to change(application_settings, :local_markdown_version).by(1)
end
end
it_behaves_like 'invalidates markdown cache', { asset_proxy_enabled: true }
it_behaves_like 'invalidates markdown cache', { asset_proxy_url: 'http://test.com' }
it_behaves_like 'invalidates markdown cache', { asset_proxy_secret_key: 'another secret' }
it_behaves_like 'invalidates markdown cache', { asset_proxy_allowlist: ['domain.com'] }
it_behaves_like 'invalidates markdown cache', { asset_proxy_whitelist: ['domain.com'] }
context 'when also setting the local_markdown_version' do
let(:params) { { asset_proxy_enabled: true, local_markdown_version: 12 } }
it 'does not increment' do
expect { subject.execute }.to change(application_settings, :local_markdown_version).to(12)
end
end
context 'when the values do not change' do
let(:params) { { asset_proxy_enabled: true, asset_proxy_secret_key: 'secret', asset_proxy_url: 'http://test.com' } }
it 'does not increment' do
described_class.new(application_settings, admin, params).execute
expect { described_class.new(application_settings, admin, params).execute }.not_to change(application_settings, :local_markdown_version)
end
end
end
describe 'performance bar settings', feature_category: :cloud_connector do
using RSpec::Parameterized::TableSyntax
where(
:params_performance_bar_enabled,
:params_performance_bar_allowed_group_path,
:previous_performance_bar_allowed_group_id,
:expected_performance_bar_allowed_group_id,
:expected_valid
) do
true | '' | nil | nil | true
true | '' | 42_000_000 | nil | true
true | nil | nil | nil | true
true | nil | 42_000_000 | nil | true
true | 'foo' | nil | nil | false
true | 'foo' | 42_000_000 | 42_000_000 | false
true | 'group_a' | nil | 42_000_000 | true
true | 'group_b' | 42_000_000 | 43_000_000 | true
true | 'group_b/' | 42_000_000 | 43_000_000 | true
true | 'group_a' | 42_000_000 | 42_000_000 | true
false | '' | nil | nil | true
false | '' | 42_000_000 | nil | true
false | nil | nil | nil | true
false | nil | 42_000_000 | nil | true
false | 'foo' | nil | nil | true
false | 'foo' | 42_000_000 | nil | true
false | 'group_a' | nil | nil | true
false | 'group_b' | 42_000_000 | nil | true
false | 'group_a' | 42_000_000 | nil | true
nil | '' | nil | nil | true
nil | 'foo' | nil | nil | false
nil | 'group_a' | nil | 42_000_000 | true
end
with_them do
let(:params) do
{
performance_bar_allowed_group_path: params_performance_bar_allowed_group_path
}.tap do |params_hash|
# Treat nil in the table as missing
unless params_performance_bar_enabled.nil?
params_hash[:performance_bar_enabled] = params_performance_bar_enabled
end
end
end
before do
if previous_performance_bar_allowed_group_id == 42_000_000 || params_performance_bar_allowed_group_path == 'group_a'
create(:group, id: 42_000_000, path: 'group_a')
end
if expected_performance_bar_allowed_group_id == 43_000_000 || params_performance_bar_allowed_group_path == 'group_b'
create(:group, id: 43_000_000, path: 'group_b')
end
application_settings.update!(performance_bar_allowed_group_id: previous_performance_bar_allowed_group_id)
end
it 'sets performance_bar_allowed_group_id when present and performance_bar_enabled == true' do
expect(application_settings.performance_bar_allowed_group_id).to eq(previous_performance_bar_allowed_group_id)
if previous_performance_bar_allowed_group_id != expected_performance_bar_allowed_group_id
expect { subject.execute }
.to change(application_settings, :performance_bar_allowed_group_id)
.from(previous_performance_bar_allowed_group_id).to(expected_performance_bar_allowed_group_id)
else
expect { subject.execute }
.not_to change(application_settings, :performance_bar_allowed_group_id)
end
end
it 'adds errors to the model for invalid params' do
expect(subject.execute).to eq(expected_valid)
unless expected_valid
expect(application_settings.errors[:performance_bar_allowed_group_id]).to be_present
end
end
end
context 'when :performance_bar_allowed_group_path is not present' do
let(:group) { create(:group) }
before do
application_settings.update!(performance_bar_allowed_group_id: group.id)
end
it 'does not change the performance bar settings' do
expect { subject.execute }
.not_to change(application_settings, :performance_bar_allowed_group_id)
end
end
context 'when :performance_bar_enabled is not present' do
let(:group) { create(:group) }
let(:params) { { performance_bar_allowed_group_path: group.full_path } }
it 'implicitly defaults to true' do
expect { subject.execute }
.to change(application_settings, :performance_bar_allowed_group_id)
.from(nil).to(group.id)
end
end
end
context 'when external authorization is enabled', feature_category: :system_access do
before do
enable_external_authorization_service_check
end
it 'does not validate labels if external authorization gets disabled' do
expect_any_instance_of(described_class).not_to receive(:validate_classification_label)
described_class.new(application_settings, admin, { external_authorization_service_enabled: false }).execute
end
it 'does validate labels if external authorization gets enabled' do
expect_any_instance_of(described_class).to receive(:validate_classification_label)
described_class.new(application_settings, admin, { external_authorization_service_enabled: true }).execute
end
it 'does validate labels if external authorization is left unchanged' do
expect_any_instance_of(described_class).to receive(:validate_classification_label)
described_class.new(application_settings, admin, { external_authorization_service_default_label: 'new-label' }).execute
end
it 'does not save the settings with an error if the service denies access' do
expect(::Gitlab::ExternalAuthorization)
.to receive(:access_allowed?).with(admin, 'new-label') { false }
described_class.new(application_settings, admin, { external_authorization_service_default_label: 'new-label' }).execute
expect(application_settings.errors[:external_authorization_service_default_label]).to be_present
end
it 'saves the setting when the user has access to the label' do
expect(::Gitlab::ExternalAuthorization)
.to receive(:access_allowed?).with(admin, 'new-label') { true }
described_class.new(application_settings, admin, { external_authorization_service_default_label: 'new-label' }).execute
# Read the attribute directly to avoid the stub from
# `enable_external_authorization_service_check`
expect(application_settings[:external_authorization_service_default_label]).to eq('new-label')
end
it 'does not validate the label if it was not passed' do
expect(::Gitlab::ExternalAuthorization)
.not_to receive(:access_allowed?)
described_class.new(application_settings, admin, { home_page_url: 'http://foo.bar' }).execute
end
end
context 'when raw_blob_request_limit is passed' do
let(:params) do
{
raw_blob_request_limit: 600
}
end
it 'updates raw_blob_request_limit value' do
subject.execute
application_settings.reload
expect(application_settings.raw_blob_request_limit).to eq(600)
end
end
context 'when default_branch_protection is updated' do
let(:expected) { ::Gitlab::Access::BranchProtection.protected_against_developer_pushes.stringify_keys }
let(:params) { { default_branch_protection: ::Gitlab::Access::PROTECTION_DEV_CAN_MERGE } }
it "updates default_branch_protection_defaults from the default_branch_protection param" do
default_value = ::Gitlab::Access::BranchProtection.protected_fully.deep_stringify_keys
expect { subject.execute }.to change { application_settings.default_branch_protection_defaults }.from(default_value).to(expected)
end
end
context 'when protected path settings are passed' do
let(:params) do
{
throttle_protected_paths_enabled: 1,
throttle_protected_paths_period_in_seconds: 600,
throttle_protected_paths_requests_per_period: 100,
protected_paths_raw: "/users/password\r\n/users/sign_in\r\n",
protected_paths_for_get_request_raw: "/users/password\r\n/users/sign_up\r\n"
}
end
it 'updates protected path settings' do
subject.execute
application_settings.reload
expect(application_settings.throttle_protected_paths_enabled).to be_truthy
expect(application_settings.throttle_protected_paths_period_in_seconds).to eq(600)
expect(application_settings.throttle_protected_paths_requests_per_period).to eq(100)
expect(application_settings.protected_paths).to eq(['/users/password', '/users/sign_in'])
expect(application_settings.protected_paths_for_get_request).to match_array(['/users/password', '/users/sign_up'])
end
end
context 'when general rate limits are passed' do
let(:params) do
{
throttle_authenticated_api_enabled: true,
throttle_authenticated_api_period_in_seconds: 10,
throttle_authenticated_api_requests_per_period: 20,
throttle_authenticated_web_enabled: true,
throttle_authenticated_web_period_in_seconds: 30,
throttle_authenticated_web_requests_per_period: 40,
throttle_unauthenticated_api_enabled: true,
throttle_unauthenticated_api_period_in_seconds: 50,
throttle_unauthenticated_api_requests_per_period: 60,
throttle_unauthenticated_enabled: true,
throttle_unauthenticated_period_in_seconds: 50,
throttle_unauthenticated_requests_per_period: 60
}
end
it 'updates general throttle settings' do
subject.execute
expect(application_settings.reload).to have_attributes(params)
end
end
context 'when package registry rate limits are passed' do
let(:params) do
{
throttle_unauthenticated_packages_api_enabled: 1,
throttle_unauthenticated_packages_api_period_in_seconds: 500,
throttle_unauthenticated_packages_api_requests_per_period: 20,
throttle_authenticated_packages_api_enabled: 1,
throttle_authenticated_packages_api_period_in_seconds: 600,
throttle_authenticated_packages_api_requests_per_period: 10
}
end
it 'updates package registry throttle settings' do
subject.execute
application_settings.reload
expect(application_settings.throttle_unauthenticated_packages_api_enabled).to be_truthy
expect(application_settings.throttle_unauthenticated_packages_api_period_in_seconds).to eq(500)
expect(application_settings.throttle_unauthenticated_packages_api_requests_per_period).to eq(20)
expect(application_settings.throttle_authenticated_packages_api_enabled).to be_truthy
expect(application_settings.throttle_authenticated_packages_api_period_in_seconds).to eq(600)
expect(application_settings.throttle_authenticated_packages_api_requests_per_period).to eq(10)
end
end
context 'when files API rate limits are passed' do
let(:params) do
{
throttle_unauthenticated_files_api_enabled: 1,
throttle_unauthenticated_files_api_period_in_seconds: 500,
throttle_unauthenticated_files_api_requests_per_period: 20,
throttle_authenticated_files_api_enabled: 1,
throttle_authenticated_files_api_period_in_seconds: 600,
throttle_authenticated_files_api_requests_per_period: 10
}
end
it 'updates files API throttle settings' do
subject.execute
application_settings.reload
expect(application_settings.throttle_unauthenticated_files_api_enabled).to be_truthy
expect(application_settings.throttle_unauthenticated_files_api_period_in_seconds).to eq(500)
expect(application_settings.throttle_unauthenticated_files_api_requests_per_period).to eq(20)
expect(application_settings.throttle_authenticated_files_api_enabled).to be_truthy
expect(application_settings.throttle_authenticated_files_api_period_in_seconds).to eq(600)
expect(application_settings.throttle_authenticated_files_api_requests_per_period).to eq(10)
end
end
context 'when deprecated API rate limits are passed' do
let(:params) do
{
throttle_unauthenticated_deprecated_api_enabled: 1,
throttle_unauthenticated_deprecated_api_period_in_seconds: 500,
throttle_unauthenticated_deprecated_api_requests_per_period: 20,
throttle_authenticated_deprecated_api_enabled: 1,
throttle_authenticated_deprecated_api_period_in_seconds: 600,
throttle_authenticated_deprecated_api_requests_per_period: 10
}
end
it 'updates deprecated API throttle settings' do
subject.execute
application_settings.reload
expect(application_settings.throttle_unauthenticated_deprecated_api_enabled).to be_truthy
expect(application_settings.throttle_unauthenticated_deprecated_api_period_in_seconds).to eq(500)
expect(application_settings.throttle_unauthenticated_deprecated_api_requests_per_period).to eq(20)
expect(application_settings.throttle_authenticated_deprecated_api_enabled).to be_truthy
expect(application_settings.throttle_authenticated_deprecated_api_period_in_seconds).to eq(600)
expect(application_settings.throttle_authenticated_deprecated_api_requests_per_period).to eq(10)
end
end
context 'when git lfs rate limits are passed' do
let(:params) do
{
throttle_authenticated_git_lfs_enabled: 1,
throttle_authenticated_git_lfs_period_in_seconds: 600,
throttle_authenticated_git_lfs_requests_per_period: 10
}
end
it 'updates git lfs throttle settings' do
subject.execute
application_settings.reload
expect(application_settings.throttle_authenticated_git_lfs_enabled).to be_truthy
expect(application_settings.throttle_authenticated_git_lfs_period_in_seconds).to eq(600)
expect(application_settings.throttle_authenticated_git_lfs_requests_per_period).to eq(10)
end
end
context 'when issues_create_limit is passed' do
let(:params) do
{
issues_create_limit: 600
}
end
it 'updates issues_create_limit value' do
subject.execute
application_settings.reload
expect(application_settings.issues_create_limit).to eq(600)
end
end
context 'when users_get_by_id_limit and users_get_by_id_limit_allowlist_raw are passed' do
let(:params) do
{
users_get_by_id_limit: 456,
users_get_by_id_limit_allowlist_raw: 'someone, someone_else'
}
end
it 'updates users_get_by_id_limit and users_get_by_id_limit_allowlist value' do
subject.execute
application_settings.reload
expect(application_settings.users_get_by_id_limit).to eq(456)
expect(application_settings.users_get_by_id_limit_allowlist).to eq(%w[someone someone_else])
end
end
context 'when require_admin_approval_after_user_signup changes' do
context 'when it goes from enabled to disabled' do
let(:params) { { require_admin_approval_after_user_signup: false } }
it 'calls ApproveBlockedPendingApprovalUsersWorker' do
expect(ApproveBlockedPendingApprovalUsersWorker).to receive(:perform_async)
subject.execute
end
end
context 'when it goes from disabled to enabled' do
let(:params) { { require_admin_approval_after_user_signup: true } }
it 'does not call ApproveBlockedPendingApprovalUsersWorker' do
application_settings.update!(require_admin_approval_after_user_signup: false)
expect(ApproveBlockedPendingApprovalUsersWorker).not_to receive(:perform_async)
subject.execute
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module BatchedGitRefUpdates
class CleanupSchedulerService
include Gitlab::ExclusiveLeaseHelpers
MAX_PROJECTS = 10_000
BATCH_SIZE = 100
LOCK_TIMEOUT = 10.minutes
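# Example usage (a sketch based on the accompanying spec; the count is illustrative):
#   BatchedGitRefUpdates::CleanupSchedulerService.new.execute
#   # => { total_projects: 2 }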
def execute
total_projects = 0
in_lock(self.class.name, retries: 0, ttl: LOCK_TIMEOUT) do
Deletion.status_pending.distinct_each_batch(column: :project_id, of: BATCH_SIZE) do |deletions|
ProjectCleanupWorker.bulk_perform_async_with_contexts(
deletions,
arguments_proc: ->(deletion) { deletion.project_id },
context_proc: ->(_) { {} } # No project context because loading the project is wasteful
)
total_projects += deletions.count
break if total_projects >= MAX_PROJECTS
end
end
{ total_projects: total_projects }
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe BatchedGitRefUpdates::CleanupSchedulerService, feature_category: :gitaly do
let(:service) { described_class.new }
describe '#execute' do
before do
BatchedGitRefUpdates::Deletion.create!(project_id: 123, ref: 'ref1')
BatchedGitRefUpdates::Deletion.create!(project_id: 123, ref: 'ref2')
BatchedGitRefUpdates::Deletion.create!(project_id: 456, ref: 'ref3')
BatchedGitRefUpdates::Deletion.create!(project_id: 789, ref: 'ref4', status: :processed)
end
it 'schedules ProjectCleanupWorker for each project in pending BatchedGitRefUpdates::Deletion' do
project_ids = []
expect(BatchedGitRefUpdates::ProjectCleanupWorker)
.to receive(:bulk_perform_async_with_contexts) do |deletions, arguments_proc:, context_proc:| # rubocop:disable Lint/UnusedBlockArgument
project_ids += deletions.map(&arguments_proc)
end
service.execute
expect(project_ids).to contain_exactly(123, 456)
end
it 'returns stats' do
stats = service.execute
expect(stats).to eq({
total_projects: 2
})
end
it 'acquires a lock to avoid running duplicate instances' do
expect(service).to receive(:in_lock) # Mock and don't yield
.with(described_class.name, retries: 0, ttl: described_class::LOCK_TIMEOUT)
expect(BatchedGitRefUpdates::ProjectCleanupWorker).not_to receive(:bulk_perform_async_with_contexts)
service.execute
end
it 'limits to MAX_PROJECTS before it stops' do
stub_const("#{described_class}::BATCH_SIZE", 1)
stub_const("#{described_class}::MAX_PROJECTS", 1)
stats = service.execute
expect(stats).to eq({
total_projects: 1
})
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module BatchedGitRefUpdates
class ProjectCleanupService
include ::Gitlab::ExclusiveLeaseHelpers
LOCK_TIMEOUT = 10.minutes
GITALY_BATCH_SIZE = 100
QUERY_BATCH_SIZE = 1000
MAX_DELETES = 10_000
def initialize(project_id)
@project_id = project_id
end
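# Example usage (a sketch based on the accompanying spec; the count is illustrative):
#   BatchedGitRefUpdates::ProjectCleanupService.new(project.id).execute
#   # => { total_deletes: 2 }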
def execute
total_deletes = 0
in_lock("#{self.class}/#{@project_id}", retries: 0, ttl: LOCK_TIMEOUT) do
project = Project.find_by_id(@project_id)
break unless project
Deletion
.status_pending
.for_project(@project_id)
.select_ref_and_identity
.each_batch(of: QUERY_BATCH_SIZE) do |batch|
refs = batch.map(&:ref)
refs.each_slice(GITALY_BATCH_SIZE) do |refs_to_delete|
project.repository.delete_refs(*refs_to_delete.uniq)
end
total_deletes += refs.count
Deletion.mark_records_processed(batch)
break if total_deletes >= MAX_DELETES
end
end
{ total_deletes: total_deletes }
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe BatchedGitRefUpdates::ProjectCleanupService, feature_category: :gitaly do
let(:service) { described_class.new(project1.id) }
let_it_be(:project1) { create(:project, :repository) }
let_it_be(:project2) { create(:project, :repository) }
let!(:project1_ref1) do
BatchedGitRefUpdates::Deletion.create!(project_id: project1.id, ref: 'refs/test/project1-ref1')
end
let!(:project1_ref2) do
BatchedGitRefUpdates::Deletion.create!(project_id: project1.id, ref: 'refs/test/project1-ref2')
end
let!(:project1_ref3) do
BatchedGitRefUpdates::Deletion.create!(project_id: project1.id, ref: 'refs/test/already-processed',
status: :processed)
end
let!(:project2_ref1) do
BatchedGitRefUpdates::Deletion.create!(project_id: project2.id, ref: 'refs/test/project2-ref1')
end
describe '#execute' do
before do
project1.repository.create_ref('HEAD', 'refs/test/ref-to-not-be-deleted')
project1.repository.create_ref('HEAD', project1_ref1.ref)
project1.repository.create_ref('HEAD', project1_ref2.ref)
project1.repository.create_ref('HEAD', 'refs/test/already-processed')
project2.repository.create_ref('HEAD', project2_ref1.ref)
end
it 'deletes the named refs in batches for the given project only' do
expect(test_refs(project1)).to include(
'refs/test/ref-to-not-be-deleted',
'refs/test/already-processed',
'refs/test/project1-ref1',
'refs/test/project1-ref2')
service.execute
expect(test_refs(project1)).to include(
'refs/test/already-processed',
'refs/test/ref-to-not-be-deleted')
expect(test_refs(project1)).not_to include(
'refs/test/project1-ref1',
'refs/test/project1-ref2')
expect(test_refs(project2)).to include('refs/test/project2-ref1')
end
it 'handles duplicates' do
BatchedGitRefUpdates::Deletion.create!(project_id: project1.id, ref: 'refs/test/some-duplicate')
BatchedGitRefUpdates::Deletion.create!(project_id: project1.id, ref: 'refs/test/some-duplicate')
service.execute
expect(test_refs(project1)).not_to include('refs/test/some-duplicate')
end
it 'marks the processed BatchedGitRefUpdates::Deletion as processed' do
service.execute
expect(BatchedGitRefUpdates::Deletion.status_pending.map(&:ref)).to contain_exactly('refs/test/project2-ref1')
expect(BatchedGitRefUpdates::Deletion.status_processed.map(&:ref)).to contain_exactly(
'refs/test/project1-ref1',
'refs/test/project1-ref2',
'refs/test/already-processed')
end
it 'returns stats' do
result = service.execute
expect(result[:total_deletes]).to eq(2)
end
it 'acquires a lock for the given project_id to avoid running duplicate instances' do
expect(service).to receive(:in_lock) # Mock and don't yield
.with("#{described_class}/#{project1.id}", retries: 0, ttl: described_class::LOCK_TIMEOUT)
expect { service.execute }.not_to change { BatchedGitRefUpdates::Deletion.status_pending.count }
end
it 'does nothing when the project does not exist' do
result = described_class.new(non_existing_record_id).execute
expect(result[:total_deletes]).to eq(0)
end
it 'stops after it reaches limit of deleted refs' do
stub_const("#{described_class}::QUERY_BATCH_SIZE", 1)
stub_const("#{described_class}::MAX_DELETES", 1)
result = service.execute
expect(result[:total_deletes]).to eq(1)
end
def test_refs(project)
project.repository.list_refs(['refs/test/']).map(&:name)
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class ProcessSyncEventsService
include ExclusiveLeaseGuard
BATCH_SIZE = 1000
def initialize(sync_event_class, sync_class)
@sync_event_class = sync_event_class
@sync_class = sync_class
@results = {}
end
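# Example usage (a sketch based on the accompanying spec; counts are illustrative):
#   Ci::ProcessSyncEventsService.new(Projects::SyncEvent, Ci::ProjectMirror).execute
#   # => { estimated_total_events: 2, consumable_events: 2, processed_events: 2 }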
def execute
# preventing parallel processing over the same event table
try_obtain_lease { process_events }
enqueue_worker_if_there_still_event
@results
end
private
def process_events
add_result(estimated_total_events: @sync_event_class.upper_bound_count)
events = @sync_event_class.preload_synced_relation.first(BATCH_SIZE)
add_result(consumable_events: events.size)
return if events.empty?
processed_events = []
begin
events.each do |event|
@sync_class.sync!(event)
processed_events << event
end
ensure
add_result(processed_events: processed_events.size)
@sync_event_class.id_in(processed_events).delete_all
end
end
def enqueue_worker_if_there_still_event
@sync_event_class.enqueue_worker if @sync_event_class.exists?
end
def lease_key
"#{super}::#{@sync_event_class}"
end
def lease_timeout
1.minute
end
def add_result(result)
@results.merge!(result)
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::ProcessSyncEventsService, feature_category: :continuous_integration do
let!(:group) { create(:group) }
let!(:project1) { create(:project, group: group) }
let!(:project2) { create(:project, group: group) }
let!(:parent_group_1) { create(:group) }
let!(:parent_group_2) { create(:group) }
subject(:service) { described_class.new(sync_event_class, hierarchy_class) }
describe '#execute' do
subject(:execute) { service.execute }
context 'for Projects::SyncEvent' do
let(:sync_event_class) { Projects::SyncEvent }
let(:hierarchy_class) { ::Ci::ProjectMirror }
before do
Projects::SyncEvent.delete_all
project1.update!(group: parent_group_1)
project2.update!(group: parent_group_2)
end
it { is_expected.to eq(service_results(2, 2, 2)) }
it 'consumes events' do
expect { execute }.to change(Projects::SyncEvent, :count).from(2).to(0)
expect(project1.reload.ci_project_mirror).to have_attributes(
namespace_id: parent_group_1.id
)
expect(project2.reload.ci_project_mirror).to have_attributes(
namespace_id: parent_group_2.id
)
end
context 'when any event left after processing' do
before do
stub_const("#{described_class}::BATCH_SIZE", 1)
end
it { is_expected.to eq(service_results(2, 1, 1)) }
it 'enqueues Projects::ProcessSyncEventsWorker' do
expect(Projects::ProcessSyncEventsWorker).to receive(:perform_async)
execute
end
end
context 'when no event left after processing' do
before do
stub_const("#{described_class}::BATCH_SIZE", 2)
end
it { is_expected.to eq(service_results(2, 2, 2)) }
it 'does not enqueue Projects::ProcessSyncEventsWorker' do
expect(Projects::ProcessSyncEventsWorker).not_to receive(:perform_async)
execute
end
end
context 'when there is no event' do
before do
Projects::SyncEvent.delete_all
end
it { is_expected.to eq(service_results(0, 0, nil)) }
it 'does nothing' do
expect { execute }.not_to change(Projects::SyncEvent, :count)
end
end
context 'when there are non-executed events' do
before do
new_project = create(:project)
sync_event_class.delete_all
project1.update!(group: parent_group_2)
new_project.update!(group: parent_group_1)
project2.update!(group: parent_group_1)
@new_project_sync_event = new_project.sync_events.last
allow(sync_event_class).to receive(:preload_synced_relation).and_return(
sync_event_class.where.not(id: @new_project_sync_event)
)
end
it { is_expected.to eq(service_results(3, 2, 2)) }
it 'does not delete non-executed events' do
expect { execute }.to change(Projects::SyncEvent, :count).from(3).to(1)
expect(@new_project_sync_event.reload).to be_persisted
end
end
private
def service_results(total, consumable, processed)
{
estimated_total_events: total,
consumable_events: consumable,
processed_events: processed
}.compact
end
end
context 'for Namespaces::SyncEvent' do
let(:sync_event_class) { Namespaces::SyncEvent }
let(:hierarchy_class) { ::Ci::NamespaceMirror }
before do
Namespaces::SyncEvent.delete_all
# Creates a sync event for group, and the ProjectNamespace of project1 & project2: 3 in total
group.update!(parent: parent_group_2)
# Creates a sync event for parent_group2 and all the children: 4 in total
parent_group_2.update!(parent: parent_group_1)
end
shared_examples 'event consuming' do
it 'consumes events' do
expect { execute }.to change(Namespaces::SyncEvent, :count).from(7).to(0)
expect(group.reload.ci_namespace_mirror).to have_attributes(
traversal_ids: [parent_group_1.id, parent_group_2.id, group.id]
)
expect(parent_group_2.reload.ci_namespace_mirror).to have_attributes(
traversal_ids: [parent_group_1.id, parent_group_2.id]
)
expect(project1.reload.project_namespace).to have_attributes(
traversal_ids: [parent_group_1.id, parent_group_2.id, group.id, project1.project_namespace.id]
)
expect(project2.reload.project_namespace).to have_attributes(
traversal_ids: [parent_group_1.id, parent_group_2.id, group.id, project2.project_namespace.id]
)
end
end
it_behaves_like 'event consuming'
it 'enqueues Namespaces::ProcessSyncEventsWorker if any left' do
stub_const("#{described_class}::BATCH_SIZE", 1)
expect(Namespaces::ProcessSyncEventsWorker).to receive(:perform_async)
execute
end
it 'does not enqueue Namespaces::ProcessSyncEventsWorker if no left' do
stub_const("#{described_class}::BATCH_SIZE", 7)
expect(Namespaces::ProcessSyncEventsWorker).not_to receive(:perform_async)
execute
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class TestFailureHistoryService
class Async
attr_reader :service
def initialize(service)
@service = service
end
def perform_if_needed
TestFailureHistoryWorker.perform_async(service.pipeline.id) if service.should_track_failures?
end
end
MAX_TRACKABLE_FAILURES = 200
attr_reader :pipeline
delegate :project, to: :pipeline
def initialize(pipeline)
@pipeline = pipeline
end
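# Example usage (a sketch based on the accompanying spec):
#   Ci::TestFailureHistoryService.new(pipeline).execute
#   # or, to enqueue a background job only when tracking applies:
#   Ci::TestFailureHistoryService.new(pipeline).async.perform_if_needed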
def execute
return unless should_track_failures?
track_failures
end
def should_track_failures?
return false unless project.default_branch_or_main == pipeline.ref
# We fetch up to MAX_TRACKABLE_FAILURES + 1 builds. So if we ever get
# 201 builds in total, then, assuming each job has at least 1 failed unit
# test, we have at least 201 failed unit tests, which exceeds the
# MAX_TRACKABLE_FAILURES of 200. In that case, let's exit early so we
# don't have to parse the JUnit report of each of the 201 builds.
failed_builds.length <= MAX_TRACKABLE_FAILURES
end
def async
Async.new(self)
end
private
def failed_builds
@failed_builds ||= pipeline.builds_with_failed_tests(limit: MAX_TRACKABLE_FAILURES + 1)
end
def track_failures
failed_unit_tests = gather_failed_unit_tests_from_reports(failed_builds)
return if failed_unit_tests.size > MAX_TRACKABLE_FAILURES
failed_unit_tests.each_slice(100) do |batch|
Ci::UnitTest.transaction do
unit_test_attrs = ci_unit_test_attrs(batch)
ci_unit_tests = Ci::UnitTest.find_or_create_by_batch(project, unit_test_attrs)
failures = ci_unit_test_failure_attrs(ci_unit_tests, failed_unit_tests)
Ci::UnitTestFailure.insert_all(failures)
end
end
end
def gather_failed_unit_tests_from_reports(failed_builds)
failed_builds.each_with_object({}) do |build, failed_unit_tests|
test_suite = generate_test_suite!(build)
test_suite.failed.each do |key, unit_test|
failed_unit_tests[key] = {
build: build, # This will be used in ci_unit_test_failure_attrs
unit_test: unit_test # This will be used in ci_unit_test_attrs
}
end
end
end
def generate_test_suite!(build)
test_report = build.collect_test_reports!(Gitlab::Ci::Reports::TestReport.new)
test_report.get_suite(build.test_suite_name)
end
def ci_unit_test_attrs(batch)
batch.map do |item|
unit_test = item.last[:unit_test]
{
key_hash: unit_test.key,
name: unit_test.name,
suite_name: unit_test.suite_name
}
end
end
def ci_unit_test_failure_attrs(ci_unit_tests, failed_unit_tests)
ci_unit_tests.map do |ci_unit_test|
build = failed_unit_tests[ci_unit_test.key_hash][:build]
{
unit_test_id: ci_unit_test.id,
build_id: build.id,
failed_at: build.finished_at,
partition_id: build.partition_id
}
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::TestFailureHistoryService, :aggregate_failures, feature_category: :continuous_integration do
let_it_be(:project) { create(:project, :repository) }
let_it_be_with_reload(:pipeline) do
create(:ci_pipeline, status: :created, project: project, ref: project.default_branch)
end
describe '#execute' do
subject(:execute_service) { described_class.new(pipeline).execute }
context 'when pipeline has failed builds with test reports' do
let_it_be(:job) do
# The test report has 2 unit test failures
create(:ci_build, :failed, :test_reports, pipeline: pipeline)
end
it 'creates unit test failures records' do
execute_service
expect(Ci::UnitTest.count).to eq(2)
expect(Ci::UnitTestFailure.count).to eq(2)
end
it 'assigns partition_id to Ci::UnitTestFailure' do
execute_service
unit_test_failure_partition_ids = Ci::UnitTestFailure.distinct.pluck(:partition_id)
expect(unit_test_failure_partition_ids).to match_array([job.partition_id])
end
context 'when pipeline is not for the default branch' do
before do
pipeline.update_column(:ref, 'new-feature')
end
it 'does not persist data' do
execute_service
expect(Ci::UnitTest.count).to eq(0)
expect(Ci::UnitTestFailure.count).to eq(0)
end
end
context 'when test failure data have already been persisted with the same exact attributes' do
before do
execute_service
end
it 'does not fail but does not persist new data' do
expect { described_class.new(pipeline).execute }.not_to raise_error
expect(Ci::UnitTest.count).to eq(2)
expect(Ci::UnitTestFailure.count).to eq(2)
end
end
context 'when number of failed unit tests exceed the limit' do
before do
stub_const("#{described_class.name}::MAX_TRACKABLE_FAILURES", 1)
end
it 'does not persist data' do
execute_service
expect(Ci::UnitTest.count).to eq(0)
expect(Ci::UnitTestFailure.count).to eq(0)
end
end
context 'when number of failed unit tests across multiple builds exceed the limit' do
before do
stub_const("#{described_class.name}::MAX_TRACKABLE_FAILURES", 2)
# This other test report has 1 unique unit test failure which brings us to 3 total failures across all builds
# thus exceeding the limit of 2 for MAX_TRACKABLE_FAILURES
create(:ci_build, :failed, :test_reports_with_duplicate_failed_test_names, pipeline: pipeline)
end
it 'does not persist data' do
execute_service
expect(Ci::UnitTest.count).to eq(0)
expect(Ci::UnitTestFailure.count).to eq(0)
end
end
end
context 'when test failure data have duplicates within the same payload (happens when the JUnit report has duplicate unit test names but different failures)' do
before do
# The test report has 2 unit test failures but with the same unit test keys
create(:ci_build, :failed, :test_reports_with_duplicate_failed_test_names, pipeline: pipeline)
end
it 'does not fail but does not persist duplicate data' do
expect { execute_service }.not_to raise_error
expect(Ci::UnitTest.count).to eq(1)
expect(Ci::UnitTestFailure.count).to eq(1)
end
end
context 'when pipeline has no failed builds with test reports' do
before do
create(:ci_build, :test_reports, pipeline: pipeline)
create(:ci_build, :failed, pipeline: pipeline)
end
it 'does not persist data' do
execute_service
expect(Ci::UnitTest.count).to eq(0)
expect(Ci::UnitTestFailure.count).to eq(0)
end
end
end
describe '#should_track_failures?' do
subject { described_class.new(pipeline).should_track_failures? }
let_it_be(:jobs) do
create_list(:ci_build, 2, :test_reports, :failed, pipeline: pipeline)
end
context 'when pipeline ref is the default branch' do
it { is_expected.to eq(true) }
end
context 'when pipeline is not equal to the project default branch' do
before do
pipeline.update_column(:ref, 'some-other-branch')
end
it { is_expected.to eq(false) }
end
context 'when total number of builds with failed tests exceeds the max number of trackable failures' do
before do
stub_const("#{described_class.name}::MAX_TRACKABLE_FAILURES", 1)
end
it { is_expected.to eq(false) }
end
end
describe '#async' do
let(:pipeline) { double(id: 1) }
let(:service) { described_class.new(pipeline) }
context 'when service should track failures' do
before do
allow(service).to receive(:should_track_failures?).and_return(true)
end
it 'enqueues the worker when #perform_if_needed is called' do
expect(Ci::TestFailureHistoryWorker).to receive(:perform_async).with(pipeline.id)
service.async.perform_if_needed
end
end
context 'when service should not track failures' do
before do
allow(service).to receive(:should_track_failures?).and_return(false)
end
it 'does not enqueue the worker when #perform_if_needed is called' do
expect(Ci::TestFailureHistoryWorker).not_to receive(:perform_async)
service.async.perform_if_needed
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class UnlockPipelineService
include BaseServiceUtility
include ::Gitlab::ExclusiveLeaseHelpers
ExecutionTimeoutError = Class.new(StandardError)
BATCH_SIZE = 100
MAX_EXEC_DURATION = 10.minutes.freeze
LOCK_TIMEOUT = (MAX_EXEC_DURATION + 1.minute).freeze
def initialize(pipeline)
@pipeline = pipeline
@already_leased = false
@already_unlocked = false
@exec_timeout = false
@unlocked_job_artifacts_count = 0
@unlocked_pipeline_artifacts_count = 0
end
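# Example usage (a sketch based on the accompanying spec; counts are illustrative):
#   Ci::UnlockPipelineService.new(pipeline).execute
#   # => { status: :success, skipped_already_leased: false, skipped_already_unlocked: false,
#   #      exec_timeout: false, unlocked_job_artifacts: 2, unlocked_pipeline_artifacts: 1 }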
def execute
unlock_pipeline_exclusively
success(
skipped_already_leased: already_leased,
skipped_already_unlocked: already_unlocked,
exec_timeout: exec_timeout,
unlocked_job_artifacts: unlocked_job_artifacts_count,
unlocked_pipeline_artifacts: unlocked_pipeline_artifacts_count
)
end
private
attr_reader :pipeline, :already_leased, :already_unlocked, :exec_timeout,
:unlocked_job_artifacts_count, :unlocked_pipeline_artifacts_count
def unlock_pipeline_exclusively
in_lock(lock_key, ttl: LOCK_TIMEOUT, retries: 0) do
# Even though we enforce uniqueness when enqueueing pipelines, there is still a rare chance of a race
# condition: a pipeline can be re-enqueued right after a worker pops off the same pipeline ID from the
# queue, and then, just after that worker completes the unlock process and releases the lock, another
# worker picks up the re-enqueued pipeline ID. So let's make sure to only unlock artifacts if the
# pipeline has not been unlocked.
if pipeline.unlocked?
@already_unlocked = true
break
end
unlock_job_artifacts
unlock_pipeline_artifacts
# Marking the row in `ci_pipelines` to unlocked signifies that all artifacts have
# already been unlocked. This must always happen last.
unlock_pipeline
end
rescue ExecutionTimeoutError
@exec_timeout = true
rescue Gitlab::ExclusiveLeaseHelpers::FailedToObtainLockError
@already_leased = true
ensure
if pipeline.unlocked?
Ci::UnlockPipelineRequest.log_event(:completed, pipeline.id) unless already_unlocked
else
# This ensures we re-enqueue the pipeline on 2 occasions:
# 1. When an unexpected error happens.
# 2. When the execution timeout has been reached in the case of a pipeline having a lot of
# job artifacts. This allows us to continue unlocking the rest of the artifacts from
# where we left off. This is why we unlock the pipeline last.
Ci::UnlockPipelineRequest.enqueue(pipeline.id)
Ci::UnlockPipelineRequest.log_event(:re_enqueued, pipeline.id)
end
end
def lock_key
"ci:unlock_pipeline_service:lock:#{pipeline.id}"
end
def unlock_pipeline
pipeline.update_column(:locked, Ci::Pipeline.lockeds[:unlocked])
end
def unlock_job_artifacts
start = Time.current
pipeline.builds.each_batch(of: BATCH_SIZE) do |builds|
# rubocop: disable CodeReuse/ActiveRecord
Ci::JobArtifact.where(job_id: builds.pluck(:id)).each_batch(of: BATCH_SIZE) do |job_artifacts|
unlocked_count = Ci::JobArtifact
.where(id: job_artifacts.pluck(:id))
.update_all(locked: :unlocked)
@unlocked_job_artifacts_count ||= 0
@unlocked_job_artifacts_count += unlocked_count
raise ExecutionTimeoutError if (Time.current - start) > MAX_EXEC_DURATION
end
# rubocop: enable CodeReuse/ActiveRecord
end
end
def unlock_pipeline_artifacts
@unlocked_pipeline_artifacts_count = pipeline.pipeline_artifacts.update_all(locked: :unlocked)
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::UnlockPipelineService, :unlock_pipelines, :clean_gitlab_redis_shared_state, feature_category: :build_artifacts do
describe '#execute', :aggregate_failures do
let(:service) { described_class.new(pipeline) }
let!(:pipeline) do
create(
:ci_pipeline,
:with_coverage_report_artifact,
:with_codequality_mr_diff_report,
:with_persisted_artifacts,
locked: :artifacts_locked
)
end
subject(:execute) { service.execute }
context 'when pipeline is not yet exclusively leased' do
before do
stub_const("#{described_class}::BATCH_SIZE", 1)
end
it 'unlocks the pipeline and all its artifacts' do
expect { execute }
.to change { pipeline.reload.locked }.from('artifacts_locked').to('unlocked')
.and change { pipeline.reload.job_artifacts.all?(&:artifact_unlocked?) }.to(true)
.and change { pipeline.reload.pipeline_artifacts.all?(&:artifact_unlocked?) }.to(true)
expect(execute).to eq(
status: :success,
skipped_already_leased: false,
skipped_already_unlocked: false,
exec_timeout: false,
unlocked_job_artifacts: pipeline.job_artifacts.count,
unlocked_pipeline_artifacts: pipeline.pipeline_artifacts.count
)
end
context 'and pipeline is already unlocked' do
before do
described_class.new(pipeline).execute
end
it 'skips the pipeline' do
expect(Ci::JobArtifact).not_to receive(:where)
expect(execute).to eq(
status: :success,
skipped_already_leased: false,
skipped_already_unlocked: true,
exec_timeout: false,
unlocked_job_artifacts: 0,
unlocked_pipeline_artifacts: 0
)
end
end
context 'and max execution duration was reached' do
let!(:first_artifact) { pipeline.job_artifacts.order(:id).first }
let!(:last_artifact) { pipeline.job_artifacts.order(:id).last }
before do
stub_const("#{described_class}::MAX_EXEC_DURATION", 0.seconds)
end
it 'keeps the unlocked state of job artifacts already processed and re-enqueues the pipeline' do
expect { execute }
.to change { first_artifact.reload.artifact_unlocked? }.to(true)
.and not_change { last_artifact.reload.artifact_unlocked? }
.and not_change { pipeline.reload.locked }
.and not_change { pipeline.reload.pipeline_artifacts.all?(&:artifact_unlocked?) }
.and change { pipeline_ids_waiting_to_be_unlocked }.from([]).to([pipeline.id])
expect(execute).to eq(
status: :success,
skipped_already_leased: false,
skipped_already_unlocked: false,
exec_timeout: true,
unlocked_job_artifacts: 1,
unlocked_pipeline_artifacts: 0
)
end
end
context 'and an error happened' do
context 'and was raised in the middle batches of job artifacts being unlocked' do
let!(:first_artifact) { pipeline.job_artifacts.order(:id).first }
let!(:last_artifact) { pipeline.job_artifacts.order(:id).last }
before do
mock_relation = instance_double('Ci::JobArtifact::ActiveRecord_Relation')
allow(Ci::JobArtifact).to receive(:where).and_call_original
allow(Ci::JobArtifact).to receive(:where).with(id: [last_artifact.id]).and_return(mock_relation)
allow(mock_relation).to receive(:update_all).and_raise('An error')
end
it 'keeps the unlocked state of job artifacts already processed and re-enqueues the pipeline' do
expect { execute }
.to raise_error('An error')
.and change { first_artifact.reload.artifact_unlocked? }.to(true)
.and not_change { last_artifact.reload.artifact_unlocked? }
.and not_change { pipeline.reload.locked }
.and not_change { pipeline.reload.pipeline_artifacts.all?(&:artifact_unlocked?) }
.and change { pipeline_ids_waiting_to_be_unlocked }.from([]).to([pipeline.id])
end
end
context 'and was raised while unlocking pipeline artifacts' do
before do
allow(pipeline).to receive_message_chain(:pipeline_artifacts, :update_all).and_raise('An error')
end
it 'keeps the unlocked state of job artifacts and re-enqueues the pipeline' do
expect { execute }
.to raise_error('An error')
.and change { pipeline.reload.job_artifacts.all?(&:artifact_unlocked?) }.to(true)
.and not_change { Ci::PipelineArtifact.where(pipeline_id: pipeline.id).all?(&:artifact_unlocked?) }
.and not_change { pipeline.reload.locked }.from('artifacts_locked')
.and change { pipeline_ids_waiting_to_be_unlocked }.from([]).to([pipeline.id])
end
end
context 'and was raised while unlocking pipeline' do
before do
allow(pipeline).to receive(:update_column).and_raise('An error')
end
it 'keeps the unlocked state of job artifacts and pipeline artifacts and re-enqueues the pipeline' do
expect { execute }
.to raise_error('An error')
.and change { pipeline.reload.job_artifacts.all?(&:artifact_unlocked?) }.to(true)
.and change { pipeline.reload.pipeline_artifacts.all?(&:artifact_unlocked?) }.to(true)
.and not_change { pipeline.reload.locked }.from('artifacts_locked')
.and change { pipeline_ids_waiting_to_be_unlocked }.from([]).to([pipeline.id])
end
end
end
end
context 'when pipeline is already exclusively leased' do
before do
allow(service).to receive(:in_lock).and_raise(Gitlab::ExclusiveLeaseHelpers::FailedToObtainLockError)
end
it 'does nothing and returns success' do
expect { execute }.not_to change { pipeline.reload.locked }
expect(execute).to include(
status: :success,
skipped_already_leased: true,
unlocked_job_artifacts: 0,
unlocked_pipeline_artifacts: 0
)
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class ExpirePipelineCacheService
class UrlHelpers
include ::Gitlab::Routing
include ::GitlabRoutingHelper
end
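# Example usage (a sketch based on the accompanying spec):
#   Ci::ExpirePipelineCacheService.new.execute(pipeline)
#   # pass delete: true when the pipeline is about to be destroyed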
def execute(pipeline, delete: false)
store = Gitlab::EtagCaching::Store.new
update_etag_cache(pipeline, store)
if delete
Gitlab::Cache::Ci::ProjectPipelineStatus.new(pipeline.project).delete_from_cache
else
Gitlab::Cache::Ci::ProjectPipelineStatus.update_for_pipeline(pipeline)
end
end
private
def project_pipelines_path(project)
url_helpers.project_pipelines_path(project, format: :json)
end
def project_pipeline_path(project, pipeline)
url_helpers.project_pipeline_path(project, pipeline, format: :json)
end
def commit_pipelines_path(project, commit)
url_helpers.pipelines_project_commit_path(project, commit.id, format: :json)
end
def new_merge_request_pipelines_path(project)
url_helpers.project_new_merge_request_path(project, format: :json)
end
def pipelines_project_merge_request_path(merge_request)
url_helpers.pipelines_project_merge_request_path(merge_request.target_project, merge_request, format: :json)
end
def merge_request_widget_path(merge_request)
url_helpers.cached_widget_project_json_merge_request_path(merge_request.project, merge_request, format: :json)
end
def each_pipelines_merge_request_path(pipeline)
pipeline.all_merge_requests.each do |merge_request|
yield(pipelines_project_merge_request_path(merge_request))
yield(merge_request_widget_path(merge_request))
end
end
def graphql_pipeline_path(pipeline)
url_helpers.graphql_etag_pipeline_path(pipeline)
end
def graphql_pipeline_sha_path(sha)
url_helpers.graphql_etag_pipeline_sha_path(sha)
end
def graphql_project_on_demand_scan_counts_path(project)
url_helpers.graphql_etag_project_on_demand_scan_counts_path(project)
end
# Updates ETag caches of a pipeline.
#
# This logic resides in a separate method so that EE can more easily extend
# it.
#
# @param [Ci::Pipeline] pipeline
# @param [Gitlab::EtagCaching::Store] store
def update_etag_cache(pipeline, store)
project = pipeline.project
etag_paths = [
project_pipelines_path(project),
new_merge_request_pipelines_path(project),
graphql_project_on_demand_scan_counts_path(project)
]
etag_paths << commit_pipelines_path(project, pipeline.commit) unless pipeline.commit.nil?
each_pipelines_merge_request_path(pipeline) do |path|
etag_paths << path
end
pipeline.upstream_and_all_downstreams.includes(project: [:route, { namespace: :route }]).each do |relative_pipeline| # rubocop: disable CodeReuse/ActiveRecord
etag_paths << project_pipeline_path(relative_pipeline.project, relative_pipeline)
etag_paths << graphql_pipeline_path(relative_pipeline)
etag_paths << graphql_pipeline_sha_path(relative_pipeline.sha)
end
store.touch(*etag_paths)
end
def url_helpers
@url_helpers ||= UrlHelpers.new
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::ExpirePipelineCacheService, feature_category: :continuous_integration do
let_it_be(:user) { create(:user) }
let_it_be(:project) { create(:project) }
let_it_be(:pipeline) { create(:ci_pipeline, project: project) }
subject { described_class.new }
describe '#execute' do
it 'invalidates Etag caching for project pipelines path' do
pipelines_path = "/#{project.full_path}/-/pipelines.json"
new_mr_pipelines_path = "/#{project.full_path}/-/merge_requests/new.json"
pipeline_path = "/#{project.full_path}/-/pipelines/#{pipeline.id}.json"
graphql_pipeline_path = "/api/graphql:pipelines/id/#{pipeline.id}"
graphql_pipeline_sha_path = "/api/graphql:pipelines/sha/#{pipeline.sha}"
graphql_project_on_demand_scan_counts_path = "/api/graphql:on_demand_scan/counts/#{project.full_path}"
expect_touched_etag_caching_paths(
pipelines_path,
new_mr_pipelines_path,
pipeline_path,
graphql_pipeline_path,
graphql_pipeline_sha_path,
graphql_project_on_demand_scan_counts_path
)
subject.execute(pipeline)
end
it 'invalidates Etag caching for merge request pipelines if pipeline runs on any commit of that source branch' do
merge_request = create(:merge_request, :with_detached_merge_request_pipeline)
project = merge_request.target_project
merge_request_pipelines_path = "/#{project.full_path}/-/merge_requests/#{merge_request.iid}/pipelines.json"
merge_request_widget_path = "/#{project.full_path}/-/merge_requests/#{merge_request.iid}/cached_widget.json"
expect_touched_etag_caching_paths(
merge_request_pipelines_path,
merge_request_widget_path
)
subject.execute(merge_request.all_pipelines.last)
end
it 'updates the cached status for a project' do
expect(Gitlab::Cache::Ci::ProjectPipelineStatus).to receive(:update_for_pipeline).with(pipeline)
subject.execute(pipeline)
end
context 'destroyed pipeline' do
let(:project_with_repo) { create(:project, :repository) }
let!(:pipeline_with_commit) { create(:ci_pipeline, :success, project: project_with_repo, sha: project_with_repo.commit.id) }
it 'clears the cache', :use_clean_rails_redis_caching do
create(:commit_status, :success, pipeline: pipeline_with_commit, ref: pipeline_with_commit.ref)
# Sanity check
expect(project_with_repo.pipeline_status.has_status?).to be_truthy
subject.execute(pipeline_with_commit, delete: true)
pipeline_with_commit.destroy!
# We need to reset lazy_latest_pipeline cache to simulate a new request
BatchLoader::Executor.clear_current
# Need to use find to avoid memoization
expect(Project.find(project_with_repo.id).pipeline_status.has_status?).to be_falsey
end
end
context 'when the pipeline is triggered by another pipeline' do
let(:source) { create(:ci_sources_pipeline, pipeline: pipeline) }
it 'updates the cache of dependent pipeline' do
dependent_pipeline_path = "/#{source.source_project.full_path}/-/pipelines/#{source.source_pipeline.id}.json"
expect_touched_etag_caching_paths(dependent_pipeline_path)
subject.execute(pipeline)
end
end
context 'when the pipeline triggered another pipeline' do
let(:build) { create(:ci_build, pipeline: pipeline) }
let(:source) { create(:ci_sources_pipeline, source_job: build) }
it 'updates the cache of dependent pipeline' do
dependent_pipeline_path = "/#{source.project.full_path}/-/pipelines/#{source.pipeline.id}.json"
expect_touched_etag_caching_paths(dependent_pipeline_path)
subject.execute(pipeline)
end
end
it 'does not do N+1 queries' do
subject.execute(pipeline)
control = ActiveRecord::QueryRecorder.new { subject.execute(pipeline) }
create(:ci_sources_pipeline, pipeline: pipeline)
create(:ci_sources_pipeline, source_job: create(:ci_build, pipeline: pipeline))
expect { subject.execute(pipeline) }.not_to exceed_query_limit(control.count)
end
end
def expect_touched_etag_caching_paths(*paths)
expect_next_instance_of(Gitlab::EtagCaching::Store) do |store|
expect(store).to receive(:touch).and_wrap_original do |m, *args|
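# and_wrap_original still invokes the real #touch, so the ETag store is genuinely updated while we assert on the paths.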
expect(args).to include(*paths)
m.call(*args)
end
end
end
end
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
# This service tracks failed CI builds using Snowplow.
#
# @param build [Ci::Build] the build that failed.
# @param exit_code [Integer] the resulting exit code.
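# @param failure_reason [String] the reason the build failed (e.g. "script_failure").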
module Ci
class TrackFailedBuildService
SCHEMA_URL = 'iglu:com.gitlab/ci_build_failed/jsonschema/1-0-2'
def initialize(build:, exit_code:, failure_reason:)
@build = build
@exit_code = exit_code
@failure_reason = failure_reason
end
def execute
# rubocop:disable Style/IfUnlessModifier
unless @build.failed?
return ServiceResponse.error(message: 'Attempted to track a non-failed CI build')
end
# rubocop:enable Style/IfUnlessModifier
context = SnowplowTracker::SelfDescribingJson.new(SCHEMA_URL, payload)
::Gitlab::Tracking.event(
'ci::build',
'failed',
context: [context],
user: @build.user,
project: @build.project_id)
ServiceResponse.success
end
private
def payload
{
build_id: @build.id,
build_name: @build.name,
build_artifact_types: @build.job_artifact_types,
exit_code: @exit_code,
failure_reason: @failure_reason,
project: @build.project_id
}
end
end
end
```
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::TrackFailedBuildService, feature_category: :continuous_integration do
let_it_be(:user) { create(:user) }
let_it_be(:project) { create(:project, :public) }
let_it_be(:pipeline) { create(:ci_pipeline, project: project, user: user) }
let_it_be(:exit_code) { 42 }
let_it_be(:failure_reason) { "script_failure" }
describe '#execute' do
context 'when a build has failed' do
let_it_be(:build) { create(:ci_build, :failed, :sast_report, pipeline: pipeline, user: user) }
subject { described_class.new(build: build, exit_code: exit_code, failure_reason: failure_reason) }
it 'tracks the build failed event', :snowplow do
response = subject.execute
expect(response.success?).to be true
context = {
schema: described_class::SCHEMA_URL,
data: {
build_id: build.id,
build_name: build.name,
build_artifact_types: ["sast"],
exit_code: exit_code,
failure_reason: failure_reason,
project: project.id
}
}
expect_snowplow_event(
category: 'ci::build',
action: 'failed',
context: [context],
user: user,
project: project.id)
end
end
context 'when a build has not failed' do
let_it_be(:build) { create(:ci_build, :success, :sast_report, pipeline: pipeline, user: user) }
subject { described_class.new(build: build, exit_code: nil, failure_reason: nil) }
it 'does not track the build failed event', :snowplow do
response = subject.execute
expect(response.error?).to be true
expect_no_snowplow_event
end
end
end
end
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class ChangeVariablesService < BaseContainerService
def execute
container.update(params)
end
end
end
::Ci::ChangeVariablesService.prepend_mod_with('Ci::ChangeVariablesService')
```
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::ChangeVariablesService, feature_category: :secrets_management do
let(:service) { described_class.new(container: group, current_user: user, params: params) }
let_it_be(:user) { create(:user) }
let(:group) { spy(:group, variables: []) }
let(:params) { { variables_attributes: [{ key: 'new_variable', value: 'variable_value' }] } }
describe '#execute' do
subject(:execute) { service.execute }
it 'delegates to ActiveRecord update' do
execute
expect(group).to have_received(:update).with(params)
end
end
end
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class RetryJobService < ::BaseService
include Gitlab::Utils::StrongMemoize
def execute(job, variables: [])
if job.retryable?
job.ensure_scheduling_type!
new_job = retry_job(job, variables: variables)
ServiceResponse.success(payload: { job: new_job })
else
ServiceResponse.error(
message: 'Job cannot be retried',
payload: { job: job, reason: :not_retryable }
)
end
end
# rubocop: disable CodeReuse/ActiveRecord
def clone!(job, variables: [], enqueue_if_actionable: false, start_pipeline: false)
# Cloning a job requires a strict type check to ensure
# the attributes being used for the clone are taken straight
# from the model and not overridden by other abstractions.
raise TypeError unless job.instance_of?(Ci::Build) || job.instance_of?(Ci::Bridge)
check_access!(job)
new_job = job.clone(current_user: current_user, new_job_variables_attributes: variables)
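# Manual and (expired) scheduled jobs are actionable; flagging them here lets the retried job enqueue immediately instead of waiting to be played again.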
if enqueue_if_actionable && new_job.action?
new_job.set_enqueue_immediately!
end
start_pipeline_proc = -> { start_pipeline(job, new_job) } if start_pipeline
new_job.run_after_commit do
start_pipeline_proc&.call
::Ci::CopyCrossDatabaseAssociationsService.new.execute(job, new_job)
::MergeRequests::AddTodoWhenBuildFailsService
.new(project: project)
.close(new_job)
end
::Ci::Pipelines::AddJobService.new(job.pipeline).execute!(new_job) do |processable|
BulkInsertableAssociations.with_bulk_insert do
processable.save!
end
end
job.reset # refresh the data to get new values of `retried` and `processed`.
new_job
end
# rubocop: enable CodeReuse/ActiveRecord
private
def check_assignable_runners!(job); end
def retry_job(job, variables: [])
clone!(job, variables: variables, enqueue_if_actionable: true, start_pipeline: true).tap do |new_job|
check_assignable_runners!(new_job) if new_job.is_a?(Ci::Build)
next if new_job.failed?
ResetSkippedJobsService.new(project, current_user).execute(job)
end
end
def check_access!(job)
unless can?(current_user, :update_build, job)
raise Gitlab::Access::AccessDeniedError, '403 Forbidden'
end
end
def start_pipeline(job, new_job)
Ci::PipelineCreation::StartPipelineService.new(job.pipeline).execute
new_job.reset
end
end
end
Ci::RetryJobService.prepend_mod_with('Ci::RetryJobService')
```
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::RetryJobService, feature_category: :continuous_integration do
using RSpec::Parameterized::TableSyntax
let_it_be(:reporter) { create(:user) }
let_it_be(:developer) { create(:user) }
let_it_be(:project) { create(:project, :repository) }
let_it_be(:pipeline) do
create(:ci_pipeline, project: project, sha: 'b83d6e391c22777fca1ed3012fce84f633d7fed0')
end
let_it_be(:stage) do
create(:ci_stage, pipeline: pipeline, name: 'test')
end
let_it_be(:deploy_stage) { create(:ci_stage, pipeline: pipeline, name: 'deploy', position: stage.position + 1) }
let(:job_variables_attributes) { [{ key: 'MANUAL_VAR', value: 'manual test var' }] }
let(:user) { developer }
let(:service) { described_class.new(project, user) }
before_all do
project.add_developer(developer)
project.add_reporter(reporter)
end
shared_context 'retryable bridge' do
let_it_be(:downstream_project) { create(:project, :repository) }
let_it_be_with_refind(:job) do
create(:ci_bridge, :success,
pipeline: pipeline, downstream: downstream_project, description: 'a trigger job', ci_stage: stage
)
end
let_it_be(:job_to_clone) { job }
before do
job.update!(retried: false)
end
end
shared_context 'retryable build' do
let_it_be_with_reload(:job) do
create(:ci_build, :success, pipeline: pipeline, ci_stage: stage)
end
let_it_be(:another_pipeline) { create(:ci_empty_pipeline, project: project) }
let_it_be(:job_to_clone) do
create(
:ci_build, :failed, :picked, :expired, :erased, :queued, :coverage, :tags,
:allowed_to_fail, :on_tag, :triggered, :teardown_environment, :resource_group,
description: 'my-job', ci_stage: stage,
pipeline: pipeline, auto_canceled_by: another_pipeline,
scheduled_at: 10.seconds.since
)
end
before do
job.update!(retried: false, status: :success)
job_to_clone.update!(retried: false, status: :success)
end
end
shared_examples_for 'clones the job' do
let(:job) { job_to_clone }
before_all do
job_to_clone.update!(ci_stage: stage)
create(:ci_build_need, build: job_to_clone)
end
context 'when the user has ability to execute job' do
before do
stub_not_protect_default_branch
end
context 'when there is a failed job ToDo for the MR' do
let!(:merge_request) { create(:merge_request, source_project: project, author: user, head_pipeline: pipeline) }
let!(:todo) { create(:todo, :build_failed, user: user, project: project, author: user, target: merge_request) }
it 'resolves the ToDo for the failed job' do
expect do
service.execute(job)
end.to change { todo.reload.state }.from('pending').to('done')
end
end
context 'when the job has needs' do
before do
create_list(:ci_build_need, 2, build: job)
end
it 'bulk inserts all the needs' do
expect(Ci::BuildNeed).to receive(:bulk_insert!).and_call_original
new_job
end
end
it 'marks the old job as retried' do
expect(new_job).to be_latest
expect(job).to be_retried
expect(job).to be_processed
end
end
context 'when the user does not have permission to execute the job' do
let(:user) { reporter }
it 'raises an error' do
expect { service.execute(job) }
.to raise_error Gitlab::Access::AccessDeniedError
end
end
end
shared_examples_for 'does not retry the job' do
it 'returns an error with a :not_retryable reason' do
expect(subject.message).to be('Job cannot be retried')
expect(subject.payload[:reason]).to eq(:not_retryable)
expect(subject.payload[:job]).to eq(job)
end
end
shared_examples_for 'retries the job' do
it_behaves_like 'clones the job'
it 'enqueues the new job' do
expect(new_job).to be_pending
end
context 'when there are subsequent processables that are skipped' do
let!(:subsequent_build) do
create(:ci_build, :skipped, pipeline: pipeline, ci_stage: deploy_stage)
end
let!(:subsequent_bridge) do
create(:ci_bridge, :skipped, pipeline: pipeline, ci_stage: deploy_stage)
end
it 'resumes pipeline processing in the subsequent stage' do
service.execute(job)
expect(subsequent_build.reload).to be_created
expect(subsequent_bridge.reload).to be_created
end
it 'updates ownership for subsequent builds' do
expect { service.execute(job) }.to change { subsequent_build.reload.user }.to(user)
end
it 'updates ownership for subsequent bridges' do
expect { service.execute(job) }.to change { subsequent_bridge.reload.user }.to(user)
end
end
context 'when the pipeline has other jobs' do
let!(:other_test_build) { create(:ci_build, pipeline: pipeline, ci_stage: stage) }
let!(:deploy) { create(:ci_build, pipeline: pipeline, ci_stage: deploy_stage) }
let!(:deploy_needs_build2) { create(:ci_build_need, build: deploy, name: other_test_build.name) }
context 'when job has a nil scheduling_type' do
before do
job.pipeline.processables.update_all(scheduling_type: nil)
job.reload
end
it 'populates scheduling_type of processables' do
expect(new_job.scheduling_type).to eq('stage')
expect(job.reload.scheduling_type).to eq('stage')
expect(other_test_build.reload.scheduling_type).to eq('stage')
expect(deploy.reload.scheduling_type).to eq('dag')
end
end
context 'when job has scheduling_type' do
it 'does not call populate_scheduling_type!' do
expect(job.pipeline).not_to receive(:ensure_scheduling_type!)
expect(new_job.scheduling_type).to eq('stage')
end
end
end
context 'when the pipeline is a child pipeline and the bridge uses strategy:depend' do
let!(:parent_pipeline) { create(:ci_pipeline, project: project) }
let!(:bridge) { create(:ci_bridge, :strategy_depend, pipeline: parent_pipeline, status: 'success') }
let!(:source_pipeline) { create(:ci_sources_pipeline, pipeline: pipeline, source_job: bridge) }
it 'marks the source bridge as pending' do
service.execute(job)
expect(bridge.reload).to be_pending
end
end
end
shared_examples_for 'checks enqueue_immediately?' do
it "returns enqueue_immediately" do
subject
expect(new_job.enqueue_immediately?).to eq enqueue_immediately
end
end
shared_examples_for 'creates associations for a deployable job' do |factory_type|
context 'when a job with a deployment is retried' do
let!(:job) do
create(factory_type, :with_deployment, :deploy_to_production, pipeline: pipeline, ci_stage: stage)
end
it 'creates a new deployment' do
expect { new_job }.to change { Deployment.count }.by(1)
end
it 'does not create a new environment' do
expect { new_job }.not_to change { Environment.count }
end
end
context 'when a job with a dynamic environment is retried' do
let_it_be(:other_developer) { create(:user).tap { |u| project.add_developer(u) } }
let(:environment_name) { 'review/$CI_COMMIT_REF_SLUG-$GITLAB_USER_ID' }
let!(:job) do
create(factory_type, :with_deployment,
environment: environment_name,
options: { environment: { name: environment_name } },
pipeline: pipeline,
ci_stage: stage,
user: other_developer)
end
it 'creates a new deployment' do
expect { new_job }.to change { Deployment.count }.by(1)
end
it 'does not create a new environment' do
expect { new_job }.not_to change { Environment.count }
end
end
end
describe '#clone!' do
let(:start_pipeline_on_clone) { false }
let(:new_job) { service.clone!(job, start_pipeline: start_pipeline_on_clone) }
it 'raises an error when an unexpected class is passed' do
expect { service.clone!(create(:ci_build).present) }.to raise_error(TypeError)
end
context 'when the job to be cloned is a bridge' do
include_context 'retryable bridge'
it_behaves_like 'clones the job'
it 'does not create a new deployment' do
expect { new_job }.not_to change { Deployment.count }
end
context 'when the pipeline is started automatically' do
let(:start_pipeline_on_clone) { true }
it_behaves_like 'creates associations for a deployable job', :ci_bridge
end
context 'when given variables' do
let(:new_job) { service.clone!(job, variables: job_variables_attributes) }
it 'does not give variables to the new bridge' do
expect { new_job }.not_to raise_error
end
end
end
context 'when the job to be cloned is a build' do
include_context 'retryable build'
it_behaves_like 'clones the job'
it 'does not create a new deployment' do
expect { new_job }.not_to change { Deployment.count }
end
context 'when the pipeline is started automatically' do
let(:start_pipeline_on_clone) { true }
it_behaves_like 'creates associations for a deployable job', :ci_build
end
context 'when given variables' do
let(:new_job) { service.clone!(job, variables: job_variables_attributes) }
context 'when the build is actionable' do
let_it_be_with_refind(:job) { create(:ci_build, :actionable, pipeline: pipeline) }
it 'gives variables to the new build' do
expect(new_job.job_variables.count).to be(1)
expect(new_job.job_variables.first.key).to eq('MANUAL_VAR')
expect(new_job.job_variables.first.value).to eq('manual test var')
end
end
context 'when the build is not actionable' do
let_it_be_with_refind(:job) { create(:ci_build, pipeline: pipeline) }
it 'does not give variables to the new build' do
expect(new_job.job_variables.count).to be_zero
end
end
end
end
context 'when enqueue_if_actionable is provided' do
let!(:job) do
create(:ci_build, *[trait].compact, :failed, pipeline: pipeline, ci_stage: stage)
end
let(:new_job) { subject }
subject { service.clone!(job, enqueue_if_actionable: enqueue_if_actionable) }
where(:enqueue_if_actionable, :trait, :enqueue_immediately) do
true | nil | false
true | :manual | true
true | :expired_scheduled | true
false | nil | false
false | :manual | false
false | :expired_scheduled | false
end
with_them do
it_behaves_like 'checks enqueue_immediately?'
end
end
end
describe '#execute' do
let(:new_job) { subject[:job] }
subject { service.execute(job) }
context 'when the job to be retried is a bridge' do
context 'and it is not retryable' do
let_it_be(:job) { create(:ci_bridge, :failed, :reached_max_descendant_pipelines_depth) }
it_behaves_like 'does not retry the job'
end
include_context 'retryable bridge'
it_behaves_like 'retries the job'
context 'when given variables' do
let(:new_job) { service.clone!(job, variables: job_variables_attributes) }
it 'does not give variables to the new bridge' do
expect { new_job }.not_to raise_error
end
end
end
context 'when the job to be retried is a build' do
context 'and it is not retryable' do
let_it_be(:job) { create(:ci_build, :deployment_rejected, pipeline: pipeline) }
it_behaves_like 'does not retry the job'
end
include_context 'retryable build'
it_behaves_like 'retries the job'
context 'automatic retryable build' do
let!(:auto_retryable_build) do
create(:ci_build, pipeline: pipeline, ci_stage: stage, user: user, options: { retry: 1 })
end
def drop_build!
auto_retryable_build.drop_with_exit_code!('test failure', 1)
end
it 'creates a new build and enqueues BuildQueueWorker' do
expect { drop_build! }.to change { Ci::Build.count }.by(1)
.and change { BuildQueueWorker.jobs.count }.by(1)
end
end
context 'when there are subsequent jobs that are skipped' do
let!(:subsequent_build) do
create(:ci_build, :skipped, pipeline: pipeline, ci_stage: deploy_stage)
end
let!(:subsequent_bridge) do
create(:ci_bridge, :skipped, pipeline: pipeline, ci_stage: deploy_stage)
end
it 'does not cause an N+1 when updating the job ownership' do
control_count = ActiveRecord::QueryRecorder.new(skip_cached: false) { service.execute(job) }.count
create_list(:ci_build, 2, :skipped, pipeline: pipeline, ci_stage: deploy_stage)
expect { service.execute(job) }.not_to exceed_all_query_limit(control_count)
end
end
context 'when given variables' do
let(:new_job) { service.clone!(job, variables: job_variables_attributes) }
context 'when the build is actionable' do
let_it_be_with_refind(:job) { create(:ci_build, :actionable, pipeline: pipeline) }
it 'gives variables to the new build' do
expect(new_job.job_variables.count).to be(1)
expect(new_job.job_variables.first.key).to eq('MANUAL_VAR')
expect(new_job.job_variables.first.value).to eq('manual test var')
end
end
context 'when the build is not actionable' do
let_it_be_with_refind(:job) { create(:ci_build, pipeline: pipeline) }
it 'does not give variables to the new build' do
expect(new_job.job_variables.count).to be_zero
end
end
end
end
context 'when job being retried has jobs in previous stages' do
let!(:job) do
create(
:ci_build,
:failed,
name: 'deploy_a',
pipeline: pipeline,
ci_stage: deploy_stage
)
end
before do
create(
:ci_build,
previous_stage_job_status,
name: 'test_a',
pipeline: pipeline,
ci_stage: stage
)
end
where(:previous_stage_job_status, :after_status) do
:created | 'created'
:pending | 'created'
:running | 'created'
:manual | 'created'
:scheduled | 'created'
:success | 'pending'
:failed | 'skipped'
:skipped | 'pending'
end
with_them do
it 'updates the new job status to after_status' do
expect(subject).to be_success
expect(new_job.status).to eq after_status
end
end
end
context 'when job being retried has DAG dependencies' do
let!(:job) do
create(
:ci_build,
:failed,
:dependent,
name: 'deploy_a',
pipeline: pipeline,
ci_stage: deploy_stage,
needed: dependency
)
end
let(:dependency) do
create(
:ci_build,
dag_dependency_status,
name: 'test_a',
pipeline: pipeline,
ci_stage: stage
)
end
where(:dag_dependency_status, :after_status) do
:created | 'created'
:pending | 'created'
:running | 'created'
:manual | 'created'
:scheduled | 'created'
:success | 'pending'
:failed | 'skipped'
:skipped | 'skipped'
end
with_them do
it 'updates the new job status to after_status' do
expect(subject).to be_success
expect(new_job.status).to eq after_status
end
end
end
context 'when there are other manual/scheduled jobs' do
let_it_be(:test_manual_build) do
create(:ci_build, :manual, pipeline: pipeline, ci_stage: stage)
end
let_it_be(:subsequent_manual_build) do
create(:ci_build, :manual, pipeline: pipeline, ci_stage: deploy_stage)
end
let_it_be(:test_scheduled_build) do
create(:ci_build, :scheduled, pipeline: pipeline, ci_stage: stage)
end
let_it_be(:subsequent_scheduled_build) do
create(:ci_build, :scheduled, pipeline: pipeline, ci_stage: deploy_stage)
end
let!(:job) do
create(:ci_build, *[trait].compact, :failed, pipeline: pipeline, ci_stage: stage)
end
where(:trait, :enqueue_immediately) do
nil | false
:manual | true
:expired_scheduled | true
end
with_them do
it 'retries the given job but not the other manual/scheduled jobs' do
expect { subject }
.to change { Ci::Build.count }.by(1)
.and not_change { test_manual_build.reload.status }
.and not_change { subsequent_manual_build.reload.status }
.and not_change { test_scheduled_build.reload.status }
.and not_change { subsequent_scheduled_build.reload.status }
expect(new_job).to be_pending
end
it_behaves_like 'checks enqueue_immediately?'
end
end
end
end
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class DeleteUnitTestsService
include EachBatch
BATCH_SIZE = 100
def execute
purge_data!(Ci::UnitTestFailure)
purge_data!(Ci::UnitTest)
end
private
def purge_data!(klass)
loop do
break unless delete_batch!(klass)
end
end
# rubocop: disable CodeReuse/ActiveRecord
def delete_batch!(klass)
deleted = 0
klass.transaction do
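# SKIP LOCKED lets concurrent workers claim disjoint batches instead of blocking on rows already locked elsewhere.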
ids = klass.deletable.lock('FOR UPDATE SKIP LOCKED').limit(BATCH_SIZE).pluck(:id)
deleted = klass.where(id: ids).delete_all if ids.any?
end
deleted > 0
end
# rubocop: enable CodeReuse/ActiveRecord
end
end
```
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::DeleteUnitTestsService, feature_category: :continuous_integration do
describe '#execute' do
let!(:unit_test_1) { create(:ci_unit_test) }
let!(:unit_test_2) { create(:ci_unit_test) }
let!(:unit_test_3) { create(:ci_unit_test) }
let!(:unit_test_4) { create(:ci_unit_test) }
let!(:unit_test_1_recent_failure) { create(:ci_unit_test_failure, unit_test: unit_test_1) }
let!(:unit_test_1_old_failure) { create(:ci_unit_test_failure, unit_test: unit_test_1, failed_at: 15.days.ago) }
let!(:unit_test_2_old_failure) { create(:ci_unit_test_failure, unit_test: unit_test_2, failed_at: 15.days.ago) }
let!(:unit_test_3_old_failure) { create(:ci_unit_test_failure, unit_test: unit_test_3, failed_at: 15.days.ago) }
let!(:unit_test_4_old_failure) { create(:ci_unit_test_failure, unit_test: unit_test_4, failed_at: 15.days.ago) }
before do
stub_const("#{described_class.name}::BATCH_SIZE", 2)
described_class.new.execute
end
it 'does not delete unit test failures not older than 14 days' do
expect(unit_test_1_recent_failure.reload).to be_persisted
end
it 'deletes unit test failures older than 14 days' do
ids = [
unit_test_1_old_failure,
unit_test_2_old_failure,
unit_test_3_old_failure,
unit_test_4_old_failure
].map(&:id)
result = Ci::UnitTestFailure.where(id: ids)
expect(result).to be_empty
end
it 'deletes unit tests that have no more associated unit test failures' do
ids = [
unit_test_2,
unit_test_3,
unit_test_4
].map(&:id)
result = Ci::UnitTest.where(id: ids)
expect(result).to be_empty
end
end
end
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
# This class is a simplified version of assign_nested_attributes_for_collection_association from ActiveRecord
# https://github.com/rails/rails/blob/v6.0.2.1/activerecord/lib/active_record/nested_attributes.rb#L466
module Ci
class UpdateInstanceVariablesService
UNASSIGNABLE_KEYS = %w[id _destroy].freeze
def initialize(params)
@params = params[:variables_attributes]
end
def execute
instantiate_records
persist_records
end
def errors
@records.to_a.flat_map { |r| r.errors.full_messages }
end
private
attr_reader :params
def existing_records_by_id
@existing_records_by_id ||= Ci::InstanceVariable
.all
.index_by { |var| var.id.to_s }
end
def instantiate_records
@records = params.map do |attributes|
find_or_initialize_record(attributes).tap do |record|
record.assign_attributes(attributes.except(*UNASSIGNABLE_KEYS))
record.mark_for_destruction if has_destroy_flag?(attributes)
end
end
end
def find_or_initialize_record(attributes)
id = attributes[:id].to_s
if id.blank?
Ci::InstanceVariable.new
else
existing_records_by_id.fetch(id) { raise ActiveRecord::RecordNotFound }
end
end
def persist_records
Ci::InstanceVariable.transaction do
success = @records.map do |record|
if record.marked_for_destruction?
record.destroy
else
record.save
end
end.all?
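# ActiveRecord::Rollback is swallowed by the transaction block, so a failed batch rolls back and #execute returns a falsey value rather than raising.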
raise ActiveRecord::Rollback unless success
success
end
end
def has_destroy_flag?(hash)
Gitlab::Utils.to_boolean(hash['_destroy'])
end
end
end
```
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::UpdateInstanceVariablesService, feature_category: :secrets_management do
let(:params) { { variables_attributes: variables_attributes } }
subject { described_class.new(params) }
describe '#execute' do
context 'without variables' do
let(:variables_attributes) { [] }
it { expect(subject.execute).to be_truthy }
end
context 'with insert only variables' do
let(:variables_attributes) do
[
{ key: 'var_a', secret_value: 'dummy_value_for_a', protected: true },
{ key: 'var_b', secret_value: 'dummy_value_for_b', protected: false }
]
end
it { expect(subject.execute).to be_truthy }
it 'persists all the records' do
expect { subject.execute }
.to change { Ci::InstanceVariable.count }
.by variables_attributes.size
end
it 'persists attributes' do
subject.execute
expect(Ci::InstanceVariable.all).to contain_exactly(
have_attributes(key: 'var_a', secret_value: 'dummy_value_for_a', protected: true),
have_attributes(key: 'var_b', secret_value: 'dummy_value_for_b', protected: false)
)
end
end
context 'with update only variables' do
let!(:var_a) { create(:ci_instance_variable) }
let!(:var_b) { create(:ci_instance_variable, protected: false) }
let(:variables_attributes) do
[
{
id: var_a.id,
key: var_a.key,
secret_value: 'new_dummy_value_for_a',
protected: var_a.protected?.to_s
},
{
id: var_b.id,
key: 'var_b_key',
secret_value: 'new_dummy_value_for_b',
protected: 'true'
}
]
end
it { expect(subject.execute).to be_truthy }
it 'does not change the count' do
expect { subject.execute }
.not_to change { Ci::InstanceVariable.count }
end
it 'updates the records in place', :aggregate_failures do
subject.execute
expect(var_a.reload).to have_attributes(secret_value: 'new_dummy_value_for_a')
expect(var_b.reload).to have_attributes(
key: 'var_b_key', secret_value: 'new_dummy_value_for_b', protected: true)
end
end
context 'with insert and update variables' do
let!(:var_a) { create(:ci_instance_variable) }
let(:variables_attributes) do
[
{
id: var_a.id,
key: var_a.key,
secret_value: 'new_dummy_value_for_a',
protected: var_a.protected?.to_s
},
{
key: 'var_b',
secret_value: 'dummy_value_for_b',
protected: true
}
]
end
it { expect(subject.execute).to be_truthy }
it 'inserts only one record' do
expect { subject.execute }
.to change { Ci::InstanceVariable.count }.by 1
end
it 'persists all the records', :aggregate_failures do
subject.execute
var_b = Ci::InstanceVariable.find_by(key: 'var_b')
expect(var_a.reload.secret_value).to eq('new_dummy_value_for_a')
expect(var_b.secret_value).to eq('dummy_value_for_b')
end
end
context 'with insert, update, and destroy variables' do
let!(:var_a) { create(:ci_instance_variable) }
let!(:var_b) { create(:ci_instance_variable) }
let(:variables_attributes) do
[
{
id: var_a.id,
key: var_a.key,
secret_value: 'new_dummy_value_for_a',
protected: var_a.protected?.to_s
},
{
id: var_b.id,
key: var_b.key,
secret_value: 'dummy_value_for_b',
protected: var_b.protected?.to_s,
'_destroy' => 'true'
},
{
key: 'var_c',
secret_value: 'dummy_value_for_c',
protected: true
}
]
end
it { expect(subject.execute).to be_truthy }
it 'persists all the records', :aggregate_failures do
subject.execute
var_c = Ci::InstanceVariable.find_by(key: 'var_c')
expect(var_a.reload.secret_value).to eq('new_dummy_value_for_a')
expect { var_b.reload }.to raise_error(ActiveRecord::RecordNotFound)
expect(var_c.secret_value).to eq('dummy_value_for_c')
end
end
context 'with invalid variables' do
let!(:var_a) { create(:ci_instance_variable, secret_value: 'dummy_value_for_a') }
let(:variables_attributes) do
[
{
key: '...?',
secret_value: 'nice_value'
},
{
id: var_a.id,
key: var_a.key,
secret_value: 'new_dummy_value_for_a',
protected: var_a.protected?.to_s
},
{
key: var_a.key,
secret_value: 'other_value'
}
]
end
it { expect(subject.execute).to be_falsey }
it 'does not insert any records' do
expect { subject.execute }
.not_to change { Ci::InstanceVariable.count }
end
it 'does not update existing records' do
subject.execute
expect(var_a.reload.secret_value).to eq('dummy_value_for_a')
end
it 'returns errors' do
subject.execute
expect(subject.errors).to match_array(
[
"Key (#{var_a.key}) has already been taken",
"Key can contain only letters, digits and '_'."
])
end
end
context 'when deleting non existing variables' do
let(:variables_attributes) do
[
{
id: 'some-id',
key: 'some_key',
secret_value: 'other_value',
'_destroy' => 'true'
}
]
end
it { expect { subject.execute }.to raise_error(ActiveRecord::RecordNotFound) }
end
context 'when updating non existing variables' do
let(:variables_attributes) do
[
{
id: 'some-id',
key: 'some_key',
secret_value: 'other_value'
}
]
end
it { expect { subject.execute }.to raise_error(ActiveRecord::RecordNotFound) }
end
end
end
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
# TODO: a couple of points with this approach:
# + reuses existing architecture and reactive caching
# - it's not a report comparison and some comparing features must be turned off.
# see CompareReportsBaseService for more notes.
# issue: https://gitlab.com/gitlab-org/gitlab/issues/34224
class GenerateCoverageReportsService < CompareReportsBaseService
def execute(base_pipeline, head_pipeline)
merge_request = MergeRequest.find_by_id(params[:id])
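# The merge request's changed paths scope the coverage artifact to the files in the diff.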
{
status: :parsed,
key: key(base_pipeline, head_pipeline),
data: head_pipeline.pipeline_artifacts.find_by_file_type(:code_coverage).present.for_files(merge_request.new_paths)
}
rescue StandardError => e
Gitlab::ErrorTracking.track_exception(
e,
project_id: project.id,
base_pipeline_id: base_pipeline&.id,
head_pipeline_id: head_pipeline&.id
)
{
status: :error,
key: key(base_pipeline, head_pipeline),
status_reason: _('An error occurred while fetching coverage reports.')
}
end
def latest?(base_pipeline, head_pipeline, data)
data&.fetch(:key, nil) == key(base_pipeline, head_pipeline)
end
private
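# The cache key embeds both pipeline ids and the latest updated_at across each hierarchy, so any change in either tree yields a new key and #latest? reports the cached data as stale.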
def key(base_pipeline, head_pipeline)
[
base_pipeline&.id, last_update_timestamp(base_pipeline),
head_pipeline&.id, last_update_timestamp(head_pipeline)
]
end
def last_update_timestamp(pipeline_hierarchy)
pipeline_hierarchy&.self_and_project_descendants&.maximum(:updated_at)
end
end
end
```
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::GenerateCoverageReportsService, feature_category: :code_testing do
let_it_be(:project) { create(:project, :repository) }
let(:service) { described_class.new(project) }
describe '#execute' do
subject { service.execute(base_pipeline, head_pipeline) }
context 'when head pipeline has coverage reports' do
let!(:merge_request) { create(:merge_request, :with_coverage_reports, source_project: project) }
let!(:service) { described_class.new(project, nil, id: merge_request.id) }
let!(:head_pipeline) { merge_request.head_pipeline }
let!(:base_pipeline) { nil }
it 'returns status and data', :aggregate_failures do
expect_any_instance_of(Ci::PipelineArtifact) do |instance|
expect(instance).to receive(:present)
expect(instance).to receive(:for_files).with(merge_request.new_paths).and_call_original
end
expect(subject[:status]).to eq(:parsed)
expect(subject[:data]).to eq(files: {})
end
end
context 'when head pipeline does not have a coverage report artifact' do
let!(:merge_request) { create(:merge_request, :with_coverage_reports, source_project: project) }
let!(:service) { described_class.new(project, nil, id: merge_request.id) }
let!(:head_pipeline) { merge_request.head_pipeline }
let!(:base_pipeline) { nil }
before do
head_pipeline.pipeline_artifacts.destroy_all # rubocop: disable Cop/DestroyAll
end
it 'returns status and error message' do
expect(subject[:status]).to eq(:error)
expect(subject[:status_reason]).to include('An error occurred while fetching coverage reports.')
end
end
context 'when head pipeline has coverage reports and no merge request associated' do
let!(:head_pipeline) { create(:ci_pipeline, :with_coverage_reports, project: project) }
let!(:base_pipeline) { nil }
it 'returns status and error message' do
expect(subject[:status]).to eq(:error)
expect(subject[:status_reason]).to include('An error occurred while fetching coverage reports.')
end
end
end
describe '#latest?' do
subject { service.latest?(base_pipeline, head_pipeline, data) }
let!(:base_pipeline) { nil }
let!(:head_pipeline) { create(:ci_pipeline, :with_coverage_reports, project: project) }
let!(:child_pipeline) { create(:ci_pipeline, child_of: head_pipeline) }
let!(:key) { service.send(:key, base_pipeline, head_pipeline) }
let(:data) { { key: key } }
context 'when cache key is latest' do
it { is_expected.to be_truthy }
end
context 'when head pipeline has been updated' do
before do
head_pipeline.update_column(:updated_at, 1.minute.from_now)
end
it { is_expected.to be_falsy }
end
context 'when cache key is empty' do
let(:data) { { key: nil } }
it { is_expected.to be_falsy }
end
context 'when the pipeline has a child that is updated' do
before do
child_pipeline.update_column(:updated_at, 1.minute.from_now)
end
it { is_expected.to be_falsy }
end
end
end
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class ProcessPipelineService
attr_reader :pipeline
def initialize(pipeline)
@pipeline = pipeline
end
def execute
increment_processing_counter
Ci::PipelineProcessing::AtomicProcessingService
.new(pipeline)
.execute
end
def metrics
@metrics ||= ::Gitlab::Ci::Pipeline::Metrics
end
private
def increment_processing_counter
metrics.pipeline_processing_events_counter.increment
end
end
end
```
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::ProcessPipelineService, feature_category: :continuous_integration do
let_it_be(:project) { create(:project) }
let(:pipeline) do
create(:ci_empty_pipeline, ref: 'master', project: project)
end
let(:pipeline_processing_events_counter) { double(increment: true) }
let(:metrics) do
double(pipeline_processing_events_counter: pipeline_processing_events_counter)
end
subject { described_class.new(pipeline) }
before do
stub_ci_pipeline_to_return_yaml_file
stub_not_protect_default_branch
allow(subject).to receive(:metrics).and_return(metrics)
end
describe 'processing events counter' do
it 'increments processing events counter' do
expect(pipeline_processing_events_counter).to receive(:increment)
subject.execute
end
end
end
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class CreateWebIdeTerminalService < ::BaseService
include ::Gitlab::Utils::StrongMemoize
TerminalCreationError = Class.new(StandardError)
TERMINAL_NAME = 'terminal'
attr_reader :terminal
def execute
check_access!
validate_params!
load_terminal_config!
pipeline = create_pipeline!
success(pipeline: pipeline)
rescue TerminalCreationError => e
error(e.message)
rescue ActiveRecord::RecordInvalid => e
error("Failed to persist the pipeline: #{e.message}")
end
private
def create_pipeline!
build_pipeline.tap do |pipeline|
pipeline.stages << terminal_stage_seed(pipeline).to_resource
# Project iid must be generated outside a transaction, so we ensure it is set here;
# otherwise it may be set within the save!, which would lock the InternalId row for the whole transaction
pipeline.ensure_project_iid!
pipeline.save!
Ci::ProcessPipelineService
.new(pipeline)
.execute
pipeline_created_counter.increment(source: :webide)
end
end
def build_pipeline
Ci::Pipeline.new(
project: project,
user: current_user,
source: :webide,
config_source: :webide_source,
ref: ref,
sha: sha,
tag: false,
before_sha: Gitlab::Git::BLANK_SHA
)
end
def terminal_stage_seed(pipeline)
attributes = {
name: TERMINAL_NAME,
index: 0,
builds: [terminal_build_seed]
}
seed_context = Gitlab::Ci::Pipeline::Seed::Context.new(pipeline)
Gitlab::Ci::Pipeline::Seed::Stage.new(seed_context, attributes, [])
end
def terminal_build_seed
terminal.merge(
name: TERMINAL_NAME,
stage: TERMINAL_NAME,
user: current_user,
scheduling_type: :stage)
end
def load_terminal_config!
result = ::Ide::TerminalConfigService.new(project, current_user, sha: sha).execute
raise TerminalCreationError, result[:message] if result[:status] != :success
@terminal = result[:terminal]
raise TerminalCreationError, 'Terminal is not configured' unless terminal
end
def validate_params!
unless sha
raise TerminalCreationError, 'Ref does not exist'
end
unless branch_exists?
raise TerminalCreationError, 'Ref needs to be a branch'
end
end
def check_access!
unless can?(current_user, :create_web_ide_terminal, project)
raise TerminalCreationError, 'Insufficient permissions to create a terminal'
end
if terminal_active?
raise TerminalCreationError, 'There is already a terminal running'
end
end
def pipeline_created_counter
@pipeline_created_counter ||= Gitlab::Metrics
.counter(:pipelines_created_total, "Counter of pipelines created")
end
def terminal_active?
project.active_webide_pipelines(user: current_user).exists?
end
def ref
strong_memoize(:ref) do
Gitlab::Git.ref_name(params[:ref])
end
end
def branch_exists?
project.repository.branch_exists?(ref)
end
def sha
project.commit(params[:ref]).try(:id)
end
end
end
```
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::CreateWebIdeTerminalService, feature_category: :continuous_integration do
let_it_be(:project) { create(:project, :repository) }
let_it_be(:user) { create(:user) }
let(:ref) { 'master' }
describe '#execute' do
subject { described_class.new(project, user, ref: ref).execute }
context 'for maintainer' do
shared_examples 'be successful' do
it 'returns a success with pipeline object' do
is_expected.to include(status: :success)
expect(subject[:pipeline]).to be_a(Ci::Pipeline)
expect(subject[:pipeline]).to be_persisted
expect(subject[:pipeline].stages.count).to eq(1)
expect(subject[:pipeline].builds.count).to eq(1)
end
it 'calls ensure_project_iid explicitly' do
expect_next_instance_of(Ci::Pipeline) do |instance|
expect(instance).to receive(:ensure_project_iid!).twice
end
subject
end
end
before do
project.add_maintainer(user)
end
context 'when web-ide has valid configuration' do
before do
stub_webide_config_file(config_content)
end
context 'for empty configuration' do
let(:config_content) do
'terminal: {}'
end
it_behaves_like 'be successful'
end
context 'for configuration with container image' do
let(:config_content) do
'terminal: { image: ruby }'
end
it_behaves_like 'be successful'
end
context 'for configuration with ports' do
let(:config_content) do
<<-EOS
terminal:
image:
name: image:1.0
ports:
- 80
script: rspec
services:
- name: test
alias: test
ports:
- 8080
EOS
end
it_behaves_like 'be successful'
end
context 'for configuration with variables' do
let(:config_content) do
<<-EOS
terminal:
script: rspec
variables:
KEY1: VAL1
EOS
end
it_behaves_like 'be successful'
it 'saves the variables' do
expect(subject[:pipeline].builds[0].variables).to include(
key: 'KEY1', value: 'VAL1', public: true, masked: false
)
end
end
end
end
context 'error handling' do
shared_examples 'having an error' do |message|
it 'returns an error' do
is_expected.to eq(
status: :error,
message: message
)
end
end
shared_examples 'having insufficient permissions' do
it_behaves_like 'having an error', 'Insufficient permissions to create a terminal'
end
context 'when user is developer' do
before do
project.add_developer(user)
end
it_behaves_like 'having insufficient permissions'
end
context 'when user is maintainer' do
before do
project.add_maintainer(user)
end
context 'when terminal is already running' do
let!(:webide_pipeline) { create(:ci_pipeline, :webide, :running, project: project, user: user) }
it_behaves_like 'having an error', 'There is already a terminal running'
end
context 'when ref is non-existing' do
let(:ref) { 'non-existing-ref' }
it_behaves_like 'having an error', 'Ref does not exist'
end
context 'when ref is a tag' do
let(:ref) { 'v1.0.0' }
it_behaves_like 'having an error', 'Ref needs to be a branch'
end
context 'when terminal config is missing' do
let(:ref) { 'v1.0.0' }
it_behaves_like 'having an error', 'Ref needs to be a branch'
end
context 'when webide config is present' do
before do
stub_webide_config_file(config_content)
end
context 'config has invalid content' do
let(:config_content) { 'invalid' }
it_behaves_like 'having an error', 'Invalid configuration format'
end
context 'config is valid, but does not have terminal' do
let(:config_content) { '{}' }
it_behaves_like 'having an error', 'Terminal is not configured'
end
end
end
end
end
end
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
# Takes in input a Ci::Bridge job and creates a downstream pipeline
# (either multi-project or child pipeline) according to the Ci::Bridge
# specifications.
class CreateDownstreamPipelineService < ::BaseService
include Gitlab::Utils::StrongMemoize
include Ci::DownstreamPipelineHelpers
DuplicateDownstreamPipelineError = Class.new(StandardError)
MAX_NESTED_CHILDREN = 2
def execute(bridge)
@bridge = bridge
if @bridge.has_downstream_pipeline?
Gitlab::ErrorTracking.track_exception(
DuplicateDownstreamPipelineError.new,
bridge_id: @bridge.id, project_id: @bridge.project_id
)
return ServiceResponse.error(message: 'Already has a downstream pipeline')
end
pipeline_params = @bridge.downstream_pipeline_params
target_ref = pipeline_params.dig(:target_revision, :ref)
return ServiceResponse.error(message: 'Pre-conditions not met') unless ensure_preconditions!(target_ref)
return ServiceResponse.error(message: 'Can not run the bridge') unless @bridge.run
service = ::Ci::CreatePipelineService.new(
pipeline_params.fetch(:project),
current_user,
pipeline_params.fetch(:target_revision))
downstream_pipeline = service
.execute(pipeline_params.fetch(:source), **pipeline_params[:execute_params])
.payload
log_downstream_pipeline_creation(downstream_pipeline)
update_bridge_status!(@bridge, downstream_pipeline)
rescue StandardError => e
@bridge.reset.drop!(:data_integrity_failure)
raise e
end
private
def update_bridge_status!(bridge, pipeline)
Gitlab::OptimisticLocking.retry_lock(bridge, name: 'create_downstream_pipeline_update_bridge_status') do |subject|
if pipeline.created_successfully?
# If bridge uses `strategy:depend` we leave it running
# and update the status when the downstream pipeline completes.
subject.success! unless subject.dependent?
ServiceResponse.success(payload: pipeline)
else
message = pipeline.errors.full_messages
subject.options[:downstream_errors] = message
subject.drop!(:downstream_pipeline_creation_failed)
ServiceResponse.error(payload: pipeline, message: message)
end
end
rescue StateMachines::InvalidTransition => e
Gitlab::ErrorTracking.track_exception(
Ci::Bridge::InvalidTransitionError.new(e.message),
bridge_id: bridge.id,
downstream_pipeline_id: pipeline.id)
ServiceResponse.error(payload: pipeline, message: e.message)
end
def ensure_preconditions!(target_ref)
unless downstream_project_accessible?
@bridge.drop!(:downstream_bridge_project_not_found)
return false
end
# TODO: Remove this condition in favour of model validation
# https://gitlab.com/gitlab-org/gitlab/issues/38338
if downstream_project == project && [email protected]_child_pipeline?
@bridge.drop!(:invalid_bridge_trigger)
return false
end
# TODO: Remove this condition in favour of model validation
# https://gitlab.com/gitlab-org/gitlab/issues/38338
# only applies to parent-child pipelines not multi-project
if has_max_nested_children?
@bridge.drop!(:reached_max_descendant_pipelines_depth)
return false
end
if pipeline_tree_too_large?
@bridge.drop!(:reached_max_pipeline_hierarchy_size)
return false
end
unless can_create_downstream_pipeline?(target_ref)
@bridge.drop!(:insufficient_bridge_permissions)
return false
end
if has_cyclic_dependency?
@bridge.drop!(:pipeline_loop_detected)
return false
end
true
end
def downstream_project_accessible?
downstream_project.present? &&
can?(current_user, :read_project, downstream_project)
end
def can_create_downstream_pipeline?(target_ref)
can?(current_user, :update_pipeline, project) &&
can?(current_user, :create_pipeline, downstream_project) &&
can_update_branch?(target_ref)
end
def can_update_branch?(target_ref)
::Gitlab::UserAccess.new(current_user, container: downstream_project).can_update_branch?(target_ref)
end
def downstream_project
strong_memoize(:downstream_project) do
@bridge.downstream_project
end
end
def has_cyclic_dependency?
return false if @bridge.triggers_child_pipeline?
pipeline_checksums = @bridge.pipeline.self_and_upstreams.filter_map do |pipeline|
config_checksum(pipeline) unless pipeline.child?
end
# To avoid false positives we allow 1 cycle in the ancestry and
# fail when 2 cycles are detected: A -> B -> A -> B -> A
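# e.g. the checksums for A -> B -> A -> B -> A tally to { a => 3, b => 2 }, so `a` trips the guard (letters here are illustrative).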
pipeline_checksums.tally.any? { |_checksum, occurrences| occurrences > 2 }
end
def has_max_nested_children?
return false unless @bridge.triggers_child_pipeline?
# only applies to parent-child pipelines not multi-project
ancestors_of_new_child = @bridge.pipeline.self_and_project_ancestors
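# self_and_project_ancestors includes the triggering pipeline itself, so a count above MAX_NESTED_CHILDREN means the new child would be nested more than two levels deep.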
ancestors_of_new_child.count > MAX_NESTED_CHILDREN
end
def pipeline_tree_too_large?
return false unless @bridge.triggers_downstream_pipeline?
# Applies to the entire pipeline tree across all projects
# A pipeline tree can be shared between multiple namespaces (customers), the limit that is used here
# is the limit of the namespace that has added a downstream pipeline to a pipeline tree.
@bridge.project.actual_limits.exceeded?(:pipeline_hierarchy_size, complete_hierarchy_count)
end
def complete_hierarchy_count
@bridge.pipeline.complete_hierarchy_count
end
def config_checksum(pipeline)
[pipeline.project_id, pipeline.ref, pipeline.source].hash
end
end
end
```
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::CreateDownstreamPipelineService, '#execute', feature_category: :continuous_integration do
include Ci::SourcePipelineHelpers
# Using let_it_be on user and projects for these specs can cause
# spec-ordering failures due to the project-based permissions
# associating them. They should be recreated every time.
let(:user) { create(:user) }
let(:upstream_project) { create(:project, :repository) }
let(:downstream_project) { create(:project, :repository) }
let!(:upstream_pipeline) do
create(:ci_pipeline, :created, project: upstream_project)
end
let(:trigger) do
{
trigger: {
project: downstream_project.full_path,
branch: 'feature'
}
}
end
let(:bridge) do
create(
:ci_bridge,
status: :pending,
user: user,
options: trigger,
pipeline: upstream_pipeline
)
end
let(:service) { described_class.new(upstream_project, user) }
let(:pipeline) { subject.payload }
before do
upstream_project.add_developer(user)
end
subject { service.execute(bridge) }
context 'when downstream project has not been found' do
let(:trigger) do
{ trigger: { project: 'unknown/project' } }
end
it 'does not create a pipeline' do
expect { subject }
.not_to change { Ci::Pipeline.count }
expect(subject).to be_error
expect(subject.message).to eq("Pre-conditions not met")
end
it 'changes pipeline bridge job status to failed' do
subject
expect(bridge.reload).to be_failed
expect(bridge.failure_reason)
.to eq 'downstream_bridge_project_not_found'
end
end
context 'when user can not access downstream project' do
it 'does not create a new pipeline' do
expect { subject }
.not_to change { Ci::Pipeline.count }
expect(subject).to be_error
expect(subject.message).to eq("Pre-conditions not met")
end
it 'changes status of the bridge build to failed' do
subject
expect(bridge.reload).to be_failed
expect(bridge.failure_reason)
.to eq 'downstream_bridge_project_not_found'
end
end
context 'when user does not have access to create pipeline' do
before do
downstream_project.add_guest(user)
end
it 'does not create a new pipeline' do
expect { subject }
.not_to change { Ci::Pipeline.count }
expect(subject).to be_error
expect(subject.message).to eq("Pre-conditions not met")
end
it 'changes status of the bridge build to failed' do
subject
expect(bridge.reload).to be_failed
expect(bridge.failure_reason).to eq 'insufficient_bridge_permissions'
end
end
context 'when user can create pipeline in a downstream project' do
let(:stub_config) { true }
before do
downstream_project.add_developer(user)
stub_ci_pipeline_yaml_file(YAML.dump(rspec: { script: 'rspec' })) if stub_config
end
it 'creates only one new pipeline' do
expect { subject }
.to change { Ci::Pipeline.count }.by(1)
expect(subject).to be_success
end
it 'creates a new pipeline in a downstream project' do
expect(pipeline.user).to eq bridge.user
expect(pipeline.project).to eq downstream_project
expect(bridge.reload.sourced_pipeline.pipeline).to eq pipeline
expect(pipeline.triggered_by_pipeline).to eq upstream_pipeline
expect(pipeline.source_bridge).to eq bridge
expect(pipeline.source_bridge).to be_a ::Ci::Bridge
end
it_behaves_like 'logs downstream pipeline creation' do
let(:downstream_pipeline) { pipeline }
let(:expected_root_pipeline) { upstream_pipeline }
let(:expected_hierarchy_size) { 2 }
let(:expected_downstream_relationship) { :multi_project }
end
it 'updates bridge status when downstream pipeline gets processed' do
expect(pipeline.reload).to be_created
expect(bridge.reload).to be_success
end
it 'triggers the upstream pipeline duration calculation', :sidekiq_inline do
expect { subject }
.to change { upstream_pipeline.reload.duration }.from(nil).to(an_instance_of(Integer))
end
context 'when bridge job has already any downstream pipeline' do
before do
bridge.create_sourced_pipeline!(
source_pipeline: bridge.pipeline,
source_project: bridge.project,
project: bridge.project,
pipeline: create(:ci_pipeline, project: bridge.project)
)
end
it 'logs an error and exits' do
expect(Gitlab::ErrorTracking)
.to receive(:track_exception)
.with(
instance_of(described_class::DuplicateDownstreamPipelineError),
bridge_id: bridge.id, project_id: bridge.project.id)
.and_call_original
expect(Ci::CreatePipelineService).not_to receive(:new)
expect(subject).to be_error
expect(subject.message).to eq("Already has a downstream pipeline")
end
end
context 'when target ref is not specified' do
let(:trigger) do
{ trigger: { project: downstream_project.full_path } }
end
it 'uses the default branch name' do
expect(pipeline.ref).to eq 'master'
end
end
context 'when downstream pipeline has yaml configuration error' do
before do
stub_ci_pipeline_yaml_file(YAML.dump(job: { invalid: 'yaml' }))
end
it 'creates only one new pipeline' do
expect { subject }
.to change { Ci::Pipeline.count }.by(1)
expect(subject).to be_error
expect(subject.message).to match_array(["jobs job config should implement a script: or a trigger: keyword"])
end
it 'creates a new pipeline in a downstream project' do
expect(pipeline.user).to eq bridge.user
expect(pipeline.project).to eq downstream_project
expect(bridge.reload.sourced_pipeline.pipeline).to eq pipeline
expect(pipeline.triggered_by_pipeline).to eq upstream_pipeline
expect(pipeline.source_bridge).to eq bridge
expect(pipeline.source_bridge).to be_a ::Ci::Bridge
end
it 'updates the bridge status when downstream pipeline gets processed' do
expect(pipeline.reload).to be_failed
expect(bridge.reload).to be_failed
end
end
context 'when downstream project is the same as the upstream project' do
let(:trigger) do
{ trigger: { project: upstream_project.full_path } }
end
context 'detects a circular dependency' do
it 'does not create a new pipeline' do
expect { subject }
.not_to change { Ci::Pipeline.count }
expect(subject).to be_error
expect(subject.message).to eq("Pre-conditions not met")
end
it 'changes status of the bridge build' do
subject
expect(bridge.reload).to be_failed
expect(bridge.failure_reason).to eq 'invalid_bridge_trigger'
end
end
context 'when "include" is provided' do
let(:file_content) do
YAML.dump(
rspec: { script: 'rspec' },
echo: { script: 'echo' })
end
shared_examples 'creates a child pipeline' do
it 'creates only one new pipeline' do
expect { subject }
.to change { Ci::Pipeline.count }.by(1)
expect(subject).to be_success
end
it 'creates a child pipeline in the same project' do
expect(pipeline.builds.map(&:name)).to match_array(%w[rspec echo])
expect(pipeline.user).to eq bridge.user
expect(pipeline.project).to eq bridge.project
expect(bridge.reload.sourced_pipeline.pipeline).to eq pipeline
expect(pipeline.triggered_by_pipeline).to eq upstream_pipeline
expect(pipeline.source_bridge).to eq bridge
expect(pipeline.source_bridge).to be_a ::Ci::Bridge
end
it 'updates bridge status when downstream pipeline gets processed' do
expect(pipeline.reload).to be_created
expect(bridge.reload).to be_success
end
it 'propagates parent pipeline settings to the child pipeline' do
expect(pipeline.ref).to eq(upstream_pipeline.ref)
expect(pipeline.sha).to eq(upstream_pipeline.sha)
expect(pipeline.source_sha).to eq(upstream_pipeline.source_sha)
expect(pipeline.target_sha).to eq(upstream_pipeline.target_sha)
expect(pipeline.trigger_requests.last).to eq(bridge.trigger_request)
end
end
before do
upstream_project.repository.create_file(
user, 'child-pipeline.yml', file_content, message: 'message', branch_name: 'master')
upstream_pipeline.update!(sha: upstream_project.commit.id)
end
let(:stub_config) { false }
let(:trigger) do
{
trigger: { include: 'child-pipeline.yml' }
}
end
it_behaves_like 'creates a child pipeline'
it_behaves_like 'logs downstream pipeline creation' do
let(:downstream_pipeline) { pipeline }
let(:expected_root_pipeline) { upstream_pipeline }
let(:expected_hierarchy_size) { 2 }
let(:expected_downstream_relationship) { :parent_child }
end
it 'updates the bridge job to success' do
expect { subject }.to change { bridge.status }.to 'success'
expect(subject).to be_success
end
context 'when bridge uses "depend" strategy' do
let(:trigger) do
{
trigger: { include: 'child-pipeline.yml', strategy: 'depend' }
}
end
it 'updates the bridge job to running status' do
expect { subject }.to change { bridge.status }.from('pending').to('running')
expect(subject).to be_success
end
end
context 'when latest sha for the ref changed in the meantime' do
before do
upstream_project.repository.create_file(
user, 'another-change', 'test', message: 'message', branch_name: 'master')
end
# it does not auto-cancel pipelines from the same family
it_behaves_like 'creates a child pipeline'
end
context 'when the parent is a merge request pipeline' do
let(:merge_request) { create(:merge_request, source_project: bridge.project, target_project: bridge.project) }
let(:file_content) do
YAML.dump(
workflow: { rules: [{ if: '$CI_MERGE_REQUEST_ID' }] },
rspec: { script: 'rspec' },
echo: { script: 'echo' })
end
before do
bridge.pipeline.update!(source: :merge_request_event, merge_request: merge_request)
end
it_behaves_like 'creates a child pipeline'
it 'propagates the merge request to the child pipeline' do
expect(pipeline.merge_request).to eq(merge_request)
expect(pipeline).to be_merge_request
end
end
context 'when upstream pipeline has a parent pipeline' do
before do
create(:ci_sources_pipeline,
source_pipeline: create(:ci_pipeline, project: upstream_pipeline.project),
pipeline: upstream_pipeline
)
end
it 'creates the pipeline' do
expect { subject }
.to change { Ci::Pipeline.count }.by(1)
expect(subject).to be_success
expect(bridge.reload).to be_success
end
it_behaves_like 'logs downstream pipeline creation' do
let(:downstream_pipeline) { pipeline }
let(:expected_root_pipeline) { upstream_pipeline.parent_pipeline }
let(:expected_hierarchy_size) { 3 }
let(:expected_downstream_relationship) { :parent_child }
end
end
context 'when upstream pipeline has a parent pipeline, which has a parent pipeline' do
before do
parent_of_upstream_pipeline = create(:ci_pipeline, project: upstream_pipeline.project)
create(:ci_sources_pipeline,
source_pipeline: create(:ci_pipeline, project: upstream_pipeline.project),
pipeline: parent_of_upstream_pipeline
)
create(:ci_sources_pipeline,
source_pipeline: parent_of_upstream_pipeline,
pipeline: upstream_pipeline
)
end
it 'does not create a second descendant pipeline' do
expect { subject }
.not_to change { Ci::Pipeline.count }
expect(bridge.reload).to be_failed
expect(bridge.failure_reason).to eq 'reached_max_descendant_pipelines_depth'
end
end
context 'when upstream pipeline has two level upstream pipelines from different projects' do
before do
upstream_of_upstream_of_upstream_pipeline = create(:ci_pipeline)
upstream_of_upstream_pipeline = create(:ci_pipeline)
create(:ci_sources_pipeline,
source_pipeline: upstream_of_upstream_of_upstream_pipeline,
pipeline: upstream_of_upstream_pipeline
)
create(:ci_sources_pipeline,
source_pipeline: upstream_of_upstream_pipeline,
pipeline: upstream_pipeline
)
end
it 'creates the pipeline' do
expect { subject }.to change { Ci::Pipeline.count }.by(1)
expect(subject).to be_success
end
end
context 'when downstream project does not allow user-defined variables for child pipelines' do
before do
bridge.yaml_variables = [{ key: 'BRIDGE', value: '$PIPELINE_VARIABLE-var', public: true }]
upstream_pipeline.project.update!(restrict_user_defined_variables: true)
end
it 'creates a new pipeline allowing variables to be passed downstream' do
expect { subject }.to change { Ci::Pipeline.count }.by(1)
expect(subject).to be_success
end
it 'passes variables downstream from the bridge' do
pipeline.variables.map(&:key).tap do |variables|
expect(variables).to include 'BRIDGE'
end
end
end
context 'when multi-project pipeline runs from child pipelines bridge job' do
before do
stub_ci_pipeline_yaml_file(YAML.dump(rspec: { script: 'rspec' }))
end
# Instantiate a new service to clear memoized values from the child pipeline run
subject(:execute_with_trigger_project_bridge) do
described_class.new(upstream_project, user).execute(trigger_project_bridge)
end
let!(:child_pipeline) do
service.execute(bridge)
bridge.downstream_pipeline
end
let!(:trigger_downstream_project) do
{
trigger: {
project: downstream_project.full_path,
branch: 'feature'
}
}
end
let!(:trigger_project_bridge) do
create(
:ci_bridge, status: :pending, user: user, options: trigger_downstream_project, pipeline: child_pipeline
)
end
it 'creates a new pipeline' do
expect { execute_with_trigger_project_bridge }
.to change { Ci::Pipeline.count }.by(1)
new_pipeline = trigger_project_bridge.downstream_pipeline
expect(new_pipeline.child?).to eq(false)
expect(new_pipeline.triggered_by_pipeline).to eq child_pipeline
expect(trigger_project_bridge.reload).not_to be_failed
end
end
end
end
describe 'cyclical dependency detection' do
shared_examples 'detects cyclical pipelines' do
it 'does not create a new pipeline' do
expect { subject }
.not_to change { Ci::Pipeline.count }
expect(subject).to be_error
expect(subject.message).to eq("Pre-conditions not met")
end
it 'changes status of the bridge build' do
subject
expect(bridge.reload).to be_failed
expect(bridge.failure_reason).to eq 'pipeline_loop_detected'
end
end
shared_examples 'passes cyclical pipeline precondition' do
it 'creates a new pipeline' do
expect { subject }
.to change { Ci::Pipeline.count }
expect(subject).to be_success
end
it 'does not fail the bridge build' do
subject
expect(bridge.reload).not_to be_failed
end
end
context 'when pipeline ancestry contains 2 cycles of dependencies' do
before do
# A(push on master) -> B(pipeline on master) -> A(push on master) ->
# B(pipeline on master) -> A(push on master)
pipeline_1 = create(:ci_pipeline, project: upstream_project, source: :push)
pipeline_2 = create(:ci_pipeline, project: downstream_project, source: :pipeline)
pipeline_3 = create(:ci_pipeline, project: upstream_project, source: :push)
pipeline_4 = create(:ci_pipeline, project: downstream_project, source: :pipeline)
create_source_pipeline(pipeline_1, pipeline_2)
create_source_pipeline(pipeline_2, pipeline_3)
create_source_pipeline(pipeline_3, pipeline_4)
create_source_pipeline(pipeline_4, upstream_pipeline)
end
it_behaves_like 'detects cyclical pipelines'
end
context 'when source in the ancestry differ' do
before do
# A(push on master) -> B(pipeline on master) -> A(pipeline on master)
pipeline_1 = create(:ci_pipeline, project: upstream_project, source: :push)
pipeline_2 = create(:ci_pipeline, project: downstream_project, source: :pipeline)
upstream_pipeline.update!(source: :pipeline)
create_source_pipeline(pipeline_1, pipeline_2)
create_source_pipeline(pipeline_2, upstream_pipeline)
end
it_behaves_like 'passes cyclical pipeline precondition'
end
context 'when ref in the ancestry differ' do
before do
# A(push on master) -> B(pipeline on master) -> A(push on feature-1)
pipeline_1 = create(:ci_pipeline, ref: 'master', project: upstream_project, source: :push)
pipeline_2 = create(:ci_pipeline, ref: 'master', project: downstream_project, source: :pipeline)
upstream_pipeline.update!(ref: 'feature-1')
create_source_pipeline(pipeline_1, pipeline_2)
create_source_pipeline(pipeline_2, upstream_pipeline)
end
it_behaves_like 'passes cyclical pipeline precondition'
end
context 'when only 1 cycle is detected' do
before do
# A(push on master) -> B(pipeline on master) -> A(push on master)
pipeline_1 = create(:ci_pipeline, ref: 'master', project: upstream_project, source: :push)
pipeline_2 = create(:ci_pipeline, ref: 'master', project: downstream_project, source: :pipeline)
create_source_pipeline(pipeline_1, pipeline_2)
create_source_pipeline(pipeline_2, upstream_pipeline)
end
it_behaves_like 'passes cyclical pipeline precondition'
end
end
context 'when downstream pipeline creation errors out' do
let(:stub_config) { false }
before do
stub_ci_pipeline_yaml_file(YAML.dump(invalid: { yaml: 'error' }))
end
it 'creates only one new pipeline' do
expect { subject }
.to change { Ci::Pipeline.count }.by(1)
expect(subject).to be_error
expect(subject.message).to match_array(["jobs invalid config should implement a script: or a trigger: keyword"])
end
it 'creates a new pipeline in the downstream project' do
expect(pipeline.user).to eq bridge.user
expect(pipeline.project).to eq downstream_project
end
it 'drops the bridge' do
expect(pipeline.reload).to be_failed
expect(bridge.reload).to be_failed
expect(bridge.failure_reason).to eq('downstream_pipeline_creation_failed')
end
end
context 'when bridge job status update raises state machine errors' do
let(:stub_config) { false }
before do
stub_ci_pipeline_yaml_file(YAML.dump(invalid: { yaml: 'error' }))
bridge.drop!
end
it 'returns the error' do
expect { subject }.not_to change(downstream_project.ci_pipelines, :count)
expect(subject).to be_error
expect(subject.message).to eq('Can not run the bridge')
end
end
context 'when bridge job has YAML variables defined' do
before do
bridge.yaml_variables = [{ key: 'BRIDGE', value: 'var', public: true }]
end
it 'passes bridge variables to downstream pipeline' do
expect(pipeline.variables.first)
.to have_attributes(key: 'BRIDGE', value: 'var')
end
end
context 'when pipeline variables are defined' do
before do
upstream_pipeline.variables.create!(key: 'PIPELINE_VARIABLE', value: 'my-value')
end
it 'does not pass pipeline variables directly downstream' do
pipeline.variables.map(&:key).tap do |variables|
expect(variables).not_to include 'PIPELINE_VARIABLE'
end
end
context 'when using YAML variables interpolation' do
before do
bridge.yaml_variables = [{ key: 'BRIDGE', value: '$PIPELINE_VARIABLE-var', public: true }]
end
it 'makes it possible to pass a pipeline variable downstream' do
pipeline.variables.find_by(key: 'BRIDGE').tap do |variable|
expect(variable.value).to eq 'my-value-var'
end
end
context 'when downstream project does not allow user-defined variables for multi-project pipelines' do
before do
downstream_project.update!(restrict_user_defined_variables: true)
end
it 'does not create a new pipeline' do
expect { subject }
.not_to change { Ci::Pipeline.count }
expect(subject).to be_error
expect(subject.message).to match_array(["Insufficient permissions to set pipeline variables"])
end
it 'ignores variables passed downstream from the bridge' do
pipeline.variables.map(&:key).tap do |variables|
expect(variables).not_to include 'BRIDGE'
end
end
it 'sets errors', :aggregate_failures do
subject
expect(bridge.reload).to be_failed
expect(bridge.failure_reason).to eq('downstream_pipeline_creation_failed')
expect(bridge.options[:downstream_errors]).to eq(['Insufficient permissions to set pipeline variables'])
end
end
end
end
# TODO: Move this context into a feature spec that uses
# multiple pipeline processing services. Location TBD in:
# https://gitlab.com/gitlab-org/gitlab/issues/36216
context 'when configured with bridge job rules', :sidekiq_inline do
before do
stub_ci_pipeline_yaml_file(config)
downstream_project.add_maintainer(upstream_project.first_owner)
end
let(:config) do
<<-EOY
hello:
script: echo world
bridge-job:
rules:
- if: $CI_COMMIT_REF_NAME == "master"
trigger:
project: #{downstream_project.full_path}
branch: master
EOY
end
let(:primary_pipeline) do
Ci::CreatePipelineService.new(upstream_project, upstream_project.first_owner, { ref: 'master' })
.execute(:push, save_on_errors: false)
.payload
end
let(:bridge) { primary_pipeline.processables.find_by(name: 'bridge-job') }
let(:service) { described_class.new(upstream_project, upstream_project.first_owner) }
context 'that include the bridge job' do
it 'creates the downstream pipeline' do
expect { subject }
.to change(downstream_project.ci_pipelines, :count).by(1)
expect(subject).to be_error
expect(subject.message).to eq("Already has a downstream pipeline")
end
end
end
context 'when user does not have access to push protected branch of downstream project' do
before do
create(:protected_branch, :maintainers_can_push, project: downstream_project, name: 'feature')
end
it 'changes status of the bridge build' do
subject
expect(bridge.reload).to be_failed
expect(bridge.failure_reason).to eq 'insufficient_bridge_permissions'
end
end
context 'when there is no such branch in downstream project' do
let(:trigger) do
{
trigger: {
project: downstream_project.full_path,
branch: 'invalid_branch'
}
}
end
it 'does not create a pipeline and drops the bridge' do
expect { subject }.not_to change(downstream_project.ci_pipelines, :count)
expect(subject).to be_error
expect(subject.message).to match_array(["Reference not found"])
expect(bridge.reload).to be_failed
expect(bridge.failure_reason).to eq('downstream_pipeline_creation_failed')
expect(bridge.options[:downstream_errors]).to eq(['Reference not found'])
end
end
context 'when downstream pipeline has a branch rule and does not satisfy it' do
before do
stub_ci_pipeline_yaml_file(config)
end
let(:config) do
<<-EOY
hello:
script: echo world
only:
- invalid_branch
EOY
end
it 'does not create a pipeline and drops the bridge' do
expect { subject }.not_to change(downstream_project.ci_pipelines, :count)
expect(subject).to be_error
expect(subject.message).to match_array(['Pipeline will not run for the selected trigger. ' \
'The rules configuration prevented any jobs from being added to the pipeline.'])
expect(bridge.reload).to be_failed
expect(bridge.failure_reason).to eq('downstream_pipeline_creation_failed')
expect(bridge.options[:downstream_errors]).to match_array(['Pipeline will not run for the selected trigger. ' \
'The rules configuration prevented any jobs from being added to the pipeline.'])
end
end
context 'when downstream pipeline has invalid YAML' do
before do
stub_ci_pipeline_yaml_file(config)
end
let(:config) do
<<-EOY
test:
stage: testx
script: echo 1
EOY
end
it 'creates the pipeline but drops the bridge' do
expect { subject }.to change(downstream_project.ci_pipelines, :count).by(1)
expect(subject).to be_error
expect(subject.message).to eq(
["test job: chosen stage does not exist; available stages are .pre, build, test, deploy, .post"]
)
expect(bridge.reload).to be_failed
expect(bridge.failure_reason).to eq('downstream_pipeline_creation_failed')
expect(bridge.options[:downstream_errors]).to eq(
['test job: chosen stage does not exist; available stages are .pre, build, test, deploy, .post']
)
end
end
context 'when downstream pipeline has workflow rule' do
before do
stub_ci_pipeline_yaml_file(config)
end
let(:config) do
<<-EOY
workflow:
rules:
- if: $my_var
regular-job:
script: 'echo Hello, World!'
EOY
end
context 'when passing the required variable' do
before do
bridge.yaml_variables = [{ key: 'my_var', value: 'var', public: true }]
end
it 'creates the pipeline' do
expect { subject }.to change(downstream_project.ci_pipelines, :count).by(1)
expect(subject).to be_success
expect(bridge.reload).to be_success
end
end
context 'when not passing the required variable' do
it 'does not create the pipeline' do
expect { subject }.not_to change(downstream_project.ci_pipelines, :count)
end
end
end
context 'when a downstream pipeline has sibling pipelines' do
it_behaves_like 'logs downstream pipeline creation' do
let(:downstream_pipeline) { pipeline }
let(:expected_root_pipeline) { upstream_pipeline }
let(:expected_downstream_relationship) { :multi_project }
# New downstream, plus upstream, plus two children of upstream created below
let(:expected_hierarchy_size) { 4 }
before do
create_list(:ci_pipeline, 2, child_of: upstream_pipeline)
end
end
end
context 'when the pipeline tree is too large' do
let_it_be(:parent) { create(:ci_pipeline) }
let_it_be(:child) { create(:ci_pipeline, child_of: parent) }
let_it_be(:sibling) { create(:ci_pipeline, child_of: parent) }
let(:project) { build(:project, :repository) }
let(:bridge) do
create(:ci_bridge, status: :pending, user: user, options: trigger, pipeline: child, project: project)
end
context 'when limit was specified by admin' do
before do
project.actual_limits.update!(pipeline_hierarchy_size: 3)
end
it 'does not create a new pipeline' do
expect { subject }.not_to change { Ci::Pipeline.count }
end
it 'drops the trigger job with an explanatory reason' do
subject
expect(bridge.reload).to be_failed
expect(bridge.failure_reason).to eq('reached_max_pipeline_hierarchy_size')
end
end
context 'when there was no limit specified by admin' do
before do
allow(bridge.pipeline).to receive(:complete_hierarchy_count).and_return(1000)
end
context 'when pipeline count reaches the default limit of 1000' do
it 'does not create a new pipeline' do
expect { subject }.not_to change { Ci::Pipeline.count }
expect(subject).to be_error
expect(subject.message).to eq("Pre-conditions not met")
end
it 'drops the trigger job with an explanatory reason' do
subject
expect(bridge.reload).to be_failed
expect(bridge.failure_reason).to eq('reached_max_pipeline_hierarchy_size')
end
end
end
end
end
context 'when downstream pipeline creation fails with unexpected errors', :aggregate_failures do
before do
downstream_project.add_developer(user)
allow(::Ci::CreatePipelineService).to receive(:new)
.and_raise(RuntimeError, 'undefined failure')
end
it 'drops the bridge without creating a pipeline' do
expect { subject }
.to raise_error(RuntimeError, /undefined failure/)
.and change { Ci::Pipeline.count }.by(0)
expect(bridge.reload).to be_failed
expect(bridge.failure_reason).to eq('data_integrity_failure')
end
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
class ArchiveTraceService
include ::Gitlab::ExclusiveLeaseHelpers
EXCLUSIVE_LOCK_KEY = 'archive_trace_service:batch_execute:lock'
LOCK_TIMEOUT = 56.minutes
LOOP_TIMEOUT = 55.minutes
LOOP_LIMIT = 2000
BATCH_SIZE = 100
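# Sketch of the batch entry point as exercised by the specs below; the
# worker_name is only a label echoed into the Sidekiq log entries, and
# 'SomeCronWorker' here is a placeholder, not a real class:
#   Ci::ArchiveTraceService.new.batch_execute(worker_name: 'SomeCronWorker')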
# rubocop: disable CodeReuse/ActiveRecord
def batch_execute(worker_name:)
start_time = Time.current
in_lock(EXCLUSIVE_LOCK_KEY, ttl: LOCK_TIMEOUT, retries: 1) do
Ci::Build.with_stale_live_trace.find_each(batch_size: BATCH_SIZE).with_index do |build, index|
break if Time.current - start_time > LOOP_TIMEOUT
if index > LOOP_LIMIT
Sidekiq.logger.warn(class: worker_name, message: 'Loop limit reached.', job_id: build.id)
break
end
begin
execute(build, worker_name: worker_name)
rescue StandardError
next
end
end
end
end
# rubocop: enable CodeReuse/ActiveRecord
def execute(job, worker_name:)
unless job.trace.archival_attempts_available?
Sidekiq.logger.warn(class: worker_name, message: 'The job is out of archival attempts.', job_id: job.id)
job.trace.attempt_archive_cleanup!
return
end
unless job.trace.can_attempt_archival_now?
Sidekiq.logger.warn(class: worker_name, message: 'The job can not be archived right now.', job_id: job.id)
return
end
job.trace.archive!
job.remove_pending_state!
if job.job_artifacts_trace.present?
job.project.execute_integrations(Gitlab::DataBuilder::ArchiveTrace.build(job), :archive_trace_hooks)
end
rescue ::Gitlab::Ci::Trace::AlreadyArchivedError
# It's already archived, thus we can safely ignore this exception.
rescue StandardError => e
job.trace.increment_archival_attempts!
# Tracks this error with application logs, Sentry, and Prometheus.
# If `archive!` keeps failing for over a week, that could incur data loss.
# (See more https://docs.gitlab.com/ee/administration/job_logs.html#new-incremental-logging-architecture)
# In order to avoid interrupting the system, we do not raise an exception here.
archive_error(e, job, worker_name)
end
private
def failed_archive_counter
@failed_archive_counter ||=
Gitlab::Metrics.counter(:job_trace_archive_failed_total, "Counter of failed attempts of trace archiving")
end
def archive_error(error, job, worker_name)
failed_archive_counter.increment
Sidekiq.logger.warn(
class: worker_name,
message: "Failed to archive trace. message: #{error.message}.",
job_id: job.id
)
Gitlab::ErrorTracking.track_and_raise_for_dev_exception(
error,
issue_url: 'https://gitlab.com/gitlab-org/gitlab-foss/issues/51502',
job_id: job.id
)
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::ArchiveTraceService, '#execute', feature_category: :continuous_integration do
subject { described_class.new.execute(job, worker_name: Ci::ArchiveTraceWorker.name) }
context 'when job is finished' do
let(:job) { create(:ci_build, :success, :trace_live) }
it 'creates an archived trace' do
expect { subject }.not_to raise_error
expect(job.reload.job_artifacts_trace).to be_exist
expect(job.trace_metadata.trace_artifact).to eq(job.job_artifacts_trace)
end
context 'integration hooks' do
it do
expect(job.project).to receive(:execute_integrations) do |data, hook_type|
expect(data).to eq Gitlab::DataBuilder::ArchiveTrace.build(job)
expect(hook_type).to eq :archive_trace_hooks
end
expect { subject }.not_to raise_error
end
end
context 'when trace is already archived' do
let!(:job) { create(:ci_build, :success, :trace_artifact) }
it 'ignores an exception' do
expect { subject }.not_to raise_error
end
it 'does not create an archived trace' do
expect { subject }.not_to change { Ci::JobArtifact.trace.count }
end
context 'when live trace chunks still exist' do
before do
create(:ci_build_trace_chunk, build: job, chunk_index: 0)
end
it 'removes the trace chunks' do
expect { subject }.to change { job.trace_chunks.count }.to(0)
end
context 'when associated data does not exist' do
before do
job.job_artifacts_trace.file.remove!
end
it 'removes the trace artifact and builds a new one' do
existing_trace = job.job_artifacts_trace
expect(existing_trace).to receive(:destroy!).and_call_original
subject
expect(job.reload.job_artifacts_trace).to be_present
expect(job.reload.job_artifacts_trace.file.file).to be_present
end
end
end
end
context 'when the job is out of archival attempts' do
before do
create(:ci_build_trace_metadata,
build: job,
archival_attempts: Ci::BuildTraceMetadata::MAX_ATTEMPTS + 1,
last_archival_attempt_at: 1.week.ago)
end
it 'skips archiving' do
expect(job.trace).not_to receive(:archive!)
subject
end
it 'leaves a warning message in sidekiq log' do
expect(Sidekiq.logger).to receive(:warn).with(
class: Ci::ArchiveTraceWorker.name,
message: 'The job is out of archival attempts.',
job_id: job.id)
subject
end
context 'job has archive and chunks' do
let(:job) { create(:ci_build, :success, :trace_artifact) }
before do
create(:ci_build_trace_chunk, build: job, chunk_index: 0)
end
context 'archive is not completed' do
before do
job.job_artifacts_trace.file.remove!
end
it 'cleanups any stale archive data' do
expect(job.job_artifacts_trace).to be_present
subject
expect(job.reload.job_artifacts_trace).to be_nil
end
end
it 'removes trace chunks' do
expect { subject }.to change { job.trace_chunks.count }.to(0)
end
end
end
context 'when the archival process is backed off' do
before do
create(:ci_build_trace_metadata,
build: job,
archival_attempts: Ci::BuildTraceMetadata::MAX_ATTEMPTS - 1,
last_archival_attempt_at: 1.hour.ago)
end
it 'skips archiving' do
expect(job.trace).not_to receive(:archive!)
subject
end
it 'leaves a warning message in sidekiq log' do
expect(Sidekiq.logger).to receive(:warn).with(
class: Ci::ArchiveTraceWorker.name,
message: 'The job can not be archived right now.',
job_id: job.id)
subject
end
end
end
context 'when job is running' do
let(:job) { create(:ci_build, :running, :trace_live) }
it 'increments Prometheus counter, sends crash report to Sentry and ignore an error for continuing to archive' do
expect(Gitlab::ErrorTracking)
.to receive(:track_and_raise_for_dev_exception)
.with(::Gitlab::Ci::Trace::ArchiveError,
issue_url: 'https://gitlab.com/gitlab-org/gitlab-foss/issues/51502',
job_id: job.id).once
expect(Sidekiq.logger).to receive(:warn).with(
class: Ci::ArchiveTraceWorker.name,
message: "Failed to archive trace. message: Job is not finished yet.",
job_id: job.id).and_call_original
expect(Gitlab::Metrics)
.to receive(:counter)
.with(:job_trace_archive_failed_total, "Counter of failed attempts of trace archiving")
.and_call_original
expect { subject }.not_to raise_error
expect(job.trace_metadata.archival_attempts).to eq(1)
end
end
describe '#batch_execute' do
subject { described_class.new.batch_execute(worker_name: Ci::ArchiveTraceWorker.name) }
let_it_be_with_reload(:job) { create(:ci_build, :success, :trace_live, finished_at: 1.day.ago) }
let_it_be_with_reload(:job2) { create(:ci_build, :success, :trace_live, finished_at: 1.day.ago) }
it 'archives multiple traces' do
expect { subject }.not_to raise_error
expect(job.reload.job_artifacts_trace).to be_exist
expect(job2.reload.job_artifacts_trace).to be_exist
end
it 'processes traces independently' do
allow_next_instance_of(Gitlab::Ci::Trace) do |instance|
orig_method = instance.method(:archive!)
allow(instance).to receive(:archive!) do
raise('Unexpected error') if instance.job.id == job.id
orig_method.call
end
end
expect { subject }.not_to raise_error
expect(job.reload.job_artifacts_trace).to be_nil
expect(job2.reload.job_artifacts_trace).to be_exist
end
context 'when timeout is reached' do
before do
stub_const("#{described_class}::LOOP_TIMEOUT", 0.seconds)
end
it 'stops executing traces' do
expect { subject }.not_to raise_error
expect(job.reload.job_artifacts_trace).to be_nil
end
end
context 'when loop limit is reached' do
before do
stub_const("#{described_class}::LOOP_LIMIT", -1)
end
it 'skips archiving' do
expect(job.trace).not_to receive(:archive!)
subject
end
it 'stops executing traces' do
expect(Sidekiq.logger).to receive(:warn).with(
class: Ci::ArchiveTraceWorker.name,
message: "Loop limit reached.",
job_id: job.id)
expect { subject }.not_to raise_error
expect(job.reload.job_artifacts_trace).to be_nil
end
end
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
class UnlockArtifactsService < ::BaseService
BATCH_SIZE = 100
def execute(ci_ref, before_pipeline = nil)
results = {
unlocked_pipelines: 0,
unlocked_job_artifacts: 0,
unlocked_pipeline_artifacts: 0
}
loop do
unlocked_pipelines = []
unlocked_job_artifacts = []
::Ci::Pipeline.transaction do
unlocked_pipelines = unlock_pipelines(ci_ref, before_pipeline)
unlocked_job_artifacts = unlock_job_artifacts(unlocked_pipelines)
results[:unlocked_pipeline_artifacts] += unlock_pipeline_artifacts(unlocked_pipelines)
end
break if unlocked_pipelines.empty?
results[:unlocked_pipelines] += unlocked_pipelines.length
results[:unlocked_job_artifacts] += unlocked_job_artifacts.length
end
results
end
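# Example of consuming the returned tallies (a sketch; project, user and
# ci_ref are assumed to be whatever the caller has in scope):
#   result = Ci::UnlockArtifactsService.new(project, user).execute(ci_ref)
#   result[:unlocked_pipelines]     # => Integer
#   result[:unlocked_job_artifacts] # => Integer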
# rubocop:disable CodeReuse/ActiveRecord
def unlock_job_artifacts_query(pipeline_ids)
ci_job_artifacts = ::Ci::JobArtifact.arel_table
build_ids = ::Ci::Build.select(:id).where(commit_id: pipeline_ids)
returning = Arel::Nodes::Grouping.new(ci_job_artifacts[:id])
Arel::UpdateManager.new
.table(ci_job_artifacts)
.where(ci_job_artifacts[:job_id].in(Arel.sql(build_ids.to_sql)))
.set([[ci_job_artifacts[:locked], ::Ci::JobArtifact.lockeds[:unlocked]]])
.to_sql + " RETURNING #{returning.to_sql}"
end
# rubocop:enable CodeReuse/ActiveRecord
# rubocop:disable CodeReuse/ActiveRecord
def unlock_pipelines_query(ci_ref, before_pipeline)
ci_pipelines = ::Ci::Pipeline.arel_table
pipelines_scope = ci_ref.pipelines.artifacts_locked
pipelines_scope = pipelines_scope.before_pipeline(before_pipeline) if before_pipeline
pipelines_scope = pipelines_scope.select(:id).limit(BATCH_SIZE).lock('FOR UPDATE SKIP LOCKED')
returning = Arel::Nodes::Grouping.new(ci_pipelines[:id])
Arel::UpdateManager.new
.table(ci_pipelines)
.where(ci_pipelines[:id].in(Arel.sql(pipelines_scope.to_sql)))
.set([[ci_pipelines[:locked], ::Ci::Pipeline.lockeds[:unlocked]]])
.to_sql + " RETURNING #{returning.to_sql}"
end
# rubocop:enable CodeReuse/ActiveRecord
private
def unlock_job_artifacts(pipelines)
return if pipelines.empty?
::Ci::JobArtifact.connection.exec_query(
unlock_job_artifacts_query(pipelines.rows.flatten)
)
end
# rubocop:disable CodeReuse/ActiveRecord
def unlock_pipeline_artifacts(pipelines)
return 0 if pipelines.empty?
::Ci::PipelineArtifact.where(pipeline_id: pipelines.rows.flatten).update_all(locked: :unlocked)
end
# rubocop:enable CodeReuse/ActiveRecord
def unlock_pipelines(ci_ref, before_pipeline)
::Ci::Pipeline.connection.exec_query(unlock_pipelines_query(ci_ref, before_pipeline))
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::UnlockArtifactsService, feature_category: :continuous_integration do
using RSpec::Parameterized::TableSyntax
where(:tag) do
[
[false],
[true]
]
end
with_them do
let(:ref) { 'master' }
let(:ref_path) { tag ? "#{::Gitlab::Git::TAG_REF_PREFIX}#{ref}" : "#{::Gitlab::Git::BRANCH_REF_PREFIX}#{ref}" }
let(:ci_ref) { create(:ci_ref, ref_path: ref_path) }
let(:project) { ci_ref.project }
let(:source_job) { create(:ci_build, pipeline: pipeline) }
let!(:old_unlocked_pipeline) { create(:ci_pipeline, :with_persisted_artifacts, ref: ref, tag: tag, project: project, locked: :unlocked) }
let!(:older_pipeline) { create(:ci_pipeline, :with_persisted_artifacts, ref: ref, tag: tag, project: project, locked: :artifacts_locked) }
let!(:older_ambiguous_pipeline) { create(:ci_pipeline, :with_persisted_artifacts, ref: ref, tag: !tag, project: project, locked: :artifacts_locked) }
let!(:code_coverage_pipeline) { create(:ci_pipeline, :with_coverage_report_artifact, ref: ref, tag: tag, project: project, locked: :artifacts_locked) }
let!(:pipeline) { create(:ci_pipeline, :with_persisted_artifacts, ref: ref, tag: tag, project: project, locked: :artifacts_locked) }
let!(:child_pipeline) { create(:ci_pipeline, :with_persisted_artifacts, ref: ref, tag: tag, child_of: pipeline, project: project, locked: :artifacts_locked) }
let!(:newer_pipeline) { create(:ci_pipeline, :with_persisted_artifacts, ref: ref, tag: tag, project: project, locked: :artifacts_locked) }
let!(:other_ref_pipeline) { create(:ci_pipeline, :with_persisted_artifacts, ref: 'other_ref', tag: tag, project: project, locked: :artifacts_locked) }
let!(:sources_pipeline) { create(:ci_sources_pipeline, source_job: source_job, source_project: project, pipeline: child_pipeline, project: project) }
before do
stub_const("#{described_class}::BATCH_SIZE", 1)
end
describe '#execute' do
subject(:execute) { described_class.new(pipeline.project, pipeline.user).execute(ci_ref, before_pipeline) }
context 'when running on a ref before a pipeline' do
let(:before_pipeline) { pipeline }
it 'unlocks artifacts from older pipelines' do
expect { execute }.to change { older_pipeline.reload.locked }.from('artifacts_locked').to('unlocked')
end
it 'does not unlock artifacts for tag or branch with same name as ref' do
expect { execute }.not_to change { older_ambiguous_pipeline.reload.locked }.from('artifacts_locked')
end
it 'does not unlock artifacts from newer pipelines' do
expect { execute }.not_to change { newer_pipeline.reload.locked }.from('artifacts_locked')
end
it 'does not lock artifacts from old unlocked pipelines' do
expect { execute }.not_to change { old_unlocked_pipeline.reload.locked }.from('unlocked')
end
it 'does not unlock artifacts from the same pipeline' do
expect { execute }.not_to change { pipeline.reload.locked }.from('artifacts_locked')
end
it 'does not unlock artifacts for other refs' do
expect { execute }.not_to change { other_ref_pipeline.reload.locked }.from('artifacts_locked')
end
it 'does not unlock artifacts for child pipeline' do
expect { execute }.not_to change { child_pipeline.reload.locked }.from('artifacts_locked')
end
it 'unlocks job artifact records' do
expect { execute }.to change { ::Ci::JobArtifact.artifact_unlocked.count }.from(0).to(2)
end
it 'unlocks pipeline artifact records' do
expect { execute }.to change { ::Ci::PipelineArtifact.artifact_unlocked.count }.from(0).to(1)
end
end
context 'when running on just the ref' do
let(:before_pipeline) { nil }
it 'unlocks artifacts from older pipelines' do
expect { execute }.to change { older_pipeline.reload.locked }.from('artifacts_locked').to('unlocked')
end
it 'unlocks artifacts from newer pipelines' do
expect { execute }.to change { newer_pipeline.reload.locked }.from('artifacts_locked').to('unlocked')
end
it 'unlocks artifacts from the same pipeline' do
expect { execute }.to change { pipeline.reload.locked }.from('artifacts_locked').to('unlocked')
end
it 'does not unlock artifacts for tag or branch with same name as ref' do
expect { execute }.not_to change { older_ambiguous_pipeline.reload.locked }.from('artifacts_locked')
end
it 'does not lock artifacts from old unlocked pipelines' do
expect { execute }.not_to change { old_unlocked_pipeline.reload.locked }.from('unlocked')
end
it 'does not unlock artifacts for other refs' do
expect { execute }.not_to change { other_ref_pipeline.reload.locked }.from('artifacts_locked')
end
it 'unlocks job artifact records' do
expect { execute }.to change { ::Ci::JobArtifact.artifact_unlocked.count }.from(0).to(8)
end
it 'unlocks pipeline artifact records' do
expect { execute }.to change { ::Ci::PipelineArtifact.artifact_unlocked.count }.from(0).to(1)
end
end
end
describe '#unlock_pipelines_query' do
subject { described_class.new(pipeline.project, pipeline.user).unlock_pipelines_query(ci_ref, before_pipeline) }
context 'when running on a ref before a pipeline' do
let(:before_pipeline) { pipeline }
it 'produces the expected SQL string' do
# To be removed when the ignored column id_convert_to_bigint for ci_pipelines is removed
# see https://gitlab.com/gitlab-org/gitlab/-/issues/397000
selected_columns =
Ci::Pipeline.column_names.map do |field|
Ci::Pipeline.connection.quote_table_name("#{Ci::Pipeline.table_name}.#{field}")
end.join(', ')
expect(subject.squish).to eq <<~SQL.squish
UPDATE
"ci_pipelines"
SET
"locked" = 0
WHERE
"ci_pipelines"."id" IN
(SELECT
"ci_pipelines"."id"
FROM
"ci_pipelines"
WHERE
"ci_pipelines"."ci_ref_id" = #{ci_ref.id}
AND "ci_pipelines"."locked" = 1
AND "ci_pipelines"."id" < #{before_pipeline.id}
AND "ci_pipelines"."id" NOT IN
(WITH RECURSIVE
"base_and_descendants"
AS
((SELECT
#{selected_columns}
FROM
"ci_pipelines"
WHERE
"ci_pipelines"."id" = #{before_pipeline.id})
UNION
(SELECT
#{selected_columns}
FROM
"ci_pipelines",
"base_and_descendants",
"ci_sources_pipelines"
WHERE
"ci_sources_pipelines"."pipeline_id" = "ci_pipelines"."id"
AND "ci_sources_pipelines"."source_pipeline_id" = "base_and_descendants"."id"
AND "ci_sources_pipelines"."source_project_id" = "ci_sources_pipelines"."project_id"))
SELECT
"id"
FROM
"base_and_descendants"
AS
"ci_pipelines")
LIMIT 1
FOR UPDATE
SKIP LOCKED)
RETURNING ("ci_pipelines"."id")
SQL
end
end
context 'when running on just the ref' do
let(:before_pipeline) { nil }
it 'produces the expected SQL string' do
expect(subject.squish).to eq <<~SQL.squish
UPDATE
"ci_pipelines"
SET
"locked" = 0
WHERE
"ci_pipelines"."id" IN
(SELECT
"ci_pipelines"."id"
FROM
"ci_pipelines"
WHERE
"ci_pipelines"."ci_ref_id" = #{ci_ref.id}
AND "ci_pipelines"."locked" = 1
LIMIT 1
FOR UPDATE
SKIP LOCKED)
RETURNING
("ci_pipelines"."id")
SQL
end
end
end
describe '#unlock_job_artifacts_query' do
subject { described_class.new(pipeline.project, pipeline.user).unlock_job_artifacts_query(pipeline_ids) }
let(:builds_table) { Ci::Build.quoted_table_name }
context 'when given a single pipeline ID' do
let(:pipeline_ids) { [older_pipeline.id] }
it 'produces the expected SQL string' do
expect(subject.squish).to eq <<~SQL.squish
UPDATE
"ci_job_artifacts"
SET
"locked" = 0
WHERE
"ci_job_artifacts"."job_id" IN
(SELECT
#{builds_table}."id"
FROM
#{builds_table}
WHERE
#{builds_table}."type" = 'Ci::Build'
AND #{builds_table}."commit_id" = #{older_pipeline.id})
RETURNING
("ci_job_artifacts"."id")
SQL
end
end
context 'when given multiple pipeline IDs' do
let(:pipeline_ids) { [older_pipeline.id, newer_pipeline.id, pipeline.id] }
it 'produces the expected SQL string' do
expect(subject.squish).to eq <<~SQL.squish
UPDATE
"ci_job_artifacts"
SET
"locked" = 0
WHERE
"ci_job_artifacts"."job_id" IN
(SELECT
#{builds_table}."id"
FROM
#{builds_table}
WHERE
#{builds_table}."type" = 'Ci::Build'
AND #{builds_table}."commit_id" IN (#{pipeline_ids.join(', ')}))
RETURNING
("ci_job_artifacts"."id")
SQL
end
end
end
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
# Cancels a pipeline's cancelable jobs and, optionally, the cancelable jobs of its child pipelines
class CancelPipelineService
include Gitlab::OptimisticLocking
include Gitlab::Allowable
##
# @cascade_to_children - if true, cancels all related child pipelines for parent-child pipelines
# @auto_canceled_by_pipeline - stores the pipeline_id of the pipeline that triggered cancellation
# @execute_async - if true, cancels the children asynchronously
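#
# A typical invocation might look like this (a sketch; pipeline and user
# come from the caller):
#   Ci::CancelPipelineService.new(pipeline: pipeline, current_user: user).execute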
def initialize(
pipeline:,
current_user:,
cascade_to_children: true,
auto_canceled_by_pipeline: nil,
execute_async: true)
@pipeline = pipeline
@current_user = current_user
@cascade_to_children = cascade_to_children
@auto_canceled_by_pipeline = auto_canceled_by_pipeline
@execute_async = execute_async
end
def execute
return permission_error_response unless can?(current_user, :cancel_pipeline, pipeline)
force_execute
end
# This method should be used only when we want to always cancel the pipeline without
# checking whether the current_user has permissions to do so, or when we don't have
# a current_user available in the context.
def force_execute
return ServiceResponse.error(message: 'No pipeline provided', reason: :no_pipeline) unless pipeline
unless pipeline.cancelable?
return ServiceResponse.error(message: 'Pipeline is not cancelable', reason: :pipeline_not_cancelable)
end
log_pipeline_being_canceled
pipeline.update_column(:auto_canceled_by_id, @auto_canceled_by_pipeline.id) if @auto_canceled_by_pipeline
cancel_jobs(pipeline.cancelable_statuses)
return ServiceResponse.success unless cascade_to_children?
# cancel any bridges that could spin up new child pipelines
cancel_jobs(pipeline.bridges_in_self_and_project_descendants.cancelable)
cancel_children
ServiceResponse.success
end
private
attr_reader :pipeline, :current_user
def log_pipeline_being_canceled
Gitlab::AppJsonLogger.info(
event: 'pipeline_cancel_running',
pipeline_id: pipeline.id,
auto_canceled_by_pipeline_id: @auto_canceled_by_pipeline&.id,
cascade_to_children: cascade_to_children?,
execute_async: execute_async?,
**Gitlab::ApplicationContext.current
)
end
def cascade_to_children?
@cascade_to_children
end
def execute_async?
@execute_async
end
def cancel_jobs(jobs)
retries = 3
retry_lock(jobs, retries, name: 'ci_pipeline_cancel_running') do |jobs_to_cancel|
preloaded_relations = [:project, :pipeline, :deployment, :taggings]
jobs_to_cancel.find_in_batches do |batch|
relation = CommitStatus.id_in(batch)
Preloaders::CommitStatusPreloader.new(relation).execute(preloaded_relations)
relation.each { |job| cancel_job(job) }
end
end
end
def cancel_job(job)
if @auto_canceled_by_pipeline
job.auto_canceled_by_id = @auto_canceled_by_pipeline.id
job.auto_canceled_by_partition_id = @auto_canceled_by_pipeline.partition_id
end
job.cancel
end
def permission_error_response
ServiceResponse.error(
message: 'Insufficient permissions to cancel the pipeline',
reason: :insufficient_permissions
)
end
# For parent child-pipelines only (not multi-project)
def cancel_children
pipeline.all_child_pipelines.each do |child_pipeline|
if execute_async?
::Ci::CancelPipelineWorker.perform_async(
child_pipeline.id,
@auto_canceled_by_pipeline&.id
)
else
# cascade_to_children is false because we iterate through the children ourselves;
# we also cancel bridges beforehand to prevent more children from being created
self.class.new(
pipeline: child_pipeline.reset,
current_user: nil,
cascade_to_children: false,
execute_async: execute_async?,
auto_canceled_by_pipeline: @auto_canceled_by_pipeline
).force_execute
end
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::CancelPipelineService, :aggregate_failures, feature_category: :continuous_integration do
let_it_be(:project) { create(:project) }
let_it_be(:current_user) { project.owner }
let_it_be(:pipeline) { create(:ci_pipeline, project: project) }
let(:service) do
described_class.new(
pipeline: pipeline,
current_user: current_user,
cascade_to_children: cascade_to_children,
auto_canceled_by_pipeline: auto_canceled_by_pipeline,
execute_async: execute_async)
end
let(:cascade_to_children) { true }
let(:auto_canceled_by_pipeline) { nil }
let(:execute_async) { true }
shared_examples 'force_execute' do
context 'when pipeline is not cancelable' do
it 'returns an error' do
expect(response).to be_error
expect(response.reason).to eq(:pipeline_not_cancelable)
end
end
context 'when pipeline is cancelable' do
before do
create(:ci_build, :running, pipeline: pipeline)
create(:ci_build, :created, pipeline: pipeline)
create(:ci_build, :success, pipeline: pipeline)
end
it 'logs the event' do
allow(Gitlab::AppJsonLogger).to receive(:info)
subject
expect(Gitlab::AppJsonLogger)
.to have_received(:info)
.with(
a_hash_including(
event: 'pipeline_cancel_running',
pipeline_id: pipeline.id,
auto_canceled_by_pipeline_id: nil,
cascade_to_children: true,
execute_async: true
)
)
end
it 'cancels all cancelable jobs' do
expect(response).to be_success
expect(pipeline.all_jobs.pluck(:status)).to match_array(%w[canceled canceled success])
end
context 'when auto_canceled_by_pipeline is provided' do
let(:auto_canceled_by_pipeline) { create(:ci_pipeline) }
it 'updates the pipeline and jobs with it' do
subject
expect(pipeline.auto_canceled_by_id).to eq(auto_canceled_by_pipeline.id)
expect(pipeline.all_jobs.canceled.pluck(:auto_canceled_by_id).uniq)
.to eq([auto_canceled_by_pipeline.id])
expect(pipeline.all_jobs.canceled.pluck(:auto_canceled_by_partition_id).uniq)
.to eq([auto_canceled_by_pipeline.partition_id])
end
end
context 'when pipeline has child pipelines' do
let(:child_pipeline) { create(:ci_pipeline, child_of: pipeline) }
let!(:child_job) { create(:ci_build, :running, pipeline: child_pipeline) }
let(:grandchild_pipeline) { create(:ci_pipeline, child_of: child_pipeline) }
let!(:grandchild_job) { create(:ci_build, :running, pipeline: grandchild_pipeline) }
before do
child_pipeline.source_bridge.update!(status: :running)
grandchild_pipeline.source_bridge.update!(status: :running)
end
context 'when execute_async: false' do
let(:execute_async) { false }
it 'cancels the bridge jobs and child jobs' do
expect(response).to be_success
expect(pipeline.bridges.pluck(:status)).to be_all('canceled')
expect(child_pipeline.bridges.pluck(:status)).to be_all('canceled')
expect(child_job.reload).to be_canceled
expect(grandchild_job.reload).to be_canceled
end
end
context 'when execute_async: true' do
it 'schedules the child pipelines for async cancellation' do
expect(::Ci::CancelPipelineWorker)
.to receive(:perform_async)
.with(child_pipeline.id, nil)
expect(::Ci::CancelPipelineWorker)
.to receive(:perform_async)
.with(grandchild_pipeline.id, nil)
expect(response).to be_success
expect(pipeline.bridges.pluck(:status)).to be_all('canceled')
end
end
context 'when cascade_to_children: false' do
let(:execute_async) { true }
let(:cascade_to_children) { false }
it 'does not cancel child pipelines' do
expect(::Ci::CancelPipelineWorker)
.not_to receive(:perform_async)
expect(response).to be_success
expect(pipeline.bridges.pluck(:status)).to be_all('canceled')
expect(child_job.reload).to be_running
end
end
end
context 'when preloading relations' do
let(:pipeline1) { create(:ci_pipeline, :created) }
let(:pipeline2) { create(:ci_pipeline, :created) }
before do
create(:ci_build, :pending, pipeline: pipeline1)
create(:generic_commit_status, :pending, pipeline: pipeline1)
create(:ci_build, :pending, pipeline: pipeline2)
create(:ci_build, :pending, pipeline: pipeline2)
create(:generic_commit_status, :pending, pipeline: pipeline2)
create(:generic_commit_status, :pending, pipeline: pipeline2)
create(:generic_commit_status, :pending, pipeline: pipeline2)
end
it 'preloads relations for each build to avoid N+1 queries' do
control1 = ActiveRecord::QueryRecorder.new do
described_class.new(pipeline: pipeline1, current_user: current_user).force_execute
end
control2 = ActiveRecord::QueryRecorder.new do
described_class.new(pipeline: pipeline2, current_user: current_user).force_execute
end
extra_update_queries = 4 # transition ... => :canceled, queue pop
extra_generic_commit_status_validation_queries = 2 # name_uniqueness_across_types
expect(control2.count)
.to eq(control1.count + extra_update_queries + extra_generic_commit_status_validation_queries)
end
end
end
end
describe '#execute' do
subject(:response) { service.execute }
it_behaves_like 'force_execute'
context 'when user does not have permissions to cancel the pipeline' do
let(:current_user) { create(:user) }
it 'returns an error when user does not have permissions to cancel pipeline' do
expect(response).to be_error
expect(response.reason).to eq(:insufficient_permissions)
end
end
end
describe '#force_execute' do
subject(:response) { service.force_execute }
it_behaves_like 'force_execute'
context 'when pipeline is not provided' do
let(:pipeline) { nil }
it 'returns an error' do
expect(response).to be_error
expect(response.reason).to eq(:no_pipeline)
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
# This service resets skipped jobs so they can be processed again.
# It affects the jobs that depend on the passed-in job parameter.
class ResetSkippedJobsService < ::BaseService
def execute(processables)
@processables = Array.wrap(processables)
@pipeline = @processables.first.pipeline
process_subsequent_jobs
reset_source_bridge
end
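# Usage sketch: accepts a single processable or an array of them, e.g.
#   Ci::ResetSkippedJobsService.new(project, user).execute(job)
#   Ci::ResetSkippedJobsService.new(project, user).execute([job_a, job_b])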
private
def process_subsequent_jobs
dependent_jobs.each do |job|
process(job)
end
end
def reset_source_bridge
@pipeline.reset_source_bridge!(current_user)
end
# rubocop: disable CodeReuse/ActiveRecord
def dependent_jobs
ordered_by_dag(
@pipeline.processables
.from_union(needs_dependent_jobs, stage_dependent_jobs)
.skipped
.ordered_by_stage
.preload(:needs)
)
end
def process(job)
Gitlab::OptimisticLocking.retry_lock(job, name: 'ci_requeue_job') do |job|
job.process(current_user)
end
end
def stage_dependent_jobs
# Get all jobs after the earliest stage of the inputted jobs
min_stage_idx = @processables.map(&:stage_idx).min
@pipeline.processables.after_stage(min_stage_idx)
end
def needs_dependent_jobs
# We must include the hierarchy base here because @processables may include both a parent job
# and its dependents, and we do not want to exclude those dependents from being processed.
::Gitlab::Ci::ProcessableObjectHierarchy.new(
::Ci::Processable.where(id: @processables.map(&:id))
).base_and_descendants
end
def ordered_by_dag(jobs)
sorted_job_names = sort_jobs(jobs).each_with_index.to_h
jobs.group_by(&:stage_idx).flat_map do |_, stage_jobs|
stage_jobs.sort_by { |job| sorted_job_names.fetch(job.name) }
end
end
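# Rough illustration of the Dag.order contract assumed here (job names are
# taken from the specs below, nothing special about them): given
#   { 'a1' => [], 'b1' => ['a1'], 'c1' => ['b1'] }
# it is expected to return a topological ordering such as ['a1', 'b1', 'c1'],
# so each job sorts after the jobs it needs.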
def sort_jobs(jobs)
Gitlab::Ci::YamlProcessor::Dag.order(
jobs.to_h do |job|
[job.name, job.needs.map(&:name)]
end
)
end
# rubocop: enable CodeReuse/ActiveRecord
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::ResetSkippedJobsService, :sidekiq_inline, feature_category: :continuous_integration do
let_it_be(:project) { create(:project, :empty_repo) }
let_it_be(:user) { project.first_owner }
let(:pipeline) do
Ci::CreatePipelineService.new(project, user, { ref: 'master' }).execute(:push).payload
end
let(:a1) { find_job('a1') }
let(:a2) { find_job('a2') }
let(:b1) { find_job('b1') }
let(:input_processables) { a1 } # This is the input used when running service.execute()
before_all do
project.repository.create_file(user, 'init', 'init', message: 'init', branch_name: 'master')
end
subject(:service) { described_class.new(project, user) }
shared_examples 'with a stage-dag mixed pipeline' do
let(:config) do
<<-YAML
stages: [a, b, c]
a1:
stage: a
script: exit $(($RANDOM % 2))
a2:
stage: a
script: exit 0
needs: [a1]
a3:
stage: a
script: exit 0
needs: [a2]
b1:
stage: b
script: exit 0
needs: []
b2:
stage: b
script: exit 0
needs: [a2]
c1:
stage: c
script: exit 0
needs: [b2]
c2:
stage: c
script: exit 0
YAML
end
before do
stub_ci_pipeline_yaml_file(config)
check_jobs_statuses(
a1: 'pending',
a2: 'created',
a3: 'created',
b1: 'pending',
b2: 'created',
c1: 'created',
c2: 'created'
)
b1.success!
check_jobs_statuses(
a1: 'pending',
a2: 'created',
a3: 'created',
b1: 'success',
b2: 'created',
c1: 'created',
c2: 'created'
)
a1.drop!
check_jobs_statuses(
a1: 'failed',
a2: 'skipped',
a3: 'skipped',
b1: 'success',
b2: 'skipped',
c1: 'skipped',
c2: 'skipped'
)
new_a1 = Ci::RetryJobService.new(project, user).clone!(a1)
new_a1.enqueue!
check_jobs_statuses(
a1: 'pending',
a2: 'skipped',
a3: 'skipped',
b1: 'success',
b2: 'skipped',
c1: 'skipped',
c2: 'skipped'
)
end
it 'marks subsequent skipped jobs as processable' do
service.execute(input_processables)
check_jobs_statuses(
a1: 'pending',
a2: 'created',
a3: 'created',
b1: 'success',
b2: 'created',
c1: 'created',
c2: 'created'
)
end
context 'when executed by a different user than the original owner' do
let(:retryer) { create(:user).tap { |u| project.add_maintainer(u) } }
let(:service) { described_class.new(project, retryer) }
it 'reassigns jobs with updated statuses to the retryer' do
expect(jobs_name_status_owner_needs).to contain_exactly(
{ 'name' => 'a1', 'status' => 'pending', 'user_id' => user.id, 'needs' => [] },
{ 'name' => 'a2', 'status' => 'skipped', 'user_id' => user.id, 'needs' => ['a1'] },
{ 'name' => 'a3', 'status' => 'skipped', 'user_id' => user.id, 'needs' => ['a2'] },
{ 'name' => 'b1', 'status' => 'success', 'user_id' => user.id, 'needs' => [] },
{ 'name' => 'b2', 'status' => 'skipped', 'user_id' => user.id, 'needs' => ['a2'] },
{ 'name' => 'c1', 'status' => 'skipped', 'user_id' => user.id, 'needs' => ['b2'] },
{ 'name' => 'c2', 'status' => 'skipped', 'user_id' => user.id, 'needs' => [] }
)
service.execute(input_processables)
expect(jobs_name_status_owner_needs).to contain_exactly(
{ 'name' => 'a1', 'status' => 'pending', 'user_id' => user.id, 'needs' => [] },
{ 'name' => 'a2', 'status' => 'created', 'user_id' => retryer.id, 'needs' => ['a1'] },
{ 'name' => 'a3', 'status' => 'created', 'user_id' => retryer.id, 'needs' => ['a2'] },
{ 'name' => 'b1', 'status' => 'success', 'user_id' => user.id, 'needs' => [] },
{ 'name' => 'b2', 'status' => 'created', 'user_id' => retryer.id, 'needs' => ['a2'] },
{ 'name' => 'c1', 'status' => 'created', 'user_id' => retryer.id, 'needs' => ['b2'] },
{ 'name' => 'c2', 'status' => 'created', 'user_id' => retryer.id, 'needs' => [] }
)
end
end
end
shared_examples 'with stage-dag mixed pipeline with some same-stage needs' do
let(:config) do
<<-YAML
stages: [a, b, c]
a1:
stage: a
script: exit $(($RANDOM % 2))
a2:
stage: a
script: exit 0
needs: [a1]
b1:
stage: b
script: exit 0
needs: [b2]
b2:
stage: b
script: exit 0
c1:
stage: c
script: exit 0
needs: [b2]
c2:
stage: c
script: exit 0
YAML
end
before do
stub_ci_pipeline_yaml_file(config)
check_jobs_statuses(
a1: 'pending',
a2: 'created',
b1: 'created',
b2: 'created',
c1: 'created',
c2: 'created'
)
a1.drop!
check_jobs_statuses(
a1: 'failed',
a2: 'skipped',
b1: 'skipped',
b2: 'skipped',
c1: 'skipped',
c2: 'skipped'
)
new_a1 = Ci::RetryJobService.new(project, user).clone!(a1)
new_a1.enqueue!
check_jobs_statuses(
a1: 'pending',
a2: 'skipped',
b1: 'skipped',
b2: 'skipped',
c1: 'skipped',
c2: 'skipped'
)
end
it 'marks subsequent skipped jobs as processable' do
service.execute(input_processables)
check_jobs_statuses(
a1: 'pending',
a2: 'created',
b1: 'created',
b2: 'created',
c1: 'created',
c2: 'created'
)
end
end
shared_examples 'with same-stage needs' do
let(:config) do
<<-YAML
a1:
script: exit $(($RANDOM % 2))
b1:
script: exit 0
needs: [a1]
c1:
script: exit 0
needs: [b1]
YAML
end
before do
stub_ci_pipeline_yaml_file(config)
check_jobs_statuses(
a1: 'pending',
b1: 'created',
c1: 'created'
)
a1.drop!
check_jobs_statuses(
a1: 'failed',
b1: 'skipped',
c1: 'skipped'
)
new_a1 = Ci::RetryJobService.new(project, user).clone!(a1)
new_a1.enqueue!
check_jobs_statuses(
a1: 'pending',
b1: 'skipped',
c1: 'skipped'
)
end
it 'marks subsequent skipped jobs as processable' do
service.execute(input_processables)
check_jobs_statuses(
a1: 'pending',
b1: 'created',
c1: 'created'
)
end
end
context 'with same-stage needs where the parent jobs do not share the same descendants' do
let(:config) do
<<-YAML
a1:
script: exit $(($RANDOM % 2))
a2:
script: exit $(($RANDOM % 2))
b1:
script: exit 0
needs: [a1]
b2:
script: exit 0
needs: [a2]
c1:
script: exit 0
needs: [b1]
c2:
script: exit 0
needs: [b2]
YAML
end
before do
stub_ci_pipeline_yaml_file(config)
check_jobs_statuses(
a1: 'pending',
a2: 'pending',
b1: 'created',
b2: 'created',
c1: 'created',
c2: 'created'
)
a1.drop!
a2.drop!
check_jobs_statuses(
a1: 'failed',
a2: 'failed',
b1: 'skipped',
b2: 'skipped',
c1: 'skipped',
c2: 'skipped'
)
new_a1 = Ci::RetryJobService.new(project, user).clone!(a1)
new_a1.enqueue!
check_jobs_statuses(
a1: 'pending',
a2: 'failed',
b1: 'skipped',
b2: 'skipped',
c1: 'skipped',
c2: 'skipped'
)
new_a2 = Ci::RetryJobService.new(project, user).clone!(a2)
new_a2.enqueue!
check_jobs_statuses(
a1: 'pending',
a2: 'pending',
b1: 'skipped',
b2: 'skipped',
c1: 'skipped',
c2: 'skipped'
)
end
# This demonstrates that when only a1 is inputted, only the *1 subsequent jobs are reset.
# This is in contrast to the following example when both a1 and a2 are inputted.
it 'marks subsequent skipped jobs as processable' do
service.execute(input_processables)
check_jobs_statuses(
a1: 'pending',
a2: 'pending',
b1: 'created',
b2: 'skipped',
c1: 'created',
c2: 'skipped'
)
end
context 'when multiple processables are inputted' do
# When both a1 and a2 are inputted, all subsequent jobs are reset.
it 'marks subsequent skipped jobs as processable' do
input_processables = [a1, a2]
service.execute(input_processables)
check_jobs_statuses(
a1: 'pending',
a2: 'pending',
b1: 'created',
b2: 'created',
c1: 'created',
c2: 'created'
)
end
end
end
context 'when a single processable is inputted' do
it_behaves_like 'with a stage-dag mixed pipeline'
it_behaves_like 'with stage-dag mixed pipeline with some same-stage needs'
it_behaves_like 'with same-stage needs'
end
context 'when multiple processables are inputted' do
let(:input_processables) { [a1, b1] }
it_behaves_like 'with a stage-dag mixed pipeline'
it_behaves_like 'with stage-dag mixed pipeline with some same-stage needs'
it_behaves_like 'with same-stage needs'
end
private
def find_job(name)
processables.find_by!(name: name)
end
def check_jobs_statuses(statuses)
expect(processables.order(:name).pluck(:name, :status)).to contain_exactly(*statuses.stringify_keys.to_a)
end
def processables
pipeline.processables.latest
end
def jobs_name_status_owner_needs
processables.reload.map do |job|
job.attributes.slice('name', 'status', 'user_id').merge('needs' => job.needs.map(&:name))
end
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
class CopyCrossDatabaseAssociationsService
def execute(old_build, new_build)
ServiceResponse.success
end
end
end
Ci::CopyCrossDatabaseAssociationsService.prepend_mod_with('Ci::CopyCrossDatabaseAssociationsService')
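# NOTE: prepend_mod_with lets the EE edition override #execute; in FOSS this
# service is intentionally a no-op that always returns a success response.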
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::CopyCrossDatabaseAssociationsService, feature_category: :continuous_integration do
let_it_be(:project) { create(:project) }
let_it_be(:pipeline) { create(:ci_pipeline, project: project) }
let_it_be(:old_build) { create(:ci_build, pipeline: pipeline) }
let_it_be(:new_build) { create(:ci_build, pipeline: pipeline) }
subject(:execute) { described_class.new.execute(old_build, new_build) }
describe '#execute' do
it 'returns a success response' do
expect(execute).to be_success
end
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
class CompareTestReportsService < CompareReportsBaseService
def comparer_class
Gitlab::Ci::Reports::TestReportsComparer
end
def serializer_class
TestReportsComparerSerializer
end
def get_report(pipeline)
pipeline&.test_reports
end
def build_comparer(base_report, head_report)
# We need to load the test failure history on the test comparer because we display
# this on the MR widget
super.tap do |test_reports_comparer|
::Gitlab::Ci::Reports::TestFailureHistory.new(failed_test_cases(test_reports_comparer), project).load!
end
end
def failed_test_cases(test_reports_comparer)
test_reports_comparer.suite_comparers.flat_map do |suite_comparer|
suite_comparer.limited_tests.new_failures + suite_comparer.limited_tests.existing_failures
end
end
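# The inherited #execute (from CompareReportsBaseService, not shown here)
# returns a hash shaped like { status: :parsed, data: ... }, which is what
# the specs below assert against.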
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::CompareTestReportsService, feature_category: :continuous_integration do
let(:service) { described_class.new(project) }
let(:project) { create(:project, :repository) }
describe '#execute' do
subject(:comparison) { service.execute(base_pipeline, head_pipeline) }
context 'when head pipeline has test reports' do
let!(:base_pipeline) { nil }
let!(:head_pipeline) { create(:ci_pipeline, :with_test_reports, project: project) }
it 'returns status and data' do
expect(comparison[:status]).to eq(:parsed)
expect(comparison[:data]).to match_schema('entities/test_reports_comparer')
end
end
context 'when base and head pipelines have test reports' do
let!(:base_pipeline) { create(:ci_pipeline, :with_test_reports, project: project) }
let!(:head_pipeline) { create(:ci_pipeline, :with_test_reports, project: project) }
it 'returns status and data' do
expect(comparison[:status]).to eq(:parsed)
expect(comparison[:data]).to match_schema('entities/test_reports_comparer')
end
end
context 'when head pipeline has corrupted test reports' do
let!(:base_pipeline) { nil }
let!(:head_pipeline) { create(:ci_pipeline, project: project) }
before do
build = create(:ci_build, pipeline: head_pipeline, project: head_pipeline.project)
create(:ci_job_artifact, :junit_with_corrupted_data, job: build, project: project)
end
it 'returns a parsed TestReports success status and failure on the individual suite' do
expect(comparison[:status]).to eq(:parsed)
expect(comparison.dig(:data, 'status')).to eq('success')
expect(comparison.dig(:data, 'suites', 0, 'status')).to eq('error')
end
end
context 'test failure history' do
let!(:base_pipeline) { nil }
let!(:head_pipeline) { create(:ci_pipeline, :with_test_reports_with_three_failures, project: project) }
let(:new_failures) do
comparison.dig(:data, 'suites', 0, 'new_failures')
end
let(:recent_failures_per_test_case) do
new_failures.map { |f| f['recent_failures'] }
end
# Create test case failure records based on the head pipeline build
before do
stub_const("Gitlab::Ci::Reports::TestSuiteComparer::DEFAULT_MAX_TESTS", 2)
stub_const("Gitlab::Ci::Reports::TestSuiteComparer::DEFAULT_MIN_TESTS", 1)
build = head_pipeline.builds.last
build.update_column(:finished_at, 1.day.ago) # Just to be sure we are included in the report window
# The JUnit fixture for the given build has 3 failures.
# This service will create 1 test case failure record for each.
Ci::TestFailureHistoryService.new(head_pipeline).execute
end
it 'loads recent failures on limited test cases to avoid building up a huge DB query', :aggregate_failures do
expect(comparison[:data]).to match_schema('entities/test_reports_comparer')
expect(recent_failures_per_test_case).to eq(
[
{ 'count' => 1, 'base_branch' => 'master' },
{ 'count' => 1, 'base_branch' => 'master' }
])
expect(new_failures.count).to eq(2)
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class PlayBuildService < ::BaseService
def execute(build, job_variables_attributes = nil)
check_access!(build, job_variables_attributes)
Ci::EnqueueJobService.new(build, current_user: current_user, variables: job_variables_attributes || []).execute
rescue StateMachines::InvalidTransition
retry_build(build.reset)
end
private
def retry_build(build)
Ci::RetryJobService.new(project, current_user).execute(build)[:job]
end
def check_access!(build, job_variables_attributes)
raise Gitlab::Access::AccessDeniedError unless can?(current_user, :play_job, build)
if job_variables_attributes.present? && !can?(current_user, :set_pipeline_variables, project)
raise Gitlab::Access::AccessDeniedError
end
end
end
end
Ci::PlayBuildService.prepend_mod_with('Ci::PlayBuildService')
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::PlayBuildService, '#execute', feature_category: :continuous_integration do
let(:user) { create(:user, developer_projects: [project]) }
let(:project) { create(:project) }
let(:pipeline) { create(:ci_pipeline, project: project) }
let(:build) { create(:ci_build, :manual, pipeline: pipeline) }
let(:service) do
described_class.new(project, user)
end
context 'when project does not have repository yet' do
let(:project) { create(:project) }
it 'allows user to play build if protected branch rules are met' do
create(:protected_branch, :developers_can_merge, name: build.ref, project: project)
service.execute(build)
expect(build.reload).to be_pending
end
it 'does not allow user with developer role to play build' do
expect { service.execute(build) }
.to raise_error Gitlab::Access::AccessDeniedError
end
end
context 'when project has repository' do
let(:project) { create(:project, :repository) }
it 'allows user with developer role to play a build' do
service.execute(build)
expect(build.reload).to be_pending
end
it 'prevents a blocked developer from playing a build' do
user.block!
expect { service.execute(build) }.to raise_error(Gitlab::Access::AccessDeniedError)
end
end
context 'when build is a playable manual action' do
let(:build) { create(:ci_build, :manual, pipeline: pipeline) }
let!(:branch) { create(:protected_branch, :developers_can_merge, name: build.ref, project: project) }
it 'enqueues the build' do
expect(service.execute(build)).to eq build
expect(build.reload).to be_pending
end
it 'reassigns build user correctly' do
service.execute(build)
expect(build.reload.user).to eq user
end
context 'when a subsequent job is skipped' do
let!(:job) { create(:ci_build, :skipped, pipeline: pipeline, stage_idx: build.stage_idx + 1) }
it 'marks the subsequent job as processable' do
expect { service.execute(build) }.to change { job.reload.status }.from('skipped').to('created')
end
end
context 'when variables are supplied' do
let(:job_variables) do
[{ key: 'first', secret_value: 'first' },
{ key: 'second', secret_value: 'second' }]
end
subject { service.execute(build, job_variables) }
it 'assigns the variables to the build' do
subject
expect(build.reload.job_variables.map(&:key)).to contain_exactly('first', 'second')
end
context 'and variables are invalid' do
let(:job_variables) { [{}] }
it 'resets the attributes of the build' do
build.update!(job_variables_attributes: [{ key: 'old', value: 'old variable' }])
subject
expect(build.job_variables.map(&:key)).to contain_exactly('old')
end
end
context 'when user defined variables are restricted' do
before do
project.update!(restrict_user_defined_variables: true)
end
context 'when user is maintainer' do
before do
project.add_maintainer(user)
end
it 'assigns the variables to the build' do
subject
expect(build.reload.job_variables.map(&:key)).to contain_exactly('first', 'second')
end
end
context 'when user is developer' do
it 'raises an error' do
expect { subject }.to raise_error Gitlab::Access::AccessDeniedError
end
end
end
end
end
context 'when build is not a playable manual action' do
let(:build) { create(:ci_build, :success, pipeline: pipeline) }
let!(:branch) { create(:protected_branch, :developers_can_merge, name: build.ref, project: project) }
it 'duplicates the build' do
duplicate = service.execute(build)
expect(duplicate).not_to eq build
expect(duplicate).to be_pending
end
it 'assigns users correctly' do
duplicate = service.execute(build)
expect(build.user).not_to eq user
expect(duplicate.user).to eq user
end
context 'and is not retryable' do
let(:build) { create(:ci_build, :deployment_rejected, pipeline: pipeline) }
it 'does not duplicate the build' do
expect { service.execute(build) }.not_to change { Ci::Build.count }
end
it 'does not enqueue the build' do
expect { service.execute(build) }.not_to change { build.status }
end
end
end
context 'when build is not action' do
let(:user) { create(:user) }
let(:build) { create(:ci_build, :success, pipeline: pipeline) }
it 'raises an error' do
expect { service.execute(build) }
.to raise_error Gitlab::Access::AccessDeniedError
end
end
context 'when user does not have ability to trigger action' do
let(:user) { create(:user) }
let!(:branch) { create(:protected_branch, :developers_can_merge, name: build.ref, project: project) }
it 'raises an error' do
expect { service.execute(build) }
.to raise_error Gitlab::Access::AccessDeniedError
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class RetryPipelineService < ::BaseService
include Gitlab::OptimisticLocking
def execute(pipeline)
access_response = check_access(pipeline)
return access_response if access_response.error?
pipeline.ensure_scheduling_type!
builds_relation(pipeline).find_each do |build|
next unless can_be_retried?(build)
Ci::RetryJobService.new(project, current_user).clone!(build)
end
pipeline.processables.latest.skipped.find_each do |skipped|
retry_optimistic_lock(skipped, name: 'ci_retry_pipeline') { |build| build.process(current_user) }
end
pipeline.reset_source_bridge!(current_user)
::MergeRequests::AddTodoWhenBuildFailsService
.new(project: project, current_user: current_user)
.close_all(pipeline)
start_pipeline(pipeline)
ServiceResponse.success
rescue Gitlab::Access::AccessDeniedError => e
ServiceResponse.error(message: e.message, http_status: :forbidden)
end
def check_access(pipeline)
if can?(current_user, :update_pipeline, pipeline)
ServiceResponse.success
else
ServiceResponse.error(message: '403 Forbidden', http_status: :forbidden)
end
end
private
def builds_relation(pipeline)
pipeline.retryable_builds.preload_needs
end
def can_be_retried?(build)
can?(current_user, :update_build, build)
end
def start_pipeline(pipeline)
Ci::PipelineCreation::StartPipelineService.new(pipeline).execute
end
end
end
Ci::RetryPipelineService.prepend_mod_with('Ci::RetryPipelineService')
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::RetryPipelineService, '#execute', feature_category: :continuous_integration do
include ProjectForksHelper
let_it_be_with_refind(:user) { create(:user) }
let_it_be_with_refind(:project) { create(:project) }
let(:pipeline) { create(:ci_pipeline, project: project) }
let(:build_stage) { create(:ci_stage, name: 'build', position: 0, pipeline: pipeline) }
let(:test_stage) { create(:ci_stage, name: 'test', position: 1, pipeline: pipeline) }
let(:deploy_stage) { create(:ci_stage, name: 'deploy', position: 2, pipeline: pipeline) }
subject(:service) { described_class.new(project, user) }
context 'when user has full ability to modify pipeline' do
before do
project.add_developer(user)
create(:protected_branch, :developers_can_merge, name: pipeline.ref, project: project)
end
context 'when there are already retried jobs present' do
before do
create_build('rspec', :canceled, build_stage, retried: true)
create_build('rspec', :failed, build_stage)
end
it 'does not retry jobs that have already been retried' do
expect(statuses.first).to be_retried
expect { service.execute(pipeline) }
.to change { CommitStatus.count }.by(1)
end
end
context 'when there are failed builds in the last stage' do
before do
create_build('rspec 1', :success, build_stage)
create_build('rspec 2', :failed, test_stage)
create_build('rspec 3', :canceled, test_stage)
end
it 'enqueues all builds in the last stage' do
service.execute(pipeline)
expect(build('rspec 2')).to be_pending
expect(build('rspec 3')).to be_pending
expect(pipeline.reload).to be_running
end
end
context 'when there are failed or canceled builds in the first stage' do
before do
create_build('rspec 1', :failed, build_stage)
create_build('rspec 2', :canceled, build_stage)
create_build('rspec 3', :canceled, test_stage)
create_build('spinach 1', :canceled, deploy_stage)
end
it 'retries failed builds and marks subsequent builds for processing' do
service.execute(pipeline)
expect(build('rspec 1')).to be_pending
expect(build('rspec 2')).to be_pending
expect(build('rspec 3')).to be_created
expect(build('spinach 1')).to be_created
expect(pipeline.reload).to be_running
end
it 'changes ownership of subsequent builds' do
expect(build('rspec 2').user).not_to eq(user)
expect(build('rspec 3').user).not_to eq(user)
expect(build('spinach 1').user).not_to eq(user)
service.execute(pipeline)
expect(build('rspec 2').user).to eq(user)
expect(build('rspec 3').user).to eq(user)
expect(build('spinach 1').user).to eq(user)
end
end
context 'when there is failed build present which was run on failure' do
before do
create_build('rspec 1', :failed, build_stage)
create_build('rspec 2', :canceled, build_stage)
create_build('rspec 3', :canceled, test_stage)
create_build('report 1', :failed, deploy_stage)
end
it 'retries builds only in the first stage' do
service.execute(pipeline)
expect(build('rspec 1')).to be_pending
expect(build('rspec 2')).to be_pending
expect(build('rspec 3')).to be_created
expect(build('report 1')).to be_created
expect(pipeline.reload).to be_running
end
it 'creates a new job for the report job in this case' do
service.execute(pipeline)
expect(statuses.find_by(name: 'report 1', status: 'failed')).to be_retried
end
end
context 'when there is a failed test in a DAG' do
before do
create_build('build', :success, build_stage)
create_build('build2', :success, build_stage)
test_build = create_build('test', :failed, test_stage, scheduling_type: :dag)
create(:ci_build_need, build: test_build, name: 'build')
create(:ci_build_need, build: test_build, name: 'build2')
end
it 'retries the test' do
service.execute(pipeline)
expect(build('build')).to be_success
expect(build('build2')).to be_success
expect(build('test')).to be_pending
expect(build('test').needs.map(&:name)).to match_array(%w[build build2])
end
context 'when there is a failed DAG test without needs' do
before do
create_build('deploy', :failed, deploy_stage, scheduling_type: :dag)
end
it 'retries the test' do
service.execute(pipeline)
expect(build('build')).to be_success
expect(build('build2')).to be_success
expect(build('test')).to be_pending
expect(build('deploy')).to be_pending
end
end
end
context 'when the last stage was skipped' do
before do
create_build('build 1', :success, build_stage)
create_build('test 2', :failed, test_stage)
create_build('report 3', :skipped, deploy_stage)
create_build('report 4', :skipped, deploy_stage)
end
it 'retries failed builds and processes skipped builds' do
service.execute(pipeline)
expect(build('build 1')).to be_success
expect(build('test 2')).to be_pending
expect(build('report 3')).to be_created
expect(build('report 4')).to be_created
expect(pipeline.reload).to be_running
end
end
context 'when pipeline contains manual actions' do
context 'when there are optional manual actions only' do
context 'when there is a canceled manual action in first stage' do
before do
create_build('rspec 1', :failed, build_stage)
create_build('staging', :canceled, build_stage, when: :manual, allow_failure: true)
create_build('rspec 2', :canceled, test_stage)
end
it 'retries failed builds and marks subsequent builds for processing' do
service.execute(pipeline)
expect(build('rspec 1')).to be_pending
expect(build('staging')).to be_manual
expect(build('rspec 2')).to be_created
expect(pipeline.reload).to be_running
end
it 'changes ownership of subsequent builds' do
expect(build('staging').user).not_to eq(user)
expect(build('rspec 2').user).not_to eq(user)
service.execute(pipeline)
expect(build('staging').user).to eq(user)
expect(build('rspec 2').user).to eq(user)
end
end
end
context 'when pipeline has blocking manual actions defined' do
context 'when pipeline retry should enqueue builds' do
before do
create_build('test', :failed, build_stage)
create_build('deploy', :canceled, build_stage, when: :manual, allow_failure: false)
create_build('verify', :canceled, test_stage)
end
it 'retries failed builds' do
service.execute(pipeline)
expect(build('test')).to be_pending
expect(build('deploy')).to be_manual
expect(build('verify')).to be_created
expect(pipeline.reload).to be_running
end
end
context 'when pipeline retry should block pipeline immediately' do
before do
create_build('test', :success, build_stage)
create_build('deploy:1', :success, test_stage, when: :manual, allow_failure: false)
create_build('deploy:2', :failed, test_stage, when: :manual, allow_failure: false)
create_build('verify', :canceled, deploy_stage)
end
it 'reprocesses blocking manual action and blocks pipeline' do
service.execute(pipeline)
expect(build('deploy:1')).to be_success
expect(build('deploy:2')).to be_manual
expect(build('verify')).to be_created
expect(pipeline.reload).to be_blocked
end
end
end
context 'when there is a skipped manual action in last stage' do
before do
create_build('rspec 1', :canceled, build_stage)
create_build('rspec 2', :skipped, build_stage, when: :manual, allow_failure: true)
create_build('staging', :skipped, test_stage, when: :manual, allow_failure: true)
end
it 'retries canceled job and reprocesses manual actions' do
service.execute(pipeline)
expect(build('rspec 1')).to be_pending
expect(build('rspec 2')).to be_manual
expect(build('staging')).to be_created
expect(pipeline.reload).to be_running
end
end
context 'when there is a created manual action in the last stage' do
before do
create_build('rspec 1', :canceled, build_stage)
create_build('staging', :created, test_stage, when: :manual, allow_failure: true)
end
it 'retries canceled job and does not update the manual action' do
service.execute(pipeline)
expect(build('rspec 1')).to be_pending
expect(build('staging')).to be_created
expect(pipeline.reload).to be_running
end
end
context 'when there is a created manual action in the first stage' do
before do
create_build('rspec 1', :canceled, build_stage)
create_build('staging', :created, build_stage, when: :manual, allow_failure: true)
end
it 'retries canceled job and processes the manual action' do
service.execute(pipeline)
expect(build('rspec 1')).to be_pending
expect(build('staging')).to be_manual
expect(pipeline.reload).to be_running
end
end
context 'when there is a failed manual action' do
before do
create_build('rspec', :success, build_stage)
create_build('manual-rspec', :failed, build_stage, when: :manual, allow_failure: true)
end
it 'processes the manual action' do
service.execute(pipeline)
expect(build('rspec')).to be_success
expect(build('manual-rspec')).to be_manual
expect(pipeline.reload).to be_success
end
end
end
it 'closes all todos about failed jobs for pipeline' do
expect(::MergeRequests::AddTodoWhenBuildFailsService)
.to receive_message_chain(:new, :close_all)
service.execute(pipeline)
end
it 'reprocesses the pipeline' do
expect_any_instance_of(Ci::ProcessPipelineService).to receive(:execute)
service.execute(pipeline)
end
context 'when pipeline has processables with nil scheduling_type' do
let!(:build1) { create_build('build1', :success, build_stage) }
let!(:build2) { create_build('build2', :failed, build_stage) }
let!(:build3) { create_build('build3', :failed, test_stage) }
let!(:build3_needs_build1) { create(:ci_build_need, build: build3, name: build1.name) }
before do
statuses.update_all(scheduling_type: nil)
end
it 'populates scheduling_type of processables' do
service.execute(pipeline)
expect(build1.reload.scheduling_type).to eq('stage')
expect(build2.reload.scheduling_type).to eq('stage')
expect(build3.reload.scheduling_type).to eq('dag')
end
end
context 'when the pipeline is a downstream pipeline and the bridge is depended' do
let!(:bridge) { create(:ci_bridge, :strategy_depend, status: 'success') }
before do
create(:ci_sources_pipeline, pipeline: pipeline, source_job: bridge)
end
context 'without permission' do
it 'does nothing to the bridge' do
expect { service.execute(pipeline) }.to not_change { bridge.reload.status }
.and not_change { bridge.reload.user }
end
end
context 'with permission' do
let!(:bridge_pipeline) { create(:ci_pipeline, project: create(:project)) }
let!(:bridge) do
create(:ci_bridge, :strategy_depend, status: 'success', pipeline: bridge_pipeline)
end
before do
bridge_pipeline.project.add_maintainer(user)
end
it 'marks source bridge as pending' do
expect { service.execute(pipeline) }.to change { bridge.reload.status }.to('pending')
end
it 'assigns the current user to the source bridge' do
expect { service.execute(pipeline) }.to change { bridge.reload.user }.to(user)
end
end
end
context 'when there are skipped jobs in later stages' do
before do
create_build('build 1', :success, build_stage)
create_build('test 2', :failed, test_stage)
create_build('report 3', :skipped, deploy_stage)
create_bridge('deploy 4', :skipped, deploy_stage)
end
it 'retries failed jobs and processes skipped jobs' do
service.execute(pipeline)
expect(build('build 1')).to be_success
expect(build('test 2')).to be_pending
expect(build('report 3')).to be_created
expect(build('deploy 4')).to be_created
expect(pipeline.reload).to be_running
end
end
context 'when user is not allowed to retry build' do
before do
build = create(:ci_build, pipeline: pipeline, status: :failed)
allow_next_instance_of(Ci::RetryJobService) do |service|
allow(service).to receive(:can?).with(user, :update_build, build).and_return(false)
end
end
it 'returns an error' do
response = service.execute(pipeline)
expect(response.http_status).to eq(:forbidden)
expect(response.errors).to include('403 Forbidden')
expect(pipeline.reload).not_to be_running
end
end
end
context 'when user is not allowed to retry pipeline' do
it 'returns an error' do
response = service.execute(pipeline)
expect(response.http_status).to eq(:forbidden)
expect(response.errors).to include('403 Forbidden')
expect(pipeline.reload).not_to be_running
end
end
context 'when user is not allowed to trigger manual action' do
before do
project.add_developer(user)
create(:protected_branch, :maintainers_can_push, name: pipeline.ref, project: project)
end
context 'when there is a failed manual action present' do
before do
create_build('test', :failed, build_stage)
create_build('deploy', :failed, build_stage, when: :manual)
create_build('verify', :canceled, test_stage)
end
it 'returns an error' do
response = service.execute(pipeline)
expect(response.http_status).to eq(:forbidden)
expect(response.errors).to include('403 Forbidden')
expect(pipeline.reload).not_to be_running
end
end
context 'when there is a failed manual action in a later stage' do
before do
create_build('test', :failed, build_stage)
create_build('deploy', :failed, test_stage, when: :manual)
create_build('verify', :canceled, deploy_stage)
end
it 'returns an error' do
response = service.execute(pipeline)
expect(response.http_status).to eq(:forbidden)
expect(response.errors).to include('403 Forbidden')
expect(pipeline.reload).not_to be_running
end
end
end
context 'when maintainer is allowed to push to forked project' do
let(:user) { create(:user) }
let(:project) { create(:project, :public) }
let(:forked_project) { fork_project(project) }
let(:pipeline) { create(:ci_pipeline, project: forked_project, ref: 'fixes') }
before do
project.add_maintainer(user)
create_build('rspec 1', :failed, test_stage, project: project, ref: pipeline.ref)
allow_any_instance_of(Project).to receive(:empty_repo?).and_return(false)
allow_any_instance_of(Project).to receive(:branch_allows_collaboration?).and_return(true)
end
it 'allows retrying the failed pipeline' do
service.execute(pipeline)
expect(build('rspec 1')).to be_pending
expect(pipeline).to be_running
end
end
def statuses
pipeline.reload.statuses
end
# The method name can be confusing because this can actually return both Ci::Build and Ci::Bridge
def build(name)
statuses.latest.find_by(name: name)
end
def create_build(name, status, stage, **opts)
create_processable(:ci_build, name, status, stage, **opts)
end
def create_bridge(name, status, stage, **opts)
create_processable(:ci_bridge, name, status, stage, **opts)
end
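# Creates a Ci::Build or Ci::Bridge in the given stage, then runs
# Ci::ProcessPipelineService so job statuses settle before assertions.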
def create_processable(type, name, status, stage, **opts)
create(
type,
name: name,
status: status,
ci_stage: stage,
stage_idx: stage.position,
pipeline: pipeline,
**opts
) do |_job|
::Ci::ProcessPipelineService.new(pipeline).execute
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class PipelineScheduleService < BaseService
def execute(schedule)
return unless project.persisted?
# Ensure `next_run_at` is set properly before creating a pipeline.
# Otherwise, multiple pipelines could be created in a short interval.
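# Illustrative race: if two overlapping ticks for the same schedule reach
# this point, advancing `next_run_at` first means the second tick no
# longer sees the schedule as due.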
schedule.schedule_next_run!
RunPipelineScheduleWorker.perform_async(schedule.id, current_user&.id)
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::PipelineScheduleService, feature_category: :continuous_integration do
let_it_be(:user) { create(:user) }
let_it_be(:project) { create(:project) }
let(:service) { described_class.new(project, user) }
describe '#execute' do
subject { service.execute(schedule) }
let_it_be(:schedule) { create(:ci_pipeline_schedule, project: project, owner: user) }
it 'schedules next run' do
expect(schedule).to receive(:schedule_next_run!)
subject
end
it 'runs RunPipelineScheduleWorker' do
expect(RunPipelineScheduleWorker)
.to receive(:perform_async).with(schedule.id, schedule.owner.id)
subject
end
context 'when owner is nil' do
let(:schedule) { create(:ci_pipeline_schedule, project: project, owner: nil) }
it 'does not raise an error' do
expect { subject }.not_to raise_error
end
end
context 'when the project is missing' do
let(:project) { create(:project).tap(&:delete) }
it 'does not raise an exception' do
expect { subject }.not_to raise_error
end
it 'does not run RunPipelineScheduleWorker' do
expect(RunPipelineScheduleWorker)
.not_to receive(:perform_async).with(schedule.id, schedule.owner.id)
subject
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
##
# We call this service every time we persist a CI/CD job.
#
# In most cases a job should already have a stage assigned, but when it
# doesn't we need to either find an existing one or create a brand new
# stage.
#
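# Hypothetical example: persisting a build with `stage: 'test'` and no
# `stage_id` links it to the pipeline's existing 'test' stage, or creates
# that stage at the build's `stage_idx` position.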
class EnsureStageService < BaseService
EnsureStageError = Class.new(StandardError)
def execute(build)
@build = build
return if build.stage_id.present?
return if build.invalid?
ensure_stage.tap do |stage|
build.stage_id = stage.id
yield stage if block_given?
end
end
private
def ensure_stage(attempts: 2)
find_stage || create_stage
rescue ActiveRecord::RecordNotUnique
retry if (attempts -= 1) > 0
raise EnsureStageError, <<~EOS
We failed to find or create a unique pipeline stage after 2 retries.
This should never happen and is most likely the result of a bug in
the database load balancing code.
EOS
end
# rubocop: disable CodeReuse/ActiveRecord
def find_stage
@build.pipeline.stages.find_by(name: @build.stage)
end
# rubocop: enable CodeReuse/ActiveRecord
def create_stage
Ci::Stage.create!(
name: @build.stage,
position: @build.stage_idx,
pipeline: @build.pipeline,
project: @build.project
)
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::EnsureStageService, '#execute', feature_category: :continuous_integration do
let_it_be(:project) { create(:project) }
let_it_be(:user) { create(:user) }
let(:stage) { create(:ci_stage) }
let(:job) { build(:ci_build) }
let(:service) { described_class.new(project, user) }
context 'when build has a stage assigned' do
it 'does not create a new stage' do
job.assign_attributes(stage_id: stage.id)
expect { service.execute(job) }.not_to change { Ci::Stage.count }
end
end
context 'when build does not have a stage assigned' do
it 'creates a new stage' do
job.assign_attributes(stage_id: nil, stage: 'test')
expect { service.execute(job) }.to change { Ci::Stage.count }.by(1)
end
end
context 'when build is invalid' do
it 'does not create a new stage' do
job.assign_attributes(stage_id: nil, ref: nil)
expect { service.execute(job) }.not_to change { Ci::Stage.count }
end
end
context 'when a new stage cannot be created because of an exception' do
before do
allow(Ci::Stage).to receive(:create!)
.and_raise(ActiveRecord::RecordNotUnique.new('Duplicates!'))
end
it 'retries up to two times' do
job.assign_attributes(stage_id: nil)
expect(service).to receive(:find_stage).twice
expect { service.execute(job) }
.to raise_error(Ci::EnsureStageService::EnsureStageError)
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class ChangeVariableService < BaseContainerService
def execute
case params[:action]
when :create
container.variables.create(params[:variable_params])
when :update
variable.tap do |target_variable|
target_variable.update(params[:variable_params].except(:key))
end
when :destroy
variable.tap do |target_variable|
target_variable.destroy
end
end
end
private
def variable
params[:variable] || find_variable
end
def find_variable
identifier = params[:variable_params].slice(:id).presence || params[:variable_params].slice(:key)
container.variables.find_by!(identifier) # rubocop:disable CodeReuse/ActiveRecord
end
end
end
::Ci::ChangeVariableService.prepend_mod_with('Ci::ChangeVariableService')
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::ChangeVariableService, feature_category: :secrets_management do
let(:service) { described_class.new(container: group, current_user: user, params: params) }
let_it_be(:user) { create(:user) }
let(:group) { create(:group) }
describe '#execute' do
subject(:execute) { service.execute }
context 'when creating a variable' do
let(:params) { { variable_params: { key: 'new_variable', value: 'variable_value' }, action: :create } }
it 'persists a variable' do
expect { execute }.to change(Ci::GroupVariable, :count).from(0).to(1)
end
end
context 'when updating a variable' do
let!(:variable) { create(:ci_group_variable, value: 'old_value') }
let(:params) { { variable_params: { key: variable.key, value: 'new_value' }, action: :update } }
before do
group.variables << variable
end
it 'updates a variable' do
expect { execute }.to change { variable.reload.value }.from('old_value').to('new_value')
end
context 'when the variable does not exist' do
before do
variable.destroy!
end
it 'raises a record not found error' do
expect { execute }.to raise_error(::ActiveRecord::RecordNotFound)
end
end
end
context 'when destroying a variable' do
let!(:variable) { create(:ci_group_variable) }
let(:params) { { variable_params: { key: variable.key }, action: :destroy } }
before do
group.variables << variable
end
it 'destroys a variable' do
expect { execute }.to change { Ci::GroupVariable.exists?(variable.id) }.from(true).to(false)
end
context 'when the variable does not exist' do
before do
variable.destroy!
end
it 'raises a record not found error' do
expect { execute }.to raise_error(::ActiveRecord::RecordNotFound)
end
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class DestroyPipelineService < BaseService
def execute(pipeline)
raise Gitlab::Access::AccessDeniedError unless can?(current_user, :destroy_pipeline, pipeline)
Ci::ExpirePipelineCacheService.new.execute(pipeline, delete: true)
# ensure cancellation happens sync so we accumulate compute minutes successfully
# before deleting the pipeline.
::Ci::CancelPipelineService.new(
pipeline: pipeline,
current_user: current_user,
cascade_to_children: true,
execute_async: false).force_execute
# The pipeline, the builds, job and pipeline artifacts all get destroyed here.
# Ci::Pipeline#destroy triggers fast destroy on job_artifacts and
# build_trace_chunks to remove the records and data stored in object storage.
# ci_builds records are deleted using ON DELETE CASCADE from ci_pipelines
#
pipeline.reset.destroy!
ServiceResponse.success(message: 'Pipeline not found')
rescue ActiveRecord::RecordNotFound
ServiceResponse.error(message: 'Pipeline not found')
end
end
end
Ci::DestroyPipelineService.prepend_mod
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe ::Ci::DestroyPipelineService, feature_category: :continuous_integration do
let_it_be(:project) { create(:project, :repository) }
let!(:pipeline) { create(:ci_pipeline, :success, project: project, sha: project.commit.id) }
subject { described_class.new(project, user).execute(pipeline) }
context 'user is owner' do
let(:user) { project.first_owner }
it 'destroys the pipeline' do
subject
expect { pipeline.reload }.to raise_error(ActiveRecord::RecordNotFound)
end
it 'clears the cache', :use_clean_rails_redis_caching do
create(:commit_status, :success, pipeline: pipeline, ref: pipeline.ref)
expect(project.pipeline_status.has_status?).to be_truthy
subject
# We need to reset lazy_latest_pipeline cache to simulate a new request
BatchLoader::Executor.clear_current
# Need to use find to avoid memoization
expect(Project.find(project.id).pipeline_status.has_status?).to be_falsey
end
it 'does not log an audit event' do
expect { subject }.not_to change { AuditEvent.count }
end
context 'when the pipeline has jobs' do
let!(:build) { create(:ci_build, project: project, pipeline: pipeline) }
it 'destroys associated jobs' do
subject
expect { build.reload }.to raise_error(ActiveRecord::RecordNotFound)
end
it 'destroys associated stages' do
stages = pipeline.stages
subject
expect(stages).to all(raise_error(ActiveRecord::RecordNotFound))
end
context 'when job has artifacts' do
let!(:artifact) { create(:ci_job_artifact, :archive, job: build) }
it 'destroys associated artifacts' do
subject
expect { artifact.reload }.to raise_error(ActiveRecord::RecordNotFound)
end
it 'inserts deleted objects for object storage files' do
expect { subject }.to change { Ci::DeletedObject.count }
end
end
context 'when job has trace chunks' do
let(:connection_params) { Gitlab.config.artifacts.object_store.connection.symbolize_keys }
let(:connection) { ::Fog::Storage.new(connection_params) }
before do
stub_object_storage(connection_params: connection_params, remote_directory: 'artifacts')
stub_artifacts_object_storage
end
let!(:trace_chunk) { create(:ci_build_trace_chunk, :fog_with_data, build: build) }
it 'destroys associated trace chunks' do
subject
expect { trace_chunk.reload }.to raise_error(ActiveRecord::RecordNotFound)
end
it 'removes data from object store' do
expect { subject }.to change { Ci::BuildTraceChunks::Fog.new.data(trace_chunk) }
end
end
end
context 'when pipeline is in cancelable state', :sidekiq_inline do
let!(:build) { create(:ci_build, :running, pipeline: pipeline) }
let!(:child_pipeline) { create(:ci_pipeline, :running, child_of: pipeline) }
let!(:child_build) { create(:ci_build, :running, pipeline: child_pipeline) }
it 'cancels the pipelines sync' do
cancel_pipeline_service = instance_double(::Ci::CancelPipelineService)
expect(::Ci::CancelPipelineService)
.to receive(:new)
.with(pipeline: pipeline, current_user: user, cascade_to_children: true, execute_async: false)
.and_return(cancel_pipeline_service)
expect(cancel_pipeline_service).to receive(:force_execute)
subject
end
end
end
context 'user is not owner' do
let(:user) { create(:user) }
it 'raises an exception' do
expect { subject }.to raise_error(Gitlab::Access::AccessDeniedError)
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class AppendBuildTraceService
Result = Struct.new(:status, :stream_size, keyword_init: true)
TraceRangeError = Class.new(StandardError)
attr_reader :build, :params
def initialize(build, params)
@build = build
@params = params
end
def execute(body_data)
# TODO:
# it seems that `Content-Range` as formatted by runner is wrong,
# the `byte_end` should point to the final byte, but it points to byte+1
# that means that we have to calculate end of body,
# as we cannot use `content_length[1]`
# Issue: https://gitlab.com/gitlab-org/gitlab-runner/issues/3275
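# Worked example (hypothetical values): for Content-Range "0-4" and the
# 4-byte body "abcd", body_start is 0 and body_end is 0 + 4 = 4, which must
# match the stream size returned by the append below.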
content_range = stream_range.split('-')
body_start = content_range[0].to_i
body_end = body_start + body_data.bytesize
if first_debug_chunk?(body_start)
# Update the build metadata prior to appending trace content
build.enable_debug_trace!
end
if trace_size_exceeded?(body_end)
build.drop(:trace_size_exceeded)
return Result.new(status: 403)
end
stream_size = build.trace.append(body_data, body_start)
unless stream_size == body_end
log_range_error(stream_size, body_end)
return Result.new(status: 416, stream_size: stream_size)
end
Result.new(status: 202, stream_size: stream_size)
end
private
delegate :project, to: :build
def first_debug_chunk?(body_start)
body_start == 0 && debug_trace
end
def stream_range
params.fetch(:content_range)
end
def debug_trace
params.fetch(:debug_trace, false)
end
def log_range_error(stream_size, body_end)
extra = {
build_id: build.id,
body_end: body_end,
stream_size: stream_size,
stream_class: stream_size.class,
stream_range: stream_range
}
build.trace_chunks.last.try do |chunk|
extra.merge!(
chunk_index: chunk.chunk_index,
chunk_store: chunk.data_store,
chunks_count: build.trace_chunks.count
)
end
::Gitlab::ErrorTracking
.log_exception(TraceRangeError.new, extra)
end
def trace_size_exceeded?(size)
project.actual_limits.exceeded?(:ci_jobs_trace_size_limit, size / 1.megabyte)
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::AppendBuildTraceService, feature_category: :continuous_integration do
let_it_be(:project) { create(:project) }
let_it_be(:pipeline) { create(:ci_pipeline, project: project) }
let_it_be_with_reload(:build) { create(:ci_build, :running, pipeline: pipeline) }
before do
stub_feature_flags(ci_enable_live_trace: true)
end
context 'build trace append is successful' do
it 'returns a correct stream size and status code' do
stream_size = 192.kilobytes
body_data = 'x' * stream_size
content_range = "0-#{stream_size}"
result = described_class
.new(build, content_range: content_range)
.execute(body_data)
expect(result.status).to eq 202
expect(result.stream_size).to eq stream_size
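# 192 KB of data should span two trace chunks, assuming the default
# 128 KB chunk size.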
expect(build.trace_chunks.count).to eq 2
end
end
context 'when could not correctly append to a trace' do
it 'responds with content range violation and data stored' do
allow(build).to receive_message_chain(:trace, :append) { 16 }
result = described_class
.new(build, content_range: '0-128')
.execute('x' * 128)
expect(result.status).to eq 416
expect(result.stream_size).to eq 16
end
it 'logs exception if build has live trace' do
build.trace.append('abcd', 0)
expect(::Gitlab::ErrorTracking)
.to receive(:log_exception)
.with(anything, hash_including(chunk_index: 0, chunk_store: 'redis_trace_chunks'))
result = described_class
.new(build, content_range: '0-128')
.execute('x' * 128)
expect(result.status).to eq 416
expect(result.stream_size).to eq 4
end
end
context 'when the trace size is exceeded' do
before do
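# The limit is expressed in megabytes: the service divides the trace byte
# size by 1.megabyte before comparing, so 1.25 MB exceeds a limit of 1.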
project.actual_limits.update!(ci_jobs_trace_size_limit: 1)
end
it 'returns 403 status code' do
stream_size = 1.25.megabytes
body_data = 'x' * stream_size
content_range = "0-#{stream_size}"
result = described_class
.new(build, content_range: content_range)
.execute(body_data)
expect(result.status).to eq 403
expect(result.stream_size).to be_nil
expect(build.trace_chunks.count).to eq 0
expect(build.reload).to be_failed
expect(build.failure_reason).to eq 'trace_size_exceeded'
end
end
context 'when debug_trace param is provided' do
let(:metadata) { Ci::BuildMetadata.find_by(build_id: build) }
let(:stream_size) { 192.kilobytes }
let(:body_data) { 'x' * stream_size }
let(:content_range) { "#{body_start}-#{stream_size}" }
context 'when sending the first trace' do
let(:body_start) { 0 }
it 'updates build metadata debug_trace_enabled' do
described_class
.new(build, content_range: content_range, debug_trace: true)
.execute(body_data)
expect(metadata.debug_trace_enabled).to be(true)
end
end
context 'when sending the second trace' do
let(:body_start) { 1 }
it 'does not update build metadata debug_trace_enabled', :aggregate_failures do
query_recorder = ActiveRecord::QueryRecorder.new do
described_class.new(build, content_range: content_range, debug_trace: true).execute(body_data)
end
expect(metadata.debug_trace_enabled).to be(false)
expect(query_recorder.log).not_to include(/p_ci_builds_metadata/)
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class UpdateBuildQueueService
InvalidQueueTransition = Class.new(StandardError)
attr_reader :metrics
def initialize(metrics = ::Gitlab::Ci::Queue::Metrics)
@metrics = metrics
end
##
# Add a build to the pending builds queue
#
def push(build, transition)
raise InvalidQueueTransition unless transition.to == 'pending'
transition.within_transaction do
result = build.create_queuing_entry!
unless result.empty?
metrics.increment_queue_operation(:build_queue_push)
result.rows.dig(0, 0)
end
end
end
##
# Remove a build from the pending builds queue
#
def pop(build, transition)
raise InvalidQueueTransition unless transition.from == 'pending'
transition.within_transaction { remove!(build) }
end
##
# Force remove build from the queue, without checking a transition state
#
def remove!(build)
removed = build.all_queuing_entries.delete_all
if removed > 0
metrics.increment_queue_operation(:build_queue_pop)
build.id
end
end
##
# Add shared runner build tracking entry (used for queuing).
#
def track(build, transition)
return unless build.shared_runner_build?
raise InvalidQueueTransition unless transition.to == 'running'
transition.within_transaction do
result = ::Ci::RunningBuild.upsert_shared_runner_build!(build)
unless result.empty?
metrics.increment_queue_operation(:shared_runner_build_new)
result.rows.dig(0, 0)
end
end
end
##
# Remove a runtime build tracking entry for a shared runner build (used for
# queuing).
#
def untrack(build, transition)
return unless build.shared_runner_build?
raise InvalidQueueTransition unless transition.from == 'running'
transition.within_transaction do
removed = build.all_runtime_metadata.delete_all
if removed > 0
metrics.increment_queue_operation(:shared_runner_build_done)
build.id
end
end
end
##
# Unblock runner associated with given project / build
#
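# Bumping each matching runner's queue value signals that new work may be
# available, prompting those runners to request builds again.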
def tick(build)
tick_for(build, build.project.all_available_runners)
end
private
def tick_for(build, runners)
runners = runners.with_recent_runner_queue
runners = runners.with_tags
metrics.observe_active_runners(-> { runners.to_a.size })
runners.each do |runner|
metrics.increment_runner_tick(runner)
runner.pick_build!(build)
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::UpdateBuildQueueService, feature_category: :continuous_integration do
let(:project) { create(:project, :repository) }
let(:pipeline) { create(:ci_pipeline, project: project) }
let(:build) { create(:ci_build, pipeline: pipeline) }
describe 'pending builds queue push / pop' do
describe '#push' do
let(:transition) { double('transition') }
before do
allow(transition).to receive(:to).and_return('pending')
allow(transition).to receive(:within_transaction).and_yield
end
context 'when pending build can be created' do
it 'creates a new pending build in transaction' do
queued = subject.push(build, transition)
expect(queued).to eq build.id
end
it 'increments queue push metric' do
metrics = spy('metrics')
described_class.new(metrics).push(build, transition)
expect(metrics)
.to have_received(:increment_queue_operation)
.with(:build_queue_push)
end
end
context 'when invalid transition is detected' do
it 'raises an error' do
allow(transition).to receive(:to).and_return('created')
expect { subject.push(build, transition) }
.to raise_error(described_class::InvalidQueueTransition)
end
end
context 'when duplicate entry exists' do
before do
create(:ci_pending_build, build: build, project: build.project)
end
it 'does nothing and returns build id' do
queued = subject.push(build, transition)
expect(queued).to eq build.id
end
end
end
describe '#pop' do
let(:transition) { double('transition') }
before do
allow(transition).to receive(:from).and_return('pending')
allow(transition).to receive(:within_transaction).and_yield
end
context 'when pending build exists' do
before do
create(:ci_pending_build, build: build, project: build.project)
end
it 'removes pending build in a transaction' do
dequeued = subject.pop(build, transition)
expect(dequeued).to eq build.id
end
it 'increments queue pop metric' do
metrics = spy('metrics')
described_class.new(metrics).pop(build, transition)
expect(metrics)
.to have_received(:increment_queue_operation)
.with(:build_queue_pop)
end
end
context 'when pending build does not exist' do
it 'does nothing if there is no pending build to remove' do
dequeued = subject.pop(build, transition)
expect(dequeued).to be_nil
end
end
context 'when invalid transition is detected' do
it 'raises an error' do
allow(transition).to receive(:from).and_return('created')
expect { subject.pop(build, transition) }
.to raise_error(described_class::InvalidQueueTransition)
end
end
end
describe '#remove!' do
context 'when pending build exists' do
before do
create(:ci_pending_build, build: build, project: build.project)
end
it 'removes pending build in a transaction' do
dequeued = subject.remove!(build)
expect(dequeued).to eq build.id
end
end
context 'when pending build does not exist' do
it 'does nothing if there is no pending build to remove' do
dequeued = subject.remove!(build)
expect(dequeued).to be_nil
end
end
end
end
describe 'shared runner builds tracking' do
let(:runner) { create(:ci_runner, :instance_type) }
let(:build) { create(:ci_build, runner: runner, pipeline: pipeline) }
describe '#track' do
let(:transition) { double('transition') }
before do
allow(transition).to receive(:to).and_return('running')
allow(transition).to receive(:within_transaction).and_yield
end
context 'when a shared runner build can be tracked' do
it 'creates a new shared runner build tracking entry' do
build_id = subject.track(build, transition)
expect(build_id).to eq build.id
end
it 'increments new shared runner build metric' do
metrics = spy('metrics')
described_class.new(metrics).track(build, transition)
expect(metrics)
.to have_received(:increment_queue_operation)
.with(:shared_runner_build_new)
end
end
context 'when invalid transition is detected' do
it 'raises an error' do
allow(transition).to receive(:to).and_return('pending')
expect { subject.track(build, transition) }
.to raise_error(described_class::InvalidQueueTransition)
end
end
context 'when duplicate entry exists' do
before do
create(:ci_running_build, build: build, project: project, runner: runner)
end
it 'does nothing and returns build id' do
build_id = subject.track(build, transition)
expect(build_id).to eq build.id
end
end
end
describe '#untrack' do
let(:transition) { double('transition') }
before do
allow(transition).to receive(:from).and_return('running')
allow(transition).to receive(:within_transaction).and_yield
end
context 'when shared runner build tracking entry exists' do
before do
create(:ci_running_build, build: build, project: project, runner: runner)
end
it 'removes shared runner build' do
build_id = subject.untrack(build, transition)
expect(build_id).to eq build.id
end
it 'increments shared runner build done metric' do
metrics = spy('metrics')
described_class.new(metrics).untrack(build, transition)
expect(metrics)
.to have_received(:increment_queue_operation)
.with(:shared_runner_build_done)
end
end
context 'when tracking entry does not exist' do
it 'does nothing if there is no tracking entry to remove' do
build_id = subject.untrack(build, transition)
expect(build_id).to be_nil
end
end
context 'when invalid transition is detected' do
it 'raises an error' do
allow(transition).to receive(:from).and_return('pending')
expect { subject.untrack(build, transition) }
.to raise_error(described_class::InvalidQueueTransition)
end
end
end
end
describe '#tick' do
shared_examples 'refreshes runner' do
it 'ticks runner queue value' do
expect { subject.tick(build) }.to change { runner.ensure_runner_queue_value }
end
end
shared_examples 'does not refresh runner' do
it 'does not tick runner queue value' do
expect { subject.tick(build) }.not_to change { runner.ensure_runner_queue_value }
end
end
shared_examples 'matching build' do
context 'when there is a online runner that can pick build' do
before do
runner.update!(contacted_at: 30.minutes.ago)
end
it_behaves_like 'refreshes runner'
it 'avoids running redundant queries' do
expect(Ci::Runner).not_to receive(:owned_or_instance_wide)
subject.tick(build)
end
end
end
shared_examples 'mismatching tags' do
context 'when there is no runner that can pick build due to tag mismatch' do
before do
build.tag_list = [:docker]
end
it_behaves_like 'does not refresh runner'
end
end
shared_examples 'recent runner queue' do
context 'when there is runner with expired cache' do
before do
runner.update!(contacted_at: Ci::Runner.recent_queue_deadline)
end
it_behaves_like 'does not refresh runner'
end
end
context 'when updating project runners' do
let(:runner) { create(:ci_runner, :project, projects: [project]) }
it_behaves_like 'matching build'
it_behaves_like 'mismatching tags'
it_behaves_like 'recent runner queue'
context 'when the runner is assigned to another project' do
let(:another_project) { create(:project) }
let(:runner) { create(:ci_runner, :project, projects: [another_project]) }
it_behaves_like 'does not refresh runner'
end
end
context 'when updating shared runners' do
let(:runner) { create(:ci_runner, :instance) }
it_behaves_like 'matching build'
it_behaves_like 'mismatching tags'
it_behaves_like 'recent runner queue'
context 'when there is no runner that can pick build due to being disabled on project' do
before do
build.project.shared_runners_enabled = false
end
it_behaves_like 'does not refresh runner'
end
end
context 'when updating group runners' do
let(:group) { create(:group) }
let(:project) { create(:project, group: group) }
let(:runner) { create(:ci_runner, :group, groups: [group]) }
it_behaves_like 'matching build'
it_behaves_like 'mismatching tags'
it_behaves_like 'recent runner queue'
context 'when there is no runner that can pick build due to being disabled on project' do
before do
build.project.group_runners_enabled = false
end
it_behaves_like 'does not refresh runner'
end
end
context 'avoids N+1 queries', :request_store do
let!(:build) { create(:ci_build, pipeline: pipeline, tag_list: %w[a b]) }
let!(:project_runner) { create(:ci_runner, :project, :online, projects: [project], tag_list: %w[a b c]) }
it 'executes the same number of queries regardless of the number of runners' do
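# Capture a baseline query count with a single matching runner, then assert
# that adding ten more runners does not increase the number of queries.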
control_count = ActiveRecord::QueryRecorder.new { subject.tick(build) }.count
create_list(:ci_runner, 10, :project, :online, projects: [project], tag_list: %w[b c d])
expect { subject.tick(build) }.not_to exceed_all_query_limit(control_count)
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
# TODO: when using this class with exposed artifacts we see that there are
# 2 responsibilities:
# 1. reactive caching interface (same in all cases)
# 2. data generator (report comparison in most of the case but not always)
# issue: https://gitlab.com/gitlab-org/gitlab/issues/34224
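# Subclasses supply the data generator part by overriding `comparer_class`,
# `serializer_class` and `get_report` (see Ci::CompareTestReportsService
# above for a concrete implementation).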
class CompareReportsBaseService < ::BaseService
def execute(base_pipeline, head_pipeline)
return parsing_payload(base_pipeline, head_pipeline) if base_pipeline&.running?
base_report = get_report(base_pipeline)
head_report = get_report(head_pipeline)
comparer = build_comparer(base_report, head_report)
{
status: :parsed,
key: key(base_pipeline, head_pipeline),
data: serializer_class
.new(**serializer_params)
.represent(comparer).as_json
}
rescue Gitlab::Ci::Parsers::ParserError => e
{
status: :error,
key: key(base_pipeline, head_pipeline),
status_reason: e.message
}
end
def latest?(base_pipeline, head_pipeline, data)
data&.fetch(:key, nil) == key(base_pipeline, head_pipeline)
end
protected
def parsing_payload(base_pipeline, head_pipeline)
{
status: :parsing,
key: key(base_pipeline, head_pipeline)
}
end
def build_comparer(base_report, head_report)
comparer_class.new(base_report, head_report)
end
private
def key(base_pipeline, head_pipeline)
[
base_pipeline&.id, base_pipeline&.updated_at,
head_pipeline&.id, head_pipeline&.updated_at
]
end
def comparer_class
raise NotImplementedError
end
def serializer_class
raise NotImplementedError
end
def serializer_params
{ project: project, current_user: current_user }
end
def get_report(pipeline)
raise NotImplementedError
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::CompareReportsBaseService, feature_category: :continuous_integration do
let(:service) { described_class.new(project) }
let(:project) { create(:project, :repository) }
let!(:base_pipeline) { nil }
let!(:head_pipeline) { create(:ci_pipeline, :with_test_reports, project: project) }
let!(:key) { service.send(:key, base_pipeline, head_pipeline) }
describe '#latest?' do
subject { service.latest?(base_pipeline, head_pipeline, data) }
context 'when cache key is latest' do
let(:data) { { key: key } }
it { is_expected.to be_truthy }
end
context 'when cache key is outdated' do
before do
head_pipeline.update_column(:updated_at, 10.minutes.ago)
end
let(:data) { { key: key } }
it { is_expected.to be_falsy }
end
context 'when cache key is empty' do
let(:data) { { key: nil } }
it { is_expected.to be_falsy }
end
end
describe '#execute' do
context 'when base_pipeline is running' do
let!(:base_pipeline) { create(:ci_pipeline, :running, project: project) }
subject { service.execute(base_pipeline, head_pipeline) }
it { is_expected.to eq(status: :parsing, key: key) }
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class CreateCommitStatusService < BaseService
include ::Gitlab::ExclusiveLeaseHelpers
include ::Gitlab::Utils::StrongMemoize
include ::Services::ReturnServiceResponses
delegate :sha, to: :commit
def execute(optional_commit_status_params:)
in_lock(pipeline_lock_key, **pipeline_lock_params) do
@optional_commit_status_params = optional_commit_status_params
unsafe_execute
end
end
private
attr_reader :pipeline, :stage, :commit_status, :optional_commit_status_params
def unsafe_execute
return not_found('Commit') if commit.blank?
return bad_request('State is required') if params[:state].blank?
return not_found('References for commit') if ref.blank?
@pipeline = first_matching_pipeline || create_pipeline
return forbidden unless ::Ability.allowed?(current_user, :update_pipeline, pipeline)
@stage = find_or_create_external_stage
@commit_status = find_or_build_external_commit_status
return bad_request(commit_status.errors.messages) if commit_status.invalid?
response = add_or_update_external_job
return bad_request(response.message) if response.error?
update_merge_request_head_pipeline
response
end
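# Ref resolution falls back from the explicit param, to the ref of an
# existing pipeline for this SHA, to the first branch containing the SHA.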
def ref
params[:ref] || first_matching_pipeline&.ref ||
repository.branch_names_contains(sha).first
end
strong_memoize_attr :ref
def commit
project.commit(params[:sha])
end
strong_memoize_attr :commit
def first_matching_pipeline
pipelines = project.ci_pipelines.newest_first(sha: sha)
pipelines = pipelines.for_ref(params[:ref]) if params[:ref]
pipelines = pipelines.id_in(params[:pipeline_id]) if params[:pipeline_id]
pipelines.first
end
strong_memoize_attr :first_matching_pipeline
def name
params[:name] || params[:context] || 'default'
end
def create_pipeline
project.ci_pipelines.build(
source: :external,
sha: sha,
ref: ref,
user: current_user,
protected: project.protected_for?(ref)
).tap do |new_pipeline|
new_pipeline.ensure_project_iid!
new_pipeline.save!
end
end
def find_or_create_external_stage
pipeline.stages.safe_find_or_create_by!(name: 'external') do |stage| # rubocop:disable Performance/ActiveRecordSubtransactionMethods
stage.position = ::GenericCommitStatus::EXTERNAL_STAGE_IDX
stage.project = project
end
end
def find_or_build_external_commit_status
::GenericCommitStatus.running_or_pending.find_or_initialize_by( # rubocop:disable CodeReuse/ActiveRecord
project: project,
pipeline: pipeline,
name: name,
ref: ref,
user: current_user,
protected: project.protected_for?(ref),
ci_stage: stage,
stage_idx: stage.position,
stage: 'external'
).tap do |new_commit_status|
new_commit_status.assign_attributes(optional_commit_status_params)
end
end
def add_or_update_external_job
::Ci::Pipelines::AddJobService.new(pipeline).execute!(commit_status) do |job|
apply_job_state!(job)
end
end
def update_merge_request_head_pipeline
return unless pipeline.latest?
::MergeRequest
.from_project(project).from_source_branches(ref)
.update_all(head_pipeline_id: pipeline.id)
end
def apply_job_state!(job)
case params[:state]
when 'pending'
job.enqueue!
when 'running'
job.enqueue
job.run!
when 'success'
job.success!
when 'failed'
job.drop!(:api_failure)
when 'canceled'
job.cancel!
else
raise('invalid state')
end
end
def pipeline_lock_key
"api:commit_statuses:project:#{project.id}:sha:#{params[:sha]}"
end
def pipeline_lock_params
{
ttl: 5.seconds,
sleep_sec: 0.1.seconds,
retries: 20
}
end
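# Illustrative arithmetic: with retries: 20 and sleep_sec: 0.1 a competing
# request waits roughly two seconds for the lease before giving up.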
def not_found(message)
error("404 #{message} Not Found", :not_found)
end
def bad_request(message)
error(message, :bad_request)
end
def forbidden
error("403 Forbidden", :forbidden)
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::CreateCommitStatusService, :clean_gitlab_redis_cache, feature_category: :continuous_integration do
using RSpec::Parameterized::TableSyntax
subject(:response) { execute_service(params) }
let_it_be_with_refind(:project) { create(:project, :repository) }
let_it_be(:commit) { project.repository.commit }
let_it_be(:guest) { create_user(:guest) }
let_it_be(:reporter) { create_user(:reporter) }
let_it_be(:developer) { create_user(:developer) }
let(:user) { developer }
let(:sha) { commit.id }
let(:params) { { state: 'pending' } }
let(:job) { response.payload[:job] }
%w[pending running success failed canceled].each do |status|
context "for #{status}" do
let(:params) { { state: status } }
context 'when pipeline for sha does not exists' do
it 'creates commit status and sets pipeline iid' do
expect(response).to be_success
expect(job.sha).to eq(commit.id)
expect(job.status).to eq(status)
expect(job.name).to eq('default')
expect(job.ref).not_to be_empty
expect(job.target_url).to be_nil
expect(job.description).to be_nil
expect(job.pipeline_id).not_to be_nil
expect(CommitStatus.find(job.id)).to be_api_failure if status == 'failed'
expect(::Ci::Pipeline.last.iid).not_to be_nil
end
end
end
end
context 'when status transitions from pending' do
before do
execute_service(state: 'pending')
end
%w[running success failed canceled].each do |status|
context "for #{status}" do
let(:params) { { state: status } }
it "changes to #{status}" do
expect { response }
.to not_change { ::Ci::Pipeline.count }.from(1)
.and not_change { ::Ci::Stage.count }.from(1)
.and not_change { ::CommitStatus.count }.from(1)
expect(response).to be_success
expect(job.status).to eq(status)
end
end
end
context 'for invalid transition' do
let(:params) { { state: 'pending' } }
it 'returns bad request and error message' do
expect { response }
.to not_change { ::Ci::Pipeline.count }.from(1)
.and not_change { ::Ci::Stage.count }.from(1)
.and not_change { ::CommitStatus.count }.from(1)
expect(response).to be_error
expect(response.http_status).to eq(:bad_request)
expect(response.message).to eq(
"Cannot transition status via :enqueue from :pending (Reason(s): Status cannot transition via \"enqueue\")"
)
end
end
end
context 'with all optional parameters' do
context 'when creating a commit status' do
let(:params) do
{
sha: sha,
state: 'success',
context: 'coverage',
ref: 'master',
description: 'test',
coverage: 80.0,
target_url: 'http://gitlab.com/status'
}
end
it 'creates commit status' do
expect { response }
.to change { ::Ci::Pipeline.count }.by(1)
.and change { ::Ci::Stage.count }.by(1)
.and change { ::CommitStatus.count }.by(1)
expect(response).to be_success
expect(job.sha).to eq(commit.id)
expect(job.status).to eq('success')
expect(job.name).to eq('coverage')
expect(job.ref).to eq('master')
expect(job.coverage).to eq(80.0)
expect(job.description).to eq('test')
expect(job.target_url).to eq('http://gitlab.com/status')
end
context 'when merge request exists for given branch' do
let!(:merge_request) do
create(:merge_request, source_project: project, source_branch: 'master', target_branch: 'develop')
end
it 'sets head pipeline' do
expect { response }
.to change { ::Ci::Pipeline.count }.by(1)
.and change { ::Ci::Stage.count }.by(1)
.and change { ::CommitStatus.count }.by(1)
expect(response).to be_success
expect(merge_request.reload.head_pipeline).not_to be_nil
end
end
end
context 'when updating a commit status' do
let(:parameters) do
{
state: 'success',
name: 'coverage',
ref: 'master'
}
end
let(:updatable_optional_attributes) do
{
description: 'new description',
coverage: 90.0
}
end
let(:params) { parameters.merge(updatable_optional_attributes) }
# creating the initial commit status
before do
execute_service(
sha: sha,
state: 'running',
context: 'coverage',
ref: 'master',
description: 'coverage test',
coverage: 10.0,
target_url: 'http://gitlab.com/status'
)
end
it 'updates a commit status' do
expect { response }
.to not_change { ::Ci::Pipeline.count }.from(1)
.and not_change { ::Ci::Stage.count }.from(1)
.and not_change { ::CommitStatus.count }.from(1)
expect(response).to be_success
expect(job.sha).to eq(commit.id)
expect(job.status).to eq('success')
expect(job.name).to eq('coverage')
expect(job.ref).to eq('master')
expect(job.coverage).to eq(90.0)
expect(job.description).to eq('new description')
expect(job.target_url).to eq('http://gitlab.com/status')
end
context 'when the `state` parameter is sent the same' do
let(:parameters) do
{
sha: sha,
state: 'running',
name: 'coverage',
ref: 'master'
}
end
it 'does not update the commit status' do
expect { response }
.to not_change { ::Ci::Pipeline.count }.from(1)
.and not_change { ::Ci::Stage.count }.from(1)
.and not_change { ::CommitStatus.count }.from(1)
expect(response).to be_error
expect(response.http_status).to eq(:bad_request)
expect(response.message).to eq(
"Cannot transition status via :run from :running (Reason(s): Status cannot transition via \"run\")"
)
commit_status = project.commit_statuses.find_by!(name: 'coverage')
expect(commit_status.description).to eq('coverage test')
expect(commit_status.coverage).to eq(10.0)
end
end
end
context 'when a pipeline id is specified' do
let!(:first_pipeline) do
project.ci_pipelines.build(source: :push, sha: commit.id, ref: 'master', status: 'created').tap do |p|
p.ensure_project_iid! # Necessary to avoid cross-database modification error
p.save!
end
end
let!(:other_pipeline) do
project.ci_pipelines.build(source: :push, sha: commit.id, ref: 'master', status: 'created').tap do |p|
p.ensure_project_iid! # Necessary to avoid cross-database modification error
p.save!
end
end
let(:params) do
{
sha: sha,
pipeline_id: other_pipeline.id,
state: 'success',
ref: 'master'
}
end
it 'updates the correct pipeline', :sidekiq_might_not_need_inline do
expect { response }
.to not_change { ::Ci::Pipeline.count }.from(2)
.and change { ::Ci::Stage.count }.by(1)
.and change { ::CommitStatus.count }.by(1)
expect(first_pipeline.reload.status).to eq('created')
expect(other_pipeline.reload.status).to eq('success')
end
end
end
context 'when retrying a commit status' do
subject(:response) do
execute_service(state: 'failed', name: 'test', ref: 'master')
execute_service(state: 'success', name: 'test', ref: 'master')
end
it 'correctly posts a new commit status' do
expect { response }
.to change { ::Ci::Pipeline.count }.by(1)
.and change { ::Ci::Stage.count }.by(1)
.and change { ::CommitStatus.count }.by(2)
expect(response).to be_success
expect(job.sha).to eq(commit.id)
expect(job.status).to eq('success')
end
it 'retries the commit status', :sidekiq_might_not_need_inline do
response
expect(CommitStatus.count).to eq 2
expect(CommitStatus.first).to be_retried
expect(CommitStatus.last.pipeline).to be_success
end
end
context 'when status is invalid' do
let(:params) { { state: 'invalid' } }
it 'does not create commit status' do
expect { response }
.to change { ::Ci::Pipeline.count }.by(1)
.and change { ::Ci::Stage.count }.by(1)
.and not_change { ::CommitStatus.count }.from(0)
expect(response).to be_error
expect(response.http_status).to eq(:bad_request)
expect(response.message).to eq('invalid state')
end
end
context 'when request without a state made' do
let(:params) { {} }
it 'does not create commit status' do
expect { response }
.to not_change { ::Ci::Pipeline.count }.from(0)
.and not_change { ::Ci::Stage.count }.from(0)
.and not_change { ::CommitStatus.count }.from(0)
expect(response).to be_error
expect(response.http_status).to eq(:bad_request)
expect(response.message).to eq('State is required')
end
end
context 'when updating a protected ref' do
let(:params) { { state: 'running', ref: 'master' } }
before do
create(:protected_branch, project: project, name: 'master')
end
context 'with user as developer' do
let(:user) { developer }
it 'does not create commit status' do
expect { response }
.to change { ::Ci::Pipeline.count }.by(1)
.and not_change { ::Ci::Stage.count }.from(0)
.and not_change { ::CommitStatus.count }.from(0)
expect(response).to be_error
expect(response.http_status).to eq(:forbidden)
expect(response.message).to eq('403 Forbidden')
end
end
context 'with user as maintainer' do
let(:user) { create_user(:maintainer) }
it 'creates commit status' do
expect { response }
.to change { ::Ci::Pipeline.count }.by(1)
.and change { ::Ci::Stage.count }.by(1)
.and change { ::CommitStatus.count }.by(1)
expect(response).to be_success
end
end
end
context 'when commit SHA is invalid' do
let(:sha) { 'invalid_sha' }
let(:params) { { state: 'running', sha: sha } }
it 'returns not found error' do
expect { response }
.to not_change { ::Ci::Pipeline.count }.from(0)
.and not_change { ::Ci::Stage.count }.from(0)
.and not_change { ::CommitStatus.count }.from(0)
expect(response).to be_error
expect(response.http_status).to eq(:not_found)
expect(response.message).to eq('404 Commit Not Found')
end
end
context 'when target URL is an invalid address' do
let(:params) { { state: 'pending', target_url: 'invalid url' } }
it 'responds with bad request status and validation errors' do
expect { response }
.to change { ::Ci::Pipeline.count }.by(1)
.and change { ::Ci::Stage.count }.by(1)
.and not_change { ::CommitStatus.count }.from(0)
expect(response).to be_error
expect(response.http_status).to eq(:bad_request)
expect(response.message[:target_url])
.to include 'is blocked: Only allowed schemes are http, https'
end
end
context 'when target URL is an unsupported scheme' do
let(:params) { { state: 'pending', target_url: 'git://example.com' } }
it 'responds with bad request status and validation errors' do
expect { response }
.to change { ::Ci::Pipeline.count }.by(1)
.and change { ::Ci::Stage.count }.by(1)
.and not_change { ::CommitStatus.count }.from(0)
expect(response).to be_error
expect(response.http_status).to eq(:bad_request)
expect(response.message[:target_url])
.to include 'is blocked: Only allowed schemes are http, https'
end
end
context 'when trying to update a status of a different type' do
let!(:pipeline) { create(:ci_pipeline, project: project, sha: sha, ref: 'ref') }
let!(:ci_build) { create(:ci_build, pipeline: pipeline, name: 'test-job') }
let(:params) { { state: 'pending', name: 'test-job' } }
before do
execute_service(params)
end
it 'responds with bad request status and validation errors' do
expect { response }
.to not_change { ::Ci::Pipeline.count }.from(1)
.and not_change { ::Ci::Stage.count }.from(2)
.and not_change { ::CommitStatus.count }.from(1)
expect(response).to be_error
expect(response.http_status).to eq(:bad_request)
expect(response.message[:name])
.to include 'has already been taken'
end
end
context 'with partitions', :ci_partitionable do
let(:current_partition_id) { ci_testing_partition_id }
let(:params) { { state: 'running' } }
before do
allow(Ci::Pipeline)
.to receive(:current_partition_value) { current_partition_id }
end
it 'creates records in the current partition' do
expect { response }
.to change { ::Ci::Pipeline.count }.by(1)
.and change { ::Ci::Stage.count }.by(1)
.and change { ::CommitStatus.count }.by(1)
expect(response).to be_success
status = CommitStatus.find(job.id)
expect(status.partition_id).to eq(current_partition_id)
expect(status.pipeline.partition_id).to eq(current_partition_id)
end
end
context 'for race condition' do
let(:licenses_snyk_params) { { state: 'running', name: 'licenses', description: 'testing' } }
let(:security_snyk_params) { { state: 'running', name: 'security', description: 'testing' } }
let(:snyk_params_list) { [licenses_snyk_params, security_snyk_params] }
it 'creates one pipeline and two jobs (one for licenses, one for security)' do
expect do
snyk_params_list.map do |snyk_params|
Thread.new do
response = execute_service(snyk_params)
expect(response).to be_success
end
end.each(&:join)
end
.to change { ::Ci::Pipeline.count }.by(1)
.and change { ::Ci::Stage.count }.by(1)
.and change { ::CommitStatus.count }.by(2)
end
end
def create_user(access_level_trait)
user = create(:user)
create(:project_member, access_level_trait, user: user, project: project)
user
end
def execute_service(params = self.params)
described_class
.new(project, user, params)
.execute(optional_commit_status_params: params.slice(*%i[target_url description coverage]))
end
end
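
A note on the race-condition example above: it passes only because concurrent status writes for the same commit are serialized. A minimal sketch of the lock identity built by `pipeline_lock_key` and `pipeline_lock_params` (values illustrative; the exclusive-lease helper that consumes them sits outside this excerpt):

```ruby
# Lock identity used to serialize concurrent commit-status writes for one
# project/sha pair. The key format is copied from pipeline_lock_key; the
# options mirror pipeline_lock_params (ttl and sleep_sec are in seconds).
project_id = 42
sha        = '1a2b3c'

lock_key    = "api:commit_statuses:project:#{project_id}:sha:#{sha}"
lock_params = { ttl: 5, sleep_sec: 0.1, retries: 20 }

puts lock_key # => api:commit_statuses:project:42:sha:1a2b3c
p lock_params
```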
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class CompareCodequalityReportsService < CompareReportsBaseService
def comparer_class
Gitlab::Ci::Reports::CodequalityReportsComparer
end
def serializer_class
CodequalityReportsComparerSerializer
end
def get_report(pipeline)
pipeline&.codequality_reports
end
end
end
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::CompareCodequalityReportsService, feature_category: :continuous_integration do
let(:service) { described_class.new(project) }
let(:project) { create(:project, :repository) }
describe '#execute' do
subject { service.execute(base_pipeline, head_pipeline) }
context 'when head pipeline has a codequality report' do
let(:base_pipeline) { nil }
let(:head_pipeline) { create(:ci_pipeline, :with_codequality_reports, project: project) }
it 'returns status and data' do
expect(subject[:status]).to eq(:parsed)
expect(subject[:data]).to match_schema('entities/codequality_reports_comparer')
end
end
context 'when base and head pipelines have codequality reports' do
let(:base_pipeline) { create(:ci_pipeline, :with_codequality_reports, project: project) }
let(:head_pipeline) { create(:ci_pipeline, :with_codequality_reports, project: project) }
it 'returns status and data' do
expect(subject[:status]).to eq(:parsed)
expect(subject[:data]).to match_schema('entities/codequality_reports_comparer')
end
end
end
end
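
`Ci::CompareCodequalityReportsService` is almost pure configuration; the flow it plugs into lives in `CompareReportsBaseService`. A self-contained sketch of that template-method shape, with stand-in classes rather than the real base implementation:

```ruby
# Stand-in base class: the subclass only chooses the comparer and how to
# read a report from a pipeline; everything else is shared.
class CompareReportsBase
  def execute(base_pipeline, head_pipeline)
    comparer = comparer_class.new(get_report(base_pipeline), get_report(head_pipeline))
    { status: :parsed, data: serialize(comparer) }
  end

  private

  def serialize(comparer)
    { errors: comparer.errors }
  end
end

# Toy subclass standing in for the codequality comparison.
class CompareWordCounts < CompareReportsBase
  Comparer = Struct.new(:base, :head) do
    def errors
      head.to_s.split - base.to_s.split
    end
  end

  def comparer_class
    Comparer
  end

  def get_report(pipeline)
    pipeline && pipeline[:report]
  end
end

result = CompareWordCounts.new.execute({ report: 'a b' }, { report: 'a b c' })
puts result.inspect # new findings only: errors => ["c"]
```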
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class DeleteObjectsService
TransactionInProgressError = Class.new(StandardError)
TRANSACTION_MESSAGE = "can't perform network calls inside a database transaction"
BATCH_SIZE = 100
RETRY_IN = 10.minutes
def execute
objects = load_next_batch
destroy_everything(objects)
end
def remaining_batches_count(max_batch_count:)
Ci::DeletedObject
.ready_for_destruction(max_batch_count * BATCH_SIZE)
.size
.fdiv(BATCH_SIZE)
.ceil
end
private
# rubocop: disable CodeReuse/ActiveRecord
def load_next_batch
# `find_by_sql` performs a write in this case and we need to wrap it in
# a transaction to stick to the primary database.
Ci::DeletedObject.transaction do
Ci::DeletedObject.find_by_sql([next_batch_sql, new_pick_up_at: RETRY_IN.from_now])
end
end
# rubocop: enable CodeReuse/ActiveRecord
def next_batch_sql
<<~SQL.squish
UPDATE "ci_deleted_objects"
SET "pick_up_at" = :new_pick_up_at
WHERE "ci_deleted_objects"."id" IN (#{locked_object_ids_sql})
RETURNING *
SQL
end
def locked_object_ids_sql
Ci::DeletedObject.lock_for_destruction(BATCH_SIZE).to_sql
end
def destroy_everything(objects)
raise TransactionInProgressError, TRANSACTION_MESSAGE if transaction_open?
return unless objects.any?
deleted = objects.select(&:delete_file_from_storage)
Ci::DeletedObject.id_in(deleted.map(&:id)).delete_all
end
def transaction_open?
Ci::DeletedObject.connection.transaction_open?
end
end
end
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::DeleteObjectsService, :aggregate_failures, feature_category: :continuous_integration do
let(:service) { described_class.new }
let(:artifact) { create(:ci_job_artifact, :archive) }
let(:data) { [artifact] }
describe '#execute' do
before do
Ci::DeletedObject.bulk_import(data)
# We disable the check because the specs are wrapped in a transaction
allow(service).to receive(:transaction_open?).and_return(false)
end
subject(:execute) { service.execute }
it 'deletes records' do
expect { execute }.to change { Ci::DeletedObject.count }.by(-1)
end
it 'deletes files' do
expect { execute }.to change { artifact.file.exists? }
end
context 'when trying to execute without records' do
let(:data) { [] }
it 'does not change the number of objects' do
expect { execute }.not_to change { Ci::DeletedObject.count }
end
end
context 'when trying to remove the same file multiple times' do
let(:objects) { Ci::DeletedObject.all.to_a }
before do
expect(service).to receive(:load_next_batch).twice.and_return(objects)
end
it 'executes successfully' do
2.times { expect(service.execute).to be_truthy }
end
end
context 'with artifacts both ready and not ready for deletion' do
let(:data) { [] }
let!(:past_ready) { create(:ci_deleted_object, pick_up_at: 2.days.ago) }
let!(:ready) { create(:ci_deleted_object, pick_up_at: 1.day.ago) }
it 'skips records with pick_up_at in the future' do
not_ready = create(:ci_deleted_object, pick_up_at: 1.day.from_now)
expect { execute }.to change { Ci::DeletedObject.count }.from(3).to(1)
expect(not_ready.reload.present?).to be_truthy
end
it 'limits the number of records removed' do
stub_const("#{described_class}::BATCH_SIZE", 1)
expect { execute }.to change { Ci::DeletedObject.count }.by(-1)
end
it 'removes records in order' do
stub_const("#{described_class}::BATCH_SIZE", 1)
execute
expect { past_ready.reload }.to raise_error(ActiveRecord::RecordNotFound)
expect(ready.reload.present?).to be_truthy
end
it 'updates pick_up_at timestamp' do
allow(service).to receive(:destroy_everything)
execute
expect(past_ready.reload.pick_up_at).to be_like_time(10.minutes.from_now)
end
it 'does not delete objects for which file deletion has failed' do
expect(past_ready)
.to receive(:delete_file_from_storage)
.and_return(false)
expect(service)
.to receive(:load_next_batch)
.and_return([past_ready, ready])
expect { execute }.to change { Ci::DeletedObject.count }.from(2).to(1)
expect(past_ready.reload.present?).to be_truthy
end
end
context 'with an open database transaction' do
it 'raises an exception and does not remove records' do
expect(service).to receive(:transaction_open?).and_return(true)
expect { execute }
.to raise_error(Ci::DeleteObjectsService::TransactionInProgressError)
.and change { Ci::DeletedObject.count }.by(0)
end
end
end
describe '#remaining_batches_count' do
subject { service.remaining_batches_count(max_batch_count: 3) }
context 'when there is less than one batch size' do
before do
Ci::DeletedObject.bulk_import(data)
end
it { is_expected.to eq(1) }
end
context 'when there is more than one batch size' do
before do
objects_scope = double
expect(Ci::DeletedObject)
.to receive(:ready_for_destruction)
.and_return(objects_scope)
expect(objects_scope).to receive(:size).and_return(110)
end
it { is_expected.to eq(2) }
end
end
end
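
The interesting part of `Ci::DeleteObjectsService` is how a batch is claimed atomically: rows are locked, stamped with a retry timestamp (so a crashed worker releases them implicitly after `RETRY_IN`), and returned in one statement. A sketch of the assembled SQL, assuming `lock_for_destruction` produces a `FOR UPDATE SKIP LOCKED` subquery (its exact clauses are an assumption here):

```ruby
# Builds the claim-a-batch statement from #next_batch_sql. Plain Ruby; the
# subquery body below is an assumed shape for lock_for_destruction.
batch_size = 100

locked_ids_sql = <<~SQL
  SELECT "ci_deleted_objects"."id"
  FROM "ci_deleted_objects"
  WHERE "pick_up_at" < NOW()
  ORDER BY "pick_up_at"
  LIMIT #{batch_size}
  FOR UPDATE SKIP LOCKED
SQL

claim_sql = <<~SQL
  UPDATE "ci_deleted_objects"
  SET "pick_up_at" = :new_pick_up_at
  WHERE "ci_deleted_objects"."id" IN (#{locked_ids_sql.strip})
  RETURNING *
SQL

puts claim_sql
```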
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class DropPipelineService
PRELOADED_RELATIONS = [:project, :pipeline, :metadata, :deployment, :taggings].freeze
# execute service asynchronously for each cancelable pipeline
def execute_async_for_all(pipelines, failure_reason, context_user)
pipelines.cancelable.select(:id).find_in_batches do |pipelines_batch|
Ci::DropPipelineWorker.bulk_perform_async_with_contexts(
pipelines_batch,
arguments_proc: -> (pipeline) { [pipeline.id, failure_reason] },
context_proc: -> (_) { { user: context_user } }
)
end
end
def execute(pipeline, failure_reason, retries: 3)
Gitlab::OptimisticLocking.retry_lock(pipeline.cancelable_statuses, retries, name: 'ci_pipeline_drop_running') do |cancelables|
cancelables.find_in_batches do |batch|
preload_associations_for_drop(batch)
batch.each do |job|
job.drop(failure_reason)
end
end
end
end
private
def preload_associations_for_drop(commit_status_batch)
Preloaders::CommitStatusPreloader.new(commit_status_batch).execute(PRELOADED_RELATIONS)
end
end
end
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::DropPipelineService, feature_category: :continuous_integration do
let_it_be(:user) { create(:user) }
let(:failure_reason) { :user_blocked }
let!(:cancelable_pipeline) { create(:ci_pipeline, :running, user: user) }
let!(:running_build) { create(:ci_build, :running, pipeline: cancelable_pipeline) }
let!(:commit_status_running) { create(:commit_status, :running, pipeline: cancelable_pipeline) }
let!(:success_pipeline) { create(:ci_pipeline, :success, user: user) }
let!(:success_build) { create(:ci_build, :success, pipeline: success_pipeline) }
let!(:commit_status_success) { create(:commit_status, :success, pipeline: cancelable_pipeline) }
describe '#execute_async_for_all' do
subject { described_class.new.execute_async_for_all(user.pipelines, failure_reason, user) }
it 'drops only cancelable pipelines asynchronously', :sidekiq_inline do
subject
expect(cancelable_pipeline.reload).to be_failed
expect(running_build.reload).to be_failed
expect(success_pipeline.reload).to be_success
expect(success_build.reload).to be_success
end
end
describe '#execute' do
subject { described_class.new.execute(cancelable_pipeline, failure_reason) }
def drop_pipeline!(pipeline)
described_class.new.execute(pipeline, failure_reason)
end
it 'drops each cancelable build in the pipeline', :aggregate_failures do
drop_pipeline!(cancelable_pipeline)
expect(running_build.reload).to be_failed
expect(running_build.failure_reason).to eq(failure_reason.to_s)
expect(success_build.reload).to be_success
end
it 'avoids N+1 queries when reading data' do
control_count = ActiveRecord::QueryRecorder.new do
drop_pipeline!(cancelable_pipeline)
end.count
writes_per_build = 2
load_balancer_queries = 3
expected_reads_count = control_count - writes_per_build
create_list(:ci_build, 5, :running, pipeline: cancelable_pipeline)
expect do
drop_pipeline!(cancelable_pipeline)
end.not_to exceed_query_limit(expected_reads_count + (5 * writes_per_build) + load_balancer_queries)
end
end
end
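
`Gitlab::OptimisticLocking.retry_lock` is internal to the application; conceptually it re-runs the block when another process updated the same rows first. A generic sketch of that loop, with `StaleError` standing in for `ActiveRecord::StaleObjectError` (the real helper also reloads the relation between attempts):

```ruby
# Retry-on-conflict loop around a unit of work over locked rows.
StaleError = Class.new(StandardError)

def retry_lock(subject, retries)
  attempts = 0
  begin
    yield subject
  rescue StaleError
    attempts += 1
    raise if attempts > retries
    retry
  end
end

calls = 0
retry_lock([:job], 3) do |batch|
  calls += 1
  raise StaleError if calls < 3 # two conflicts, then success
  puts "dropped #{batch.size} job(s) after #{calls} attempt(s)"
end
```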
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class PipelineTriggerService < BaseService
include Gitlab::Utils::StrongMemoize
include Services::ReturnServiceResponses
include Ci::DownstreamPipelineHelpers
def execute
if trigger_from_token
set_application_context_from_trigger(trigger_from_token)
create_pipeline_from_trigger(trigger_from_token)
elsif job_from_token
set_application_context_from_job(job_from_token)
create_pipeline_from_job(job_from_token)
end
rescue Ci::AuthJobFinder::AuthError => e
error(e.message, 401)
end
private
PAYLOAD_VARIABLE_KEY = 'TRIGGER_PAYLOAD'
PAYLOAD_VARIABLE_HIDDEN_PARAMS = %i[token].freeze
def create_pipeline_from_trigger(trigger)
# this check is to not leak the presence of the project if user cannot read it
return unless trigger.project == project
return unless can?(trigger.owner, :read_project, project)
response = Ci::CreatePipelineService
.new(project, trigger.owner, ref: params[:ref], variables_attributes: variables)
.execute(:trigger, ignore_skip_ci: true) do |pipeline|
pipeline.trigger_requests.build(trigger: trigger)
end
pipeline_service_response(response.payload)
end
def pipeline_service_response(pipeline)
if pipeline.created_successfully?
success(pipeline: pipeline)
elsif pipeline.persisted?
err = pipeline.errors.messages.presence || pipeline.failure_reason.presence || 'Could not create pipeline'
error(err, :unprocessable_entity)
else
error(pipeline.errors.messages, :bad_request)
end
end
def trigger_from_token
strong_memoize(:trigger) do
Ci::Trigger.find_by_token(params[:token].to_s)
end
end
def create_pipeline_from_job(job)
# this check is to not leak the presence of the project if user cannot read it
return unless can?(job.user, :read_project, project)
response = Ci::CreatePipelineService
.new(project, job.user, ref: params[:ref], variables_attributes: variables)
.execute(:pipeline, ignore_skip_ci: true) do |pipeline|
source = job.sourced_pipelines.build(
source_pipeline: job.pipeline,
source_project: job.project,
pipeline: pipeline,
project: project)
pipeline.source_pipeline = source
end
log_downstream_pipeline_creation(response.payload)
pipeline_service_response(response.payload)
end
def job_from_token
strong_memoize(:job) do
Ci::AuthJobFinder.new(token: params[:token].to_s).execute!
end
end
def variables
param_variables + [payload_variable]
end
def param_variables
params[:variables].to_h.map do |key, value|
{ key: key, value: value }
end
end
def payload_variable
{ key: PAYLOAD_VARIABLE_KEY,
value: Gitlab::Json.dump(params.except(*PAYLOAD_VARIABLE_HIDDEN_PARAMS)),
variable_type: :file }
end
def set_application_context_from_trigger(trigger)
Gitlab::ApplicationContext.push(
user: trigger.owner,
project: trigger.project
)
end
def set_application_context_from_job(job)
Gitlab::ApplicationContext.push(
user: job.user,
project: job.project,
runner: job.runner
)
end
end
end
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::PipelineTriggerService, feature_category: :continuous_integration do
include AfterNextHelpers
let_it_be(:project) { create(:project, :repository) }
before do
stub_ci_pipeline_to_return_yaml_file
end
describe '#execute' do
let_it_be(:user) { create(:user) }
let(:result) { described_class.new(project, user, params).execute }
before do
project.add_developer(user)
end
shared_examples 'detecting an unprocessable pipeline trigger' do
context 'when the pipeline was not created successfully' do
let(:fail_pipeline) do
receive(:execute).and_wrap_original do |original, *args, **kwargs|
response = original.call(*args, **kwargs)
pipeline = response.payload
pipeline.update!(failure_reason: 'unknown_failure')
response
end
end
before do
allow_next(Ci::CreatePipelineService).to fail_pipeline
end
it 'has the correct status code' do
expect { result }.to change { Ci::Pipeline.count }
expect(result).to be_error
expect(result.http_status).to eq(:unprocessable_entity)
end
end
end
context 'with a trigger token' do
let(:trigger) { create(:ci_trigger, project: project, owner: user) }
context 'when trigger belongs to a different project' do
let(:params) { { token: trigger.token, ref: 'master', variables: nil } }
let(:trigger) { create(:ci_trigger, project: create(:project), owner: user) }
it 'does nothing' do
expect { result }.not_to change { Ci::Pipeline.count }
end
end
context 'when trigger owner does not have a permission to read a project' do
let(:params) { { token: trigger.token, ref: 'master', variables: nil } }
let(:trigger) { create(:ci_trigger, project: project, owner: create(:user)) }
it 'does nothing' do
expect { result }.not_to change { Ci::Pipeline.count }
end
end
context 'when params have an existing trigger token' do
context 'when params have an existing ref' do
let(:params) { { token: trigger.token, ref: 'master', variables: nil } }
it 'triggers a pipeline' do
expect { result }.to change { Ci::Pipeline.count }.by(1)
expect(result[:pipeline].ref).to eq('master')
expect(result[:pipeline].project).to eq(project)
expect(result[:pipeline].user).to eq(trigger.owner)
expect(result[:pipeline].trigger_requests.to_a)
.to eq(result[:pipeline].builds.map(&:trigger_request).uniq)
expect(result[:status]).to eq(:success)
end
it 'stores the payload as a variable' do
expect { result }.to change { Ci::PipelineVariable.count }.by(1)
var = result[:pipeline].variables.first
expect(var.key).to eq('TRIGGER_PAYLOAD')
expect(var.value).to eq('{"ref":"master","variables":null}')
expect(var.variable_type).to eq('file')
end
context 'when commit message has [ci skip]' do
before do
allow_next(Ci::Pipeline).to receive(:git_commit_message) { '[ci skip]' }
end
it 'ignores [ci skip] and creates the pipeline' do
expect { result }.to change { Ci::Pipeline.count }.by(1)
expect(result).to be_success
end
end
context 'when params have a variable' do
let(:params) { { token: trigger.token, ref: 'master', variables: variables } }
let(:variables) { { 'AAA' => 'AAA123' } }
it 'has variables' do
expect { result }.to change { Ci::PipelineVariable.count }.by(2)
.and change { Ci::TriggerRequest.count }.by(1)
expect(result[:pipeline].variables.map { |v| { v.key => v.value } }.first).to eq(variables)
expect(result[:pipeline].trigger_requests.last.variables).to be_nil
end
end
context 'when params have duplicate variables' do
let(:params) { { token: trigger.token, ref: 'master', variables: variables } }
let(:variables) { { 'TRIGGER_PAYLOAD' => 'duplicate value' } }
it 'creates a failed pipeline without variables' do
expect { result }.to change { Ci::Pipeline.count }
expect(result).to be_error
expect(result.message[:base]).to eq(['Duplicate variable name: TRIGGER_PAYLOAD'])
end
end
it_behaves_like 'detecting an unprocessable pipeline trigger'
end
context 'when params have a non-existent ref' do
let(:params) { { token: trigger.token, ref: 'invalid-ref', variables: nil } }
it 'does not trigger a pipeline' do
expect { result }.not_to change { Ci::Pipeline.count }
expect(result).to be_error
expect(result.http_status).to eq(:bad_request)
end
end
end
context 'when params have a non-existent trigger token' do
let(:params) { { token: 'invalid-token', ref: nil, variables: nil } }
it 'does not trigger a pipeline' do
expect { result }.not_to change { Ci::Pipeline.count }
expect(result).to be_nil
end
end
end
context 'with a pipeline job token' do
let!(:pipeline) { create(:ci_empty_pipeline, project: project) }
let(:job) { create(:ci_build, :running, pipeline: pipeline, user: user) }
context 'when job user does not have a permission to read a project' do
let(:params) { { token: job.token, ref: 'master', variables: nil } }
let(:job) { create(:ci_build, pipeline: pipeline, user: create(:user)) }
it 'does nothing' do
expect { result }.not_to change { Ci::Pipeline.count }
end
end
context 'when job is not running' do
let(:params) { { token: job.token, ref: 'master', variables: nil } }
let(:job) { create(:ci_build, :success, pipeline: pipeline, user: user) }
it 'does nothing', :aggregate_failures do
expect { result }.not_to change { Ci::Pipeline.count }
expect(result[:message]).to eq('Job is not running')
expect(result[:http_status]).to eq(401)
end
end
context 'when job does not have a project' do
let(:params) { { token: job.token, ref: 'master', variables: nil } }
let(:job) { create(:ci_build, status: :running, pipeline: pipeline, user: user) }
it 'does nothing', :aggregate_failures do
job.update!(project: nil)
expect { result }.not_to change { Ci::Pipeline.count }
expect(result[:message]).to eq('Project has been deleted!')
expect(result[:http_status]).to eq(401)
end
end
context 'when params have an existing job token' do
context 'when params have an existing ref' do
let(:params) { { token: job.token, ref: 'master', variables: nil } }
it 'triggers a pipeline' do
expect { result }.to change { Ci::Pipeline.count }.by(1)
expect(result[:pipeline].ref).to eq('master')
expect(result[:pipeline].project).to eq(project)
expect(result[:pipeline].user).to eq(job.user)
expect(result[:status]).to eq(:success)
end
it_behaves_like 'logs downstream pipeline creation' do
let(:downstream_pipeline) { result[:pipeline] }
let(:expected_root_pipeline) { pipeline }
let(:expected_hierarchy_size) { 2 }
let(:expected_downstream_relationship) { :multi_project }
end
context 'when commit message has [ci skip]' do
before do
allow_next_instance_of(Ci::Pipeline) do |instance|
allow(instance).to receive(:git_commit_message) { '[ci skip]' }
end
end
it 'ignores [ci skip] and creates the pipeline' do
expect { result }.to change { Ci::Pipeline.count }.by(1)
expect(result[:status]).to eq(:success)
end
end
context 'when params have a variable' do
let(:params) { { token: job.token, ref: 'master', variables: variables } }
let(:variables) { { 'AAA' => 'AAA123' } }
it 'has variables' do
expect { result }.to change { Ci::PipelineVariable.count }.by(2)
.and change { Ci::Sources::Pipeline.count }.by(1)
expect(result[:pipeline].variables.map { |v| { v.key => v.value } }.first).to eq(variables)
expect(job.sourced_pipelines.last.pipeline_id).to eq(result[:pipeline].id)
end
end
context 'when params have duplicate variables' do
let(:params) { { token: job.token, ref: 'master', variables: variables } }
let(:variables) { { 'TRIGGER_PAYLOAD' => 'duplicate value' } }
it 'creates a failed pipeline without variables' do
expect { result }.to change { Ci::Pipeline.count }
expect(result).to be_error
expect(result.message[:base]).to eq(['Duplicate variable name: TRIGGER_PAYLOAD'])
end
end
it_behaves_like 'detecting an unprocessable pipeline trigger'
end
context 'when params have a non-existent ref' do
let(:params) { { token: job.token, ref: 'invalid-ref', variables: nil } }
it 'does not trigger a job in the pipeline' do
expect { result }.not_to change { Ci::Pipeline.count }
expect(result).to be_error
expect(result.http_status).to eq(:bad_request)
end
end
end
context 'when params have a non-existent trigger token' do
let(:params) { { token: 'invalid-token', ref: nil, variables: nil } }
it 'does not trigger a pipeline' do
expect { result }.not_to change { Ci::Pipeline.count }
expect(result).to be_nil
end
end
end
end
end
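
The 'stores the payload as a variable' example pins down the `TRIGGER_PAYLOAD` behaviour. A small sketch mirroring `payload_variable`, with `JSON.generate` as a stand-in for `Gitlab::Json.dump`:

```ruby
require 'json'

# Everything in the trigger params except the token is serialized into a
# file-type CI variable named TRIGGER_PAYLOAD.
params = { token: 'secret', ref: 'master', variables: nil }

payload_variable = {
  key: 'TRIGGER_PAYLOAD',
  value: JSON.generate(params.except(:token)),
  variable_type: :file
}

puts payload_variable[:value] # => {"ref":"master","variables":null}
```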
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class BuildCancelService
def initialize(build, user)
@build = build
@user = user
end
def execute
return forbidden unless allowed?
return unprocessable_entity unless build.cancelable?
build.cancel
ServiceResponse.success(payload: build)
end
private
attr_reader :build, :user
def allowed?
user.can?(:cancel_build, build)
end
def forbidden
ServiceResponse.error(message: 'Forbidden', http_status: :forbidden)
end
def unprocessable_entity
ServiceResponse.error(message: 'Unprocessable entity', http_status: :unprocessable_entity)
end
end
end
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::BuildCancelService, feature_category: :continuous_integration do
let_it_be(:user) { create(:user) }
let_it_be(:project) { create(:project) }
let_it_be(:pipeline) { create(:ci_pipeline, project: project) }
describe '#execute' do
subject(:execute) { described_class.new(build, user).execute }
context 'when user is authorized to cancel the build' do
before do
project.add_maintainer(user)
end
context 'when build is cancelable' do
let!(:build) { create(:ci_build, :cancelable, pipeline: pipeline) }
it 'transits build to canceled', :aggregate_failures do
response = execute
expect(response).to be_success
expect(response.payload.reload).to be_canceled
end
end
context 'when build is not cancelable' do
let!(:build) { create(:ci_build, :canceled, pipeline: pipeline) }
it 'responds with unprocessable entity', :aggregate_failures do
response = execute
expect(response).to be_error
expect(response.http_status).to eq(:unprocessable_entity)
end
end
end
context 'when user is not authorized to cancel the build' do
let!(:build) { create(:ci_build, :cancelable, pipeline: pipeline) }
it 'responds with forbidden', :aggregate_failures do
response = execute
expect(response).to be_error
expect(response.http_status).to eq(:forbidden)
end
end
end
end
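
The assertions above lean on the `ServiceResponse` value object. A simplified stand-in showing the success/error shape they rely on (illustration only; the real class carries more behaviour):

```ruby
# Minimal ServiceResponse stand-in: a status plus either a payload or an
# error message with an HTTP status hint.
class ServiceResponse
  attr_reader :payload, :message, :http_status

  def self.success(payload: nil)
    new(:success, payload: payload)
  end

  def self.error(message:, http_status:)
    new(:error, message: message, http_status: http_status)
  end

  def initialize(status, payload: nil, message: nil, http_status: nil)
    @status = status
    @payload = payload
    @message = message
    @http_status = http_status
  end

  def success?
    @status == :success
  end

  def error?
    @status == :error
  end
end

response = ServiceResponse.error(message: 'Forbidden', http_status: :forbidden)
puts [response.error?, response.http_status].inspect # => [true, :forbidden]
```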
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class RunScheduledBuildService < ::BaseService
def execute(build)
unless can?(current_user, :update_build, build)
raise Gitlab::Access::AccessDeniedError
end
build.enqueue_scheduled!
end
end
end
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::RunScheduledBuildService, feature_category: :continuous_integration do
let(:user) { create(:user) }
let(:project) { create(:project) }
let(:pipeline) { create(:ci_pipeline, project: project) }
subject { described_class.new(project, user).execute(build) }
context 'when user can update build' do
before do
project.add_developer(user)
create(:protected_branch, :developers_can_merge, name: pipeline.ref, project: project)
end
context 'when build is scheduled' do
context 'when scheduled_at is expired' do
let(:build) { create(:ci_build, :expired_scheduled, user: user, project: project, pipeline: pipeline) }
it 'can run the build' do
expect { subject }.not_to raise_error
expect(build).to be_pending
end
context 'when build requires resource' do
let(:resource_group) { create(:ci_resource_group, project: project) }
before do
build.update!(resource_group: resource_group)
end
it 'transits to waiting for resource status' do
expect { subject }.to change { build.status }.from('scheduled').to('waiting_for_resource')
end
end
end
context 'when scheduled_at is not expired' do
let(:build) { create(:ci_build, :scheduled, user: user, project: project, pipeline: pipeline) }
it 'cannot run the build' do
expect { subject }.to raise_error(StateMachines::InvalidTransition)
expect(build).to be_scheduled
end
end
end
context 'when build is not scheduled' do
let(:build) { create(:ci_build, :created, user: user, project: project, pipeline: pipeline) }
it 'cannot run the build' do
expect { subject }.to raise_error(StateMachines::InvalidTransition)
expect(build).to be_created
end
end
end
context 'when user can not update build' do
context 'when build is scheduled' do
let(:build) { create(:ci_build, :scheduled, user: user, project: project, pipeline: pipeline) }
it 'cannot run the build' do
expect { subject }.to raise_error(Gitlab::Access::AccessDeniedError)
expect(build).to be_scheduled
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
# TODO: a couple of points with this approach:
# + reuses existing architecture and reactive caching
# - it's not a report comparison and some comparing features must be turned off.
# see CompareReportsBaseService for more notes.
# issue: https://gitlab.com/gitlab-org/gitlab/issues/34224
class GenerateCodequalityMrDiffReportService < CompareReportsBaseService
def execute(base_pipeline, head_pipeline)
merge_request = MergeRequest.find_by_id(params[:id])
{
status: :parsed,
key: key(base_pipeline, head_pipeline),
data: head_pipeline.pipeline_artifacts.find_by_file_type(:code_quality_mr_diff).present.for_files(merge_request)
}
rescue StandardError => e
Gitlab::ErrorTracking.track_exception(e, project_id: project.id)
{
status: :error,
key: key(base_pipeline, head_pipeline),
status_reason: _('An error occurred while fetching codequality mr diff reports.')
}
end
def latest?(base_pipeline, head_pipeline, data)
data&.fetch(:key, nil) == key(base_pipeline, head_pipeline)
end
end
end
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::GenerateCodequalityMrDiffReportService, feature_category: :code_review_workflow do
let(:service) { described_class.new(project) }
let(:project) { create(:project, :repository) }
describe '#execute' do
subject { service.execute(base_pipeline, head_pipeline) }
context 'when head pipeline has codequality mr diff report' do
let!(:merge_request) { create(:merge_request, :with_codequality_mr_diff_reports, source_project: project, id: 123456789) }
let!(:service) { described_class.new(project, nil, id: merge_request.id) }
let!(:head_pipeline) { merge_request.head_pipeline }
let!(:base_pipeline) { nil }
it 'returns status and data', :aggregate_failures do
expect_any_instance_of(Ci::PipelineArtifact) do |instance|
expect(instance).to receive(:present)
expect(instance).to receive(:for_files).with(merge_request).and_call_original
end
expect(subject[:status]).to eq(:parsed)
expect(subject[:data]).to eq(files: {})
end
end
context 'when head pipeline does not have a codequality mr diff report' do
let!(:merge_request) { create(:merge_request, source_project: project) }
let!(:service) { described_class.new(project, nil, id: merge_request.id) }
let!(:head_pipeline) { merge_request.head_pipeline }
let!(:base_pipeline) { nil }
it 'returns status and error message' do
expect(subject[:status]).to eq(:error)
expect(subject[:status_reason]).to include('An error occurred while fetching codequality mr diff reports.')
end
end
context 'when head pipeline has codequality mr diff report and no merge request associated' do
let!(:head_pipeline) { create(:ci_pipeline, :with_codequality_mr_diff_report, project: project) }
let!(:base_pipeline) { nil }
it 'returns status and error message' do
expect(subject[:status]).to eq(:error)
expect(subject[:status_reason]).to include('An error occurred while fetching codequality mr diff reports.')
end
end
end
end
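
The `key`/`latest?` pair implements a reactive-caching freshness check: cached data carries the key it was computed under and goes stale as soon as either pipeline changes. A sketch with an assumed id-based key, not the base class's actual key implementation:

```ruby
# Freshness check: compare the key stored with the cached data against the
# key recomputed from the current pipelines.
def cache_key(base_pipeline_id, head_pipeline_id)
  "codequality-mr-diff:#{base_pipeline_id}:#{head_pipeline_id}"
end

data = { key: cache_key(1, 2), status: :parsed }

puts(data&.fetch(:key, nil) == cache_key(1, 2)) # => true  (still fresh)
puts(data&.fetch(:key, nil) == cache_key(1, 3)) # => false (head pipeline changed)
```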
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class GenerateKubeconfigService
def initialize(pipeline, token:, environment:)
@pipeline = pipeline
@token = token
@environment = environment
@template = Gitlab::Kubernetes::Kubeconfig::Template.new
end
def execute
template.add_cluster(
name: cluster_name,
url: Gitlab::Kas.tunnel_url
)
agent_authorizations.each do |authorization|
agent = authorization.agent
user = user_name(agent)
template.add_user(
name: user,
token: agent_token(agent)
)
template.add_context(
name: context_name(agent),
namespace: context_namespace(authorization),
cluster: cluster_name,
user: user
)
end
template
end
private
attr_reader :pipeline, :token, :environment, :template
def agent_authorizations
::Clusters::Agents::Authorizations::CiAccess::FilterService.new(
pipeline.cluster_agent_authorizations,
environment: environment
).execute
end
def cluster_name
'gitlab'
end
def user_name(agent)
['agent', agent.id].join(delimiter)
end
def context_name(agent)
[agent.project.full_path, agent.name].join(delimiter)
end
def context_namespace(authorization)
authorization.config['default_namespace']
end
def agent_token(agent)
['ci', agent.id, token].join(delimiter)
end
def delimiter
':'
end
end
end
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::GenerateKubeconfigService, feature_category: :deployment_management do
describe '#execute' do
let_it_be(:group) { create(:group) }
let_it_be(:project) { create(:project, group: group) }
let_it_be(:pipeline) { create(:ci_empty_pipeline, project: project) }
let_it_be(:build) { create(:ci_build, project: project, pipeline: pipeline) }
let_it_be(:agent_project) { create(:project, group: group, name: 'project-containing-agent-config') }
let_it_be(:project_agent_authorization) do
agent = create(:cluster_agent, project: agent_project)
create(:agent_ci_access_project_authorization, agent: agent, project: project)
end
let_it_be(:group_agent_authorization) do
agent = create(:cluster_agent, project: agent_project)
create(:agent_ci_access_group_authorization, agent: agent, group: group)
end
let(:template) do
instance_double(
Gitlab::Kubernetes::Kubeconfig::Template,
add_cluster: nil,
add_user: nil,
add_context: nil
)
end
let(:agent_authorizations) { [project_agent_authorization, group_agent_authorization] }
let(:filter_service) do
instance_double(
::Clusters::Agents::Authorizations::CiAccess::FilterService,
execute: agent_authorizations
)
end
subject(:execute) { described_class.new(pipeline, token: build.token, environment: nil).execute }
before do
allow(Gitlab::Kubernetes::Kubeconfig::Template).to receive(:new).and_return(template)
allow(::Clusters::Agents::Authorizations::CiAccess::FilterService).to receive(:new).and_return(filter_service)
end
it 'returns a Kubeconfig Template' do
expect(execute).to eq(template)
end
it 'adds a cluster' do
expect(template).to receive(:add_cluster).with(
name: 'gitlab',
url: Gitlab::Kas.tunnel_url
).once
execute
end
it "filters the pipeline's agents by `nil` environment" do
expect(::Clusters::Agents::Authorizations::CiAccess::FilterService).to receive(:new).with(
pipeline.cluster_agent_authorizations,
environment: nil
)
execute
end
it 'adds user and context for all eligible agents', :aggregate_failures do
agent_authorizations.each do |authorization|
expect(template).to receive(:add_user).with(
name: "agent:#{authorization.agent.id}",
token: "ci:#{authorization.agent.id}:#{build.token}"
)
expect(template).to receive(:add_context).with(
name: "#{agent_project.full_path}:#{authorization.agent.name}",
namespace: 'production',
cluster: 'gitlab',
user: "agent:#{authorization.agent.id}"
)
end
execute
end
context 'when environment is specified' do
subject(:execute) { described_class.new(pipeline, token: build.token, environment: 'production').execute }
it "filters the pipeline's agents by the specified environment" do
expect(::Clusters::Agents::Authorizations::CiAccess::FilterService).to receive(:new).with(
pipeline.cluster_agent_authorizations,
environment: 'production'
)
execute
end
end
end
end
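
The user/context/token assertions encode the naming scheme from `user_name`, `context_name` and `agent_token`, mirrored here with illustrative values:

```ruby
# Names the service feeds into the kubeconfig template, all joined with ':'.
agent_id     = 5
agent_name   = 'my-agent'
project_path = 'group/project-containing-agent-config'
job_token    = 'job-token'

user    = ['agent', agent_id].join(':')         # => "agent:5"
context = [project_path, agent_name].join(':')  # => "group/project-containing-agent-config:my-agent"
token   = ['ci', agent_id, job_token].join(':') # => "ci:5:job-token"

puts [user, context, token].inspect
```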
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class UpdateBuildStateService
include ::Gitlab::Utils::StrongMemoize
include ::Gitlab::ExclusiveLeaseHelpers
Result = Struct.new(:status, :backoff, keyword_init: true)
InvalidTraceError = Class.new(StandardError)
ACCEPT_TIMEOUT = 5.minutes.freeze
attr_reader :build, :params, :metrics
def initialize(build, params, metrics = ::Gitlab::Ci::Trace::Metrics.new)
@build = build
@params = params
@metrics = metrics
end
def execute
unless accept_available?
return update_build_state!
end
ensure_pending_state!
in_build_trace_lock do
process_build_state!
end
end
private
def ensure_pending_state!
pending_state.created_at
end
def process_build_state!
if live_chunks_pending?
if pending_state_outdated?
discard_build_trace!
update_build_state!
else
accept_build_state!
end
else
validate_build_trace!
update_build_state!
end
end
def accept_build_state!
build.trace_chunks.live.find_each do |chunk|
chunk.schedule_to_persist!
end
metrics.increment_trace_operation(operation: :accepted)
::Gitlab::Ci::Runner::Backoff.new(pending_state.created_at).then do |backoff|
Result.new(status: 202, backoff: backoff.to_seconds)
end
end
def validate_build_trace!
return unless has_chunks?
unless live_chunks_pending?
metrics.increment_trace_operation(operation: :finalized)
metrics.observe_migration_duration(pending_state_seconds)
end
::Gitlab::Ci::Trace::Checksum.new(build).then do |checksum|
unless checksum.valid?
metrics.increment_trace_operation(operation: :invalid)
metrics.increment_error_counter(error_reason: :chunks_invalid_checksum)
if checksum.corrupted?
metrics.increment_trace_operation(operation: :corrupted)
metrics.increment_error_counter(error_reason: :chunks_invalid_size)
end
next unless log_invalid_chunks?
::Gitlab::ErrorTracking.log_exception(InvalidTraceError.new,
project_path: build.project.full_path,
build_id: build.id,
state_crc32: checksum.state_crc32,
chunks_crc32: checksum.chunks_crc32,
chunks_count: checksum.chunks_count,
chunks_corrupted: checksum.corrupted?
)
end
end
end
def update_build_state!
case build_state
when 'running'
build.touch if build.needs_touch?
Result.new(status: 200)
when 'success'
build.success!
Result.new(status: 200)
when 'failed'
build.drop_with_exit_code!(params[:failure_reason], params[:exit_code])
Result.new(status: 200)
else
Result.new(status: 400)
end
end
def discard_build_trace!
metrics.increment_trace_operation(operation: :discarded)
end
def accept_available?
!build_running? && has_checksum? && chunks_migration_enabled?
end
def live_chunks_pending?
build.trace_chunks.live.any?
end
def has_chunks?
build.trace_chunks.any?
end
def pending_state_outdated?
pending_state_duration > ACCEPT_TIMEOUT
end
def pending_state_duration
Time.current - pending_state.created_at
end
def pending_state_seconds
pending_state_duration.seconds
end
def build_state
params.dig(:state).to_s
end
def has_checksum?
trace_checksum.present?
end
def build_running?
build_state == 'running'
end
def trace_checksum
params.dig(:output, :checksum) || params.dig(:checksum)
end
def trace_bytesize
params.dig(:output, :bytesize)
end
def pending_state
strong_memoize(:pending_state) { ensure_pending_state }
end
def ensure_pending_state
build_state = Ci::BuildPendingState.safe_find_or_create_by(
build_id: build.id,
partition_id: build.partition_id,
state: params.fetch(:state),
trace_checksum: trace_checksum,
trace_bytesize: trace_bytesize,
failure_reason: failure_reason
)
unless build_state.present?
metrics.increment_trace_operation(operation: :conflict)
end
build_state || build.pending_state
end
def failure_reason
reason = params.dig(:failure_reason)
return unless reason
Ci::BuildPendingState.failure_reasons.fetch(reason.to_s, 'unknown_failure')
end
##
# This method is releasing an exclusive lock on a build trace the moment we
# conclude that build status has been written and the build state update
# has been committed to the database.
#
# Because a build state machine schedules a bunch of workers to run after
# build status transition to complete, we do not want to keep the lease
# until all the workers are scheduled because it opens a possibility of
# race conditions happening.
#
# Instead of keeping the lease until the transition is fully done and
# workers are scheduled, we immediately release the lock after the database
# commit happens.
#
def in_build_trace_lock(&block)
build.trace.lock do |_, lease| # rubocop:disable CodeReuse/ActiveRecord
build.run_on_status_commit { lease.cancel }
yield
end
rescue ::Gitlab::Ci::Trace::LockedError
metrics.increment_trace_operation(operation: :locked)
accept_build_state!
end
def chunks_migration_enabled?
::Feature.enabled?(:ci_enable_live_trace, build.project) &&
::Feature.enabled?(:ci_accept_trace, build.project, type: :ops)
end
def log_invalid_chunks?
::Feature.enabled?(:ci_trace_log_invalid_chunks, build.project, type: :ops)
end
end
end
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::UpdateBuildStateService, feature_category: :continuous_integration do
let_it_be(:project) { create(:project) }
let_it_be(:pipeline) { create(:ci_pipeline, project: project) }
let(:build) { create(:ci_build, :running, pipeline: pipeline) }
let(:metrics) { spy('metrics') }
subject { described_class.new(build, params) }
before do
stub_feature_flags(ci_enable_live_trace: true)
end
context 'when build has unknown failure reason' do
let(:params) do
{
output: { checksum: 'crc32:12345678', bytesize: 123 },
state: 'failed',
failure_reason: 'no idea here',
exit_code: 42
}
end
it 'updates a build status' do
result = subject.execute
expect(build).to be_failed
expect(result.status).to eq 200
end
end
context 'when build has failed' do
let(:params) do
{
output: { checksum: 'crc32:12345678', bytesize: 123 },
state: 'failed',
failure_reason: 'script_failure',
exit_code: 7
}
end
it 'sends a build failed event to Snowplow' do
expect(::Ci::TrackFailedBuildWorker)
.to receive(:perform_async).with(build.id, params[:exit_code], params[:failure_reason])
subject.execute
end
end
context 'when build does not have checksum' do
context 'when state has changed' do
let(:params) { { state: 'success' } }
it 'updates the state of a running build' do
subject.execute
expect(build).to be_success
end
it 'returns 200 OK status' do
result = subject.execute
expect(result.status).to eq 200
end
it 'does not increment finalized trace metric' do
execute_with_stubbed_metrics!
expect(metrics)
.not_to have_received(:increment_trace_operation)
.with(operation: :finalized)
end
end
context 'when it is a heartbeat request' do
let(:params) { { state: 'success' } }
it 'updates a build timestamp' do
expect { subject.execute }.to change { build.updated_at }
end
end
context 'when state is unknown' do
let(:params) { { state: 'unknown' } }
it 'responds with 400 bad request' do
result = subject.execute
expect(result.status).to eq 400
expect(build).to be_running
end
end
end
context 'when build has a checksum' do
let(:params) do
{
output: { checksum: 'crc32:12345678', bytesize: 123 },
state: 'failed',
failure_reason: 'script_failure',
exit_code: 42
}
end
context 'when build does not have associated trace chunks' do
it 'updates a build status' do
result = subject.execute
expect(build).to be_failed
expect(result.status).to eq 200
end
it 'updates the allow_failure flag' do
expect(build)
.to receive(:drop_with_exit_code!)
.with('script_failure', 42)
.and_call_original
subject.execute
end
it 'does not increment invalid trace metric' do
execute_with_stubbed_metrics!
expect(metrics)
.not_to have_received(:increment_trace_operation)
.with(operation: :invalid)
end
it 'does not increment chunks_invalid_checksum trace metric' do
execute_with_stubbed_metrics!
expect(metrics)
.not_to have_received(:increment_error_counter)
.with(error_reason: :chunks_invalid_checksum)
end
end
context 'when build trace has been migrated' do
before do
create(:ci_build_trace_chunk, :persisted, build: build, initial_data: 'abcd')
end
it 'updates a build state' do
subject.execute
expect(build).to be_failed
end
it 'updates the allow_failure flag' do
expect(build)
.to receive(:drop_with_exit_code!)
.with('script_failure', 42)
.and_call_original
subject.execute
end
it 'responds with 200 OK status' do
result = subject.execute
expect(result.status).to eq 200
end
it 'does not set a backoff value' do
result = subject.execute
expect(result.backoff).to be_nil
end
it 'increments trace finalized operation metric' do
execute_with_stubbed_metrics!
expect(metrics)
.to have_received(:increment_trace_operation)
.with(operation: :finalized)
end
it 'records migration duration in a histogram' do
freeze_time do
create(:ci_build_pending_state, build: build, created_at: 0.5.seconds.ago)
execute_with_stubbed_metrics!
end
expect(metrics)
.to have_received(:observe_migration_duration)
.with(0.5)
end
context 'when trace checksum is not valid' do
it 'increments invalid trace metric' do
execute_with_stubbed_metrics!
expect(metrics)
.to have_received(:increment_trace_operation)
.with(operation: :invalid)
end
it 'increments chunks_invalid_checksum trace metric' do
execute_with_stubbed_metrics!
expect(metrics)
.to have_received(:increment_error_counter)
.with(error_reason: :chunks_invalid_checksum)
end
end
context 'when trace checksum is valid' do
let(:params) do
{ output: { checksum: 'crc32:ed82cd11', bytesize: 4 }, state: 'success' }
end
it 'does not increment invalid or corrupted trace metric' do
execute_with_stubbed_metrics!
expect(metrics)
.not_to have_received(:increment_trace_operation)
.with(operation: :invalid)
expect(metrics)
.not_to have_received(:increment_trace_operation)
.with(operation: :corrupted)
expect(metrics)
.not_to have_received(:increment_error_counter)
.with(error_reason: :chunks_invalid_checksum)
expect(metrics)
.not_to have_received(:increment_error_counter)
.with(error_reason: :chunks_invalid_size)
end
context 'when using deprecated parameters' do
let(:params) do
{ checksum: 'crc32:ed82cd11', state: 'success' }
end
it 'does not increment invalid or corrupted trace metric' do
execute_with_stubbed_metrics!
expect(metrics)
.not_to have_received(:increment_trace_operation)
.with(operation: :invalid)
expect(metrics)
.not_to have_received(:increment_trace_operation)
.with(operation: :corrupted)
expect(metrics)
.not_to have_received(:increment_error_counter)
.with(error_reason: :chunks_invalid_checksum)
expect(metrics)
.not_to have_received(:increment_error_counter)
.with(error_reason: :chunks_invalid_size)
end
end
end
context 'when trace checksum is invalid and the log is corrupted' do
let(:params) do
{ output: { checksum: 'crc32:12345678', bytesize: 1 }, state: 'success' }
end
it 'increments invalid and corrupted trace metrics' do
execute_with_stubbed_metrics!
expect(metrics)
.to have_received(:increment_trace_operation)
.with(operation: :invalid)
expect(metrics)
.to have_received(:increment_trace_operation)
.with(operation: :corrupted)
expect(metrics)
.to have_received(:increment_error_counter)
.with(error_reason: :chunks_invalid_checksum)
expect(metrics)
.to have_received(:increment_error_counter)
.with(error_reason: :chunks_invalid_size)
end
end
context 'when trace checksum is invalid but the log seems fine' do
let(:params) do
{ output: { checksum: 'crc32:12345678', bytesize: 4 }, state: 'success' }
end
it 'does not increment corrupted trace metric' do
execute_with_stubbed_metrics!
expect(metrics)
.to have_received(:increment_trace_operation)
.with(operation: :invalid)
expect(metrics)
.to have_received(:increment_error_counter)
.with(error_reason: :chunks_invalid_checksum)
expect(metrics)
.not_to have_received(:increment_trace_operation)
.with(operation: :corrupted)
expect(metrics)
.not_to have_received(:increment_error_counter)
.with(error_reason: :chunks_invalid_size)
end
end
context 'when failed to acquire a build trace lock' do
it 'accepts a state update request' do
build.trace.lock do
result = subject.execute
expect(result.status).to eq 202
end
end
it 'increments locked trace metric' do
build.trace.lock do
execute_with_stubbed_metrics!
expect(metrics)
.to have_received(:increment_trace_operation)
.with(operation: :locked)
end
end
end
end
context 'when build trace has not been migrated yet' do
before do
create(:ci_build_trace_chunk, :redis_with_data, build: build)
end
it 'does not update a build state' do
subject.execute
expect(build).to be_running
end
it 'responds with 202 accepted' do
result = subject.execute
expect(result.status).to eq 202
end
it 'sets a request backoff value' do
result = subject.execute
expect(result.backoff.to_i).to be > 0
end
it 'schedules live chunks for migration' do
expect(Ci::BuildTraceChunkFlushWorker)
.to receive(:perform_async)
.with(build.trace_chunks.first.id)
subject.execute
end
it 'creates a pending state record' do
subject.execute
build.pending_state.then do |status|
expect(status).to be_present
expect(status.state).to eq 'failed'
expect(status.trace_checksum).to eq 'crc32:12345678'
expect(status.failure_reason).to eq 'script_failure'
end
end
it 'increments trace accepted operation metric' do
execute_with_stubbed_metrics!
expect(metrics)
.to have_received(:increment_trace_operation)
.with(operation: :accepted)
end
it 'does not increment invalid trace metric' do
execute_with_stubbed_metrics!
expect(metrics)
.not_to have_received(:increment_trace_operation)
.with(operation: :invalid)
expect(metrics)
.not_to have_received(:increment_error_counter)
.with(error_reason: :chunks_invalid_checksum)
end
context 'when build pending state is outdated' do
before do
build.create_pending_state(
state: 'failed',
trace_checksum: 'crc32:12345678',
failure_reason: 'script_failure',
created_at: 10.minutes.ago
)
end
it 'responds with 200 OK' do
result = subject.execute
expect(result.status).to eq 200
end
it 'updates build state' do
subject.execute
expect(build.reload).to be_failed
expect(build.failure_reason).to eq 'script_failure'
end
it 'increments discarded traces metric' do
execute_with_stubbed_metrics!
expect(metrics)
.to have_received(:increment_trace_operation)
.with(operation: :discarded)
end
it 'does not increment finalized trace metric' do
execute_with_stubbed_metrics!
expect(metrics)
.not_to have_received(:increment_trace_operation)
.with(operation: :finalized)
end
end
context 'when build pending state has changes' do
before do
build.create_pending_state(
state: 'success',
created_at: 10.minutes.ago
)
end
it 'uses stored state and responds with 200 OK' do
result = subject.execute
expect(result.status).to eq 200
end
it 'increments conflict trace metric' do
execute_with_stubbed_metrics!
expect(metrics)
.to have_received(:increment_trace_operation)
.with(operation: :conflict)
end
end
context 'when live traces are disabled' do
before do
stub_feature_flags(ci_enable_live_trace: false)
end
it 'responds with 200 OK' do
result = subject.execute
expect(result.status).to eq 200
end
end
end
end
def execute_with_stubbed_metrics!
described_class
.new(build, params, metrics)
.execute
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class PlayManualStageService < BaseService
def initialize(project, current_user, params)
super
@pipeline = params[:pipeline]
end
def execute(stage)
stage.processables.manual.each do |processable|
next unless processable.playable?
processable.play(current_user)
rescue Gitlab::Access::AccessDeniedError
logger.error(message: 'Unable to play manual action', processable_id: processable.id)
end
end
private
attr_reader :pipeline, :current_user
def logger
Gitlab::AppLogger
end
end
end
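# Usage sketch (illustrative, mirroring the spec that follows): replay all
# playable manual jobs in a stage with
#   Ci::PlayManualStageService.new(project, user, pipeline: pipeline).execute(stage)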
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::PlayManualStageService, '#execute', feature_category: :continuous_integration do
let(:current_user) { create(:user) }
let(:pipeline) { create(:ci_pipeline, user: current_user) }
let(:project) { pipeline.project }
let(:downstream_project) { create(:project) }
let(:service) { described_class.new(project, current_user, pipeline: pipeline) }
let(:stage_status) { 'manual' }
let(:stage) do
create(:ci_stage, pipeline: pipeline, project: project, name: 'test')
end
before do
project.add_maintainer(current_user)
downstream_project.add_maintainer(current_user)
create_builds_for_stage(status: stage_status)
create_bridge_for_stage(status: stage_status)
end
context 'when pipeline has manual processables' do
before do
service.execute(stage)
end
it 'starts manual processables from pipeline' do
expect(pipeline.processables.manual.count).to eq(0)
end
it 'updates manual processables' do
pipeline.processables.each do |processable|
expect(processable.user).to eq(current_user)
end
end
end
context 'when pipeline has no manual processables' do
let(:stage_status) { 'failed' }
before do
service.execute(stage)
end
it 'does not update the processables' do
expect(pipeline.processables.failed.count).to eq(4)
end
end
context 'when user does not have permission on a specific processable' do
before do
allow_next_instance_of(Ci::Processable) do |instance|
allow(instance).to receive(:play).and_raise(Gitlab::Access::AccessDeniedError)
end
service.execute(stage)
end
it 'logs the error' do
expect(Gitlab::AppLogger).to receive(:error)
.exactly(stage.processables.manual.count)
service.execute(stage)
end
end
private
def create_builds_for_stage(options)
options.merge!({
when: 'manual',
pipeline: pipeline,
stage_id: stage.id,
user: pipeline.user
})
create_list(:ci_build, 3, options)
end
def create_bridge_for_stage(options)
options.merge!({
when: 'manual',
pipeline: pipeline,
stage_id: stage.id,
user: pipeline.user,
downstream: downstream_project
})
create(:ci_bridge, options)
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class BuildEraseService
include BaseServiceUtility
def initialize(build, current_user)
@build = build
@current_user = current_user
end
def execute
unless build.erasable?
return ServiceResponse.error(message: _('Build cannot be erased'), http_status: :unprocessable_entity)
end
if build.project.refreshing_build_artifacts_size?
Gitlab::ProjectStatsRefreshConflictsLogger.warn_artifact_deletion_during_stats_refresh(
method: 'Ci::BuildEraseService#execute',
project_id: build.project_id
)
end
destroy_artifacts
erase_trace!
update_erased!
ServiceResponse.success(payload: build)
end
private
attr_reader :build, :current_user
def destroy_artifacts
Ci::JobArtifacts::DestroyBatchService.new(build.job_artifacts).execute
end
def erase_trace!
build.trace.erase!
end
def update_erased!
build.update(erased_by: current_user, erased_at: Time.current, artifacts_expire_at: nil)
end
end
end
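# Usage sketch (illustrative): the service returns a ServiceResponse, so a
# caller can branch on the outcome:
#   response = Ci::BuildEraseService.new(build, current_user).execute
#   response.success? ? response.payload : response.message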
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::BuildEraseService, feature_category: :continuous_integration do
let_it_be(:user) { create(:user) }
let(:build) { create(:ci_build, :artifacts, :trace_artifact, artifacts_expire_at: 100.days.from_now) }
subject(:service) { described_class.new(build, user) }
describe '#execute' do
context 'when build is erasable' do
before do
allow(build).to receive(:erasable?).and_return(true)
end
it 'is successful' do
result = service.execute
expect(result).to be_success
end
it 'erases artifacts' do
service.execute
expect(build.artifacts_file).not_to be_present
expect(build.artifacts_metadata).not_to be_present
end
it 'erases trace' do
service.execute
expect(build.trace).not_to exist
end
it 'records erasure detail' do
freeze_time do
service.execute
expect(build.erased_by).to eq(user)
expect(build.erased_at).to eq(Time.current)
expect(build.artifacts_expire_at).to be_nil
end
end
context 'when project is undergoing statistics refresh' do
before do
allow(build.project).to receive(:refreshing_build_artifacts_size?).and_return(true)
end
it 'logs a warning' do
expect(Gitlab::ProjectStatsRefreshConflictsLogger)
.to receive(:warn_artifact_deletion_during_stats_refresh)
.with(method: 'Ci::BuildEraseService#execute', project_id: build.project_id)
service.execute
end
end
end
context 'when build is not erasable' do
before do
allow(build).to receive(:erasable?).and_return(false)
end
it 'is not successful' do
result = service.execute
expect(result).to be_error
expect(result.http_status).to eq(:unprocessable_entity)
end
it 'does not erase artifacts' do
service.execute
expect(build.artifacts_file).to be_present
expect(build.artifacts_metadata).to be_present
end
it 'does not erase trace' do
service.execute
expect(build.trace).to exist
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class ListConfigVariablesService < ::BaseService
include ReactiveCaching
self.reactive_cache_key = ->(service) { [service.class.name, service.id] }
self.reactive_cache_work_type = :external_dependency
self.reactive_cache_worker_finder = ->(id, *_args) { from_cache(id) }
def self.from_cache(id)
project_id, user_id = id.split('-')
project = Project.find(project_id)
user = User.find(user_id)
new(project, user)
end
def execute(ref)
sha = project.commit(ref).try(:sha)
with_reactive_cache(sha) { |result| result }
end
def calculate_reactive_cache(sha)
config = ::Gitlab::Ci::ProjectConfig.new(project: project, sha: sha)
return {} unless config.exists?
result = Gitlab::Ci::YamlProcessor.new(
config.content,
project: project,
user: current_user,
sha: sha,
verify_project_sha: true
).execute
result.valid? ? result.root_variables_with_prefill_data : {}
end
# Required for ReactiveCaching, it is also used in `reactive_cache_worker_finder`
def id
"#{project.id}-#{current_user.id}"
end
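# e.g. for a project with id 42 and a user with id 7 (illustrative ids), `id`
# returns "42-7", and `from_cache("42-7")` reconstructs an equivalent service.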
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::ListConfigVariablesService,
:use_clean_rails_memory_store_caching, feature_category: :secrets_management do
include ReactiveCachingHelpers
let(:ci_config) { {} }
let(:files) { { '.gitlab-ci.yml' => YAML.dump(ci_config) } }
let(:project) { create(:project, :custom_repo, :auto_devops_disabled, files: files) }
let(:user) { project.creator }
let(:ref) { project.default_branch }
let(:sha) { project.commit(ref).sha }
let(:service) { described_class.new(project, user) }
subject(:result) { service.execute(ref) }
context 'when sending a valid ref' do
let(:ci_config) do
{
variables: {
KEY1: { value: 'val 1', description: 'description 1' },
KEY2: { value: 'val 2', description: '' },
KEY3: { value: 'val 3' },
KEY4: 'val 4'
},
test: {
stage: 'test',
script: 'echo'
}
}
end
let(:expected_result) do
{
'KEY1' => { value: 'val 1', description: 'description 1' },
'KEY2' => { value: 'val 2', description: '' },
'KEY3' => { value: 'val 3' },
'KEY4' => { value: 'val 4' }
}
end
before do
synchronous_reactive_cache(service)
end
it 'returns variables list' do
expect(result).to eq(expected_result)
end
context 'when the ref is a sha from a fork' do
include_context 'when a project repository contains a forked commit'
before do
allow_next_instance_of(Gitlab::Ci::ProjectConfig) do |instance|
allow(instance).to receive(:exists?).and_return(true)
allow(instance).to receive(:content).and_return(YAML.dump(ci_config))
end
end
let(:ref) { forked_commit_sha }
context 'when a project ref contains the sha' do
before do
mock_branch_contains_forked_commit_sha
end
it 'returns variables list' do
expect(result).to eq(expected_result)
end
end
context 'when a project ref does not contain the sha' do
it 'returns empty response' do
expect(result).to eq({})
end
end
end
end
context 'when config has includes' do
let(:ci_config) do
{
include: [{ local: 'other_file.yml' }],
variables: {
KEY1: { value: 'val 1', description: 'description 1' }
},
test: {
stage: 'test',
script: 'echo'
}
}
end
let(:other_file) do
{
variables: {
KEY2: { value: 'val 2', description: 'description 2' }
}
}
end
let(:files) { { '.gitlab-ci.yml' => YAML.dump(ci_config), 'other_file.yml' => YAML.dump(other_file) } }
before do
synchronous_reactive_cache(service)
end
it 'returns variable list' do
expect(result['KEY1']).to eq({ value: 'val 1', description: 'description 1' })
expect(result['KEY2']).to eq({ value: 'val 2', description: 'description 2' })
end
end
context 'when project CI config is external' do
let(:other_project_ci_config) do
{
variables: { KEY1: { value: 'val 1', description: 'description 1' } },
test: { script: 'echo' }
}
end
let(:other_project_files) { { '.gitlab-ci.yml' => YAML.dump(other_project_ci_config) } }
let(:other_project) { create(:project, :custom_repo, files: other_project_files) }
before do
project.update!(ci_config_path: ".gitlab-ci.yml@#{other_project.full_path}:master")
synchronous_reactive_cache(service)
end
context 'when the user has access to the external project' do
before do
other_project.add_developer(user)
end
it 'returns variable list' do
expect(result['KEY1']).to eq({ value: 'val 1', description: 'description 1' })
end
end
context 'when the user has no access to the external project' do
it 'returns an empty hash' do
expect(result).to eq({})
end
end
end
context 'when sending an invalid ref' do
let(:ref) { 'invalid-ref' }
let(:ci_config) { nil }
before do
synchronous_reactive_cache(service)
end
it 'returns an empty hash' do
expect(result).to eq({})
end
end
context 'when sending an invalid config' do
let(:ci_config) do
{
variables: {
KEY1: { value: 'val 1', description: 'description 1' }
},
test: {
stage: 'invalid',
script: 'echo'
}
}
end
before do
synchronous_reactive_cache(service)
end
it 'returns empty result' do
expect(result).to eq({})
end
end
context 'when reading from cache' do
let(:reactive_cache_params) { [sha] }
let(:return_value) { { 'KEY1' => { value: 'val 1', description: 'description 1' } } }
before do
stub_reactive_cache(service, return_value, reactive_cache_params)
end
it 'returns variable list' do
expect(result).to eq(return_value)
end
end
context 'when the cache is empty' do
let(:reactive_cache_params) { [sha] }
it 'returns nil and enqueues the worker to fill the cache' do
expect(ExternalServiceReactiveCachingWorker)
.to receive(:perform_async)
.with(service.class, service.id, *reactive_cache_params)
expect(result).to be_nil
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class PlayBridgeService < ::BaseService
def execute(bridge)
check_access!(bridge)
Ci::EnqueueJobService.new(bridge, current_user: current_user).execute
end
private
def check_access!(bridge)
raise Gitlab::Access::AccessDeniedError unless can?(current_user, :play_job, bridge)
end
end
end
Ci::PlayBridgeService.prepend_mod_with('Ci::PlayBridgeService')
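# Usage sketch (illustrative): Ci::PlayBridgeService.new(project, user).execute(bridge)
# raises Gitlab::Access::AccessDeniedError when the user lacks the :play_job ability.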
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::PlayBridgeService, '#execute', feature_category: :continuous_integration do
let(:project) { create(:project) }
let(:user) { create(:user) }
let(:pipeline) { create(:ci_pipeline, project: project) }
let(:downstream_project) { create(:project) }
let(:bridge) { create(:ci_bridge, :playable, pipeline: pipeline, downstream: downstream_project) }
let(:instance) { described_class.new(project, user) }
subject(:execute_service) { instance.execute(bridge) }
context 'when user can run the bridge' do
before do
allow(instance).to receive(:can?).with(user, :play_job, bridge).and_return(true)
end
it 'marks the bridge pending' do
execute_service
expect(bridge.reload).to be_pending
end
it "updates bridge's user" do
execute_service
expect(bridge.reload.user).to eq(user)
end
it 'enqueues Ci::CreateDownstreamPipelineWorker' do
expect(::Ci::CreateDownstreamPipelineWorker).to receive(:perform_async).with(bridge.id)
execute_service
end
context 'when a subsequent job is skipped' do
let!(:job) { create(:ci_build, :skipped, pipeline: pipeline, stage_idx: bridge.stage_idx + 1) }
before do
create(:ci_build_need, build: job, name: bridge.name)
end
it 'marks the subsequent job as processable' do
expect { execute_service }.to change { job.reload.status }.from('skipped').to('created')
end
end
context 'when bridge is not playable' do
let(:bridge) { create(:ci_bridge, :failed, pipeline: pipeline, downstream: downstream_project) }
it 'raises StateMachines::InvalidTransition' do
expect { execute_service }.to raise_error StateMachines::InvalidTransition
end
end
end
context 'when user can not run the bridge' do
before do
allow(instance).to receive(:can?).with(user, :play_job, bridge).and_return(false)
end
it 'raises Gitlab::Access::AccessDeniedError' do
expect { execute_service }.to raise_error Gitlab::Access::AccessDeniedError
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
# This class loops through all builds with exposed artifacts and returns
# basic information about exposed artifacts for given jobs for the frontend
# to display them as custom links in the merge request.
#
# This service must be used with care.
# Looking for exposed artifacts is very slow and should be done asynchronously.
class FindExposedArtifactsService < ::BaseService
include Gitlab::Routing
MAX_EXPOSED_ARTIFACTS = 10
def for_pipeline(pipeline, limit: MAX_EXPOSED_ARTIFACTS)
results = []
pipeline.builds.latest.with_exposed_artifacts.find_each do |job|
if job_exposed_artifacts = for_job(job)
results << job_exposed_artifacts
end
break if results.size >= limit
end
results
end
def for_job(job)
return unless job.has_exposed_artifacts?
metadata_entries = first_2_metadata_entries_for_artifacts_paths(job)
return if metadata_entries.empty?
{
text: job.artifacts_expose_as,
url: path_for_entries(metadata_entries, job),
job_path: project_job_path(job.project, job),
job_name: job.name
}
end
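# Example return value of #for_job for a single exposed file (illustrative
# project/job paths):
#   { text: 'Exposed artifact',
#     url: '/group/project/-/jobs/1/artifacts/file/doc.txt',
#     job_path: '/group/project/-/jobs/1',
#     job_name: 'test' }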
private
# we don't need to fetch all artifacts entries for a job because
# it could contain many. We only need to know whether it has 1 or more
# artifacts, so fetching the first 2 would be sufficient.
def first_2_metadata_entries_for_artifacts_paths(job)
return [] unless job.artifacts_metadata
job.artifacts_paths
.lazy
.map { |path| job.artifacts_metadata_entry(path, recursive: true) }
.select { |entry| entry.exists? }
.first(2)
end
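# Illustrative sketch of why `lazy` matters above: in plain Ruby,
#   paths.lazy.map { |p| expensive_lookup(p) }.select(&:itself).first(2)
# stops calling `expensive_lookup` (a hypothetical helper) as soon as two
# results are materialized, so a job with hundreds of artifact paths only
# pays for the first few metadata reads.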
def path_for_entries(entries, job)
return if entries.empty?
if single_artifact?(entries)
file_project_job_artifacts_path(job.project, job, entries.first.path)
else
browse_project_job_artifacts_path(job.project, job)
end
end
def single_artifact?(entries)
entries.size == 1 && entries.first.file?
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::FindExposedArtifactsService, feature_category: :build_artifacts do
include Gitlab::Routing
let(:metadata) do
Gitlab::Ci::Build::Artifacts::Metadata
.new(metadata_file_stream, path, { recursive: true })
end
let(:metadata_file_stream) do
File.open(Rails.root + 'spec/fixtures/ci_build_artifacts_metadata.gz')
end
let_it_be(:project) { create(:project) }
let(:user) { nil }
after do
metadata_file_stream&.close
end
def create_job_with_artifacts(options)
create(:ci_build, pipeline: pipeline, options: options).tap do |job|
create(:ci_job_artifact, :metadata, job: job)
end
end
describe '#for_pipeline' do
shared_examples 'finds a single match' do
it 'returns the artifact with exact location' do
expect(subject).to eq([{
text: 'Exposed artifact',
url: file_project_job_artifacts_path(project, job, 'other_artifacts_0.1.2/doc_sample.txt'),
job_name: job.name,
job_path: project_job_path(project, job)
}])
end
end
shared_examples 'finds multiple matches' do
it 'returns the path to the artifacts browser' do
expect(subject).to eq([{
text: 'Exposed artifact',
url: browse_project_job_artifacts_path(project, job),
job_name: job.name,
job_path: project_job_path(project, job)
}])
end
end
shared_examples 'does not find any matches' do
it 'returns empty array' do
expect(subject).to eq []
end
end
let_it_be(:pipeline) { create(:ci_pipeline, project: project) }
subject { described_class.new(project, user).for_pipeline(pipeline) }
context 'with jobs having no exposed artifacts' do
let!(:job) do
create_job_with_artifacts(artifacts: {
paths: ['other_artifacts_0.1.2/doc_sample.txt', 'something-else.html']
})
end
it_behaves_like 'does not find any matches'
end
context 'with jobs having no artifacts (metadata)' do
let!(:job) do
create(:ci_build, pipeline: pipeline, options: {
artifacts: {
expose_as: 'Exposed artifact',
paths: ['other_artifacts_0.1.2/doc_sample.txt', 'something-else.html']
}
})
end
it_behaves_like 'does not find any matches'
end
context 'with jobs having at most 1 matching exposed artifact' do
let!(:job) do
create_job_with_artifacts(artifacts: {
expose_as: 'Exposed artifact',
paths: ['other_artifacts_0.1.2/doc_sample.txt', 'something-else.html']
})
end
it_behaves_like 'finds a single match'
end
context 'with jobs having more than 1 matching exposed artifacts' do
let!(:job) do
create_job_with_artifacts(artifacts: {
expose_as: 'Exposed artifact',
paths: [
'ci_artifacts.txt',
'other_artifacts_0.1.2/doc_sample.txt',
'something-else.html'
]
})
end
it_behaves_like 'finds multiple matches'
end
context 'with jobs having more than 1 matching exposed artifacts inside a directory' do
let!(:job) do
create_job_with_artifacts(artifacts: {
expose_as: 'Exposed artifact',
paths: ['tests_encoding/']
})
end
it_behaves_like 'finds multiple matches'
end
context 'with jobs having paths with glob expression' do
let!(:job) do
create_job_with_artifacts(artifacts: {
expose_as: 'Exposed artifact',
paths: ['other_artifacts_0.1.2/doc_sample.txt', 'tests_encoding/*.*']
})
end
it_behaves_like 'finds a single match' # because those with * are ignored
end
context 'limiting results' do
let!(:job1) do
create_job_with_artifacts(artifacts: {
expose_as: 'artifact 1',
paths: ['ci_artifacts.txt']
})
end
let!(:job2) do
create_job_with_artifacts(artifacts: {
expose_as: 'artifact 2',
paths: ['tests_encoding/']
})
end
let!(:job3) do
create_job_with_artifacts(artifacts: {
expose_as: 'should not be exposed',
paths: ['other_artifacts_0.1.2/doc_sample.txt']
})
end
subject { described_class.new(project, user).for_pipeline(pipeline, limit: 2) }
it 'returns first 2 results' do
expect(subject).to eq(
[
{
text: 'artifact 1',
url: file_project_job_artifacts_path(project, job1, 'ci_artifacts.txt'),
job_name: job1.name,
job_path: project_job_path(project, job1)
},
{
text: 'artifact 2',
url: browse_project_job_artifacts_path(project, job2),
job_name: job2.name,
job_path: project_job_path(project, job2)
}
])
end
end
context 'cross-project MR' do
let!(:foreign_project) { create(:project) }
let!(:pipeline) { create(:ci_pipeline, project: foreign_project) }
let!(:job_show) do
create_job_with_artifacts({
artifacts: {
expose_as: 'file artifact',
paths: ['ci_artifacts.txt']
}
})
end
let!(:job_browse) do
create_job_with_artifacts({
artifacts: {
expose_as: 'directory artifact',
paths: ['tests_encoding/']
}
})
end
subject { described_class.new(project, user).for_pipeline(pipeline, limit: 2) }
it 'returns the correct path for cross-project MRs' do
expect(subject).to eq(
[
{
text: 'file artifact',
url: file_project_job_artifacts_path(foreign_project, job_show, 'ci_artifacts.txt'),
job_name: job_show.name,
job_path: project_job_path(foreign_project, job_show)
},
{
text: 'directory artifact',
url: browse_project_job_artifacts_path(foreign_project, job_browse),
job_name: job_browse.name,
job_path: project_job_path(foreign_project, job_browse)
}
])
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class UpdatePendingBuildService
VALID_PARAMS = %i[instance_runners_enabled namespace_id namespace_traversal_ids].freeze
InvalidParamsError = Class.new(StandardError)
InvalidModelError = Class.new(StandardError)
def initialize(model, update_params)
@model = model
@update_params = update_params.symbolize_keys
validations!
end
def execute
@model.pending_builds.each_batch do |relation|
relation.update_all(@update_params)
end
end
private
def validations!
validate_model! && validate_params!
end
def validate_model!
raise InvalidModelError unless @model.is_a?(::Project) || @model.is_a?(::Group)
true
end
def validate_params!
extra_params = @update_params.keys - VALID_PARAMS
raise InvalidParamsError, "Invalid params: #{extra_params.join(', ')}" unless extra_params.empty?
true
end
end
end
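# Usage sketch (illustrative, matching the spec that follows):
#   Ci::UpdatePendingBuildService.new(group, instance_runners_enabled: true).execute
# batch-updates every Ci::PendingBuild row under the given group or project.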
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::UpdatePendingBuildService, feature_category: :continuous_integration do
let_it_be(:group) { create(:group) }
let_it_be(:project) { create(:project, namespace: group) }
let_it_be_with_reload(:pending_build_1) { create(:ci_pending_build, project: project, instance_runners_enabled: false) }
let_it_be_with_reload(:pending_build_2) { create(:ci_pending_build, project: project, instance_runners_enabled: true) }
let_it_be(:update_params) { { instance_runners_enabled: true } }
let(:service) { described_class.new(model, update_params) }
describe '#execute' do
subject(:update_pending_builds) { service.execute }
context 'validations' do
context 'when model is invalid' do
let(:model) { pending_build_1 }
it 'raises an error' do
expect { update_pending_builds }.to raise_error(described_class::InvalidModelError)
end
end
context 'when params is invalid' do
let(:model) { group }
let(:update_params) { { minutes_exceeded: true } }
it 'raises an error' do
expect { update_pending_builds }.to raise_error(described_class::InvalidParamsError)
end
end
end
context 'when model is a group with pending builds' do
let(:model) { group }
it 'updates all pending builds', :aggregate_failures do
update_pending_builds
expect(pending_build_1.instance_runners_enabled).to be_truthy
expect(pending_build_2.instance_runners_enabled).to be_truthy
end
end
context 'when model is a project with pending builds' do
let(:model) { project }
it 'updates all pending builds', :aggregate_failures do
update_pending_builds
expect(pending_build_1.instance_runners_enabled).to be_truthy
expect(pending_build_2.instance_runners_enabled).to be_truthy
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class CreatePipelineService < BaseService
attr_reader :pipeline, :logger
LOG_MAX_DURATION_THRESHOLD = 3.seconds
LOG_MAX_PIPELINE_SIZE = 2_000
LOG_MAX_CREATION_THRESHOLD = 20.seconds
SEQUENCE = [Gitlab::Ci::Pipeline::Chain::Build,
Gitlab::Ci::Pipeline::Chain::Build::Associations,
Gitlab::Ci::Pipeline::Chain::Validate::Abilities,
Gitlab::Ci::Pipeline::Chain::Validate::Repository,
Gitlab::Ci::Pipeline::Chain::Limit::RateLimit,
Gitlab::Ci::Pipeline::Chain::Validate::SecurityOrchestrationPolicy,
Gitlab::Ci::Pipeline::Chain::Skip,
Gitlab::Ci::Pipeline::Chain::Config::Content,
Gitlab::Ci::Pipeline::Chain::Config::Process,
Gitlab::Ci::Pipeline::Chain::Validate::AfterConfig,
Gitlab::Ci::Pipeline::Chain::RemoveUnwantedChatJobs,
Gitlab::Ci::Pipeline::Chain::SeedBlock,
Gitlab::Ci::Pipeline::Chain::EvaluateWorkflowRules,
Gitlab::Ci::Pipeline::Chain::AssignPartition,
Gitlab::Ci::Pipeline::Chain::Seed,
Gitlab::Ci::Pipeline::Chain::Limit::Size,
Gitlab::Ci::Pipeline::Chain::Limit::ActiveJobs,
Gitlab::Ci::Pipeline::Chain::Limit::Deployments,
Gitlab::Ci::Pipeline::Chain::Validate::External,
Gitlab::Ci::Pipeline::Chain::Populate,
Gitlab::Ci::Pipeline::Chain::PopulateMetadata,
Gitlab::Ci::Pipeline::Chain::StopDryRun,
Gitlab::Ci::Pipeline::Chain::EnsureEnvironments,
Gitlab::Ci::Pipeline::Chain::EnsureResourceGroups,
Gitlab::Ci::Pipeline::Chain::Create,
Gitlab::Ci::Pipeline::Chain::CreateCrossDatabaseAssociations,
Gitlab::Ci::Pipeline::Chain::CancelPendingPipelines,
Gitlab::Ci::Pipeline::Chain::Metrics,
Gitlab::Ci::Pipeline::Chain::TemplateUsage,
Gitlab::Ci::Pipeline::Chain::Pipeline::Process].freeze
# Create a new pipeline in the specified project.
#
# @param [Symbol] source What event (Ci::Pipeline.sources) triggers the pipeline
# creation.
# @param [Boolean] ignore_skip_ci Whether to skip pipeline creation when a `[skip ci]` comment
# is present in the commit body
# @param [Boolean] save_on_errors Whether to persist an invalid pipeline when it encounters an
# error during creation (e.g. invalid yaml)
# @param [Ci::TriggerRequest] trigger_request The pipeline trigger that triggers the pipeline creation.
# @param [Ci::PipelineSchedule] schedule The pipeline schedule that triggers the pipeline creation.
# @param [MergeRequest] merge_request The merge request that triggers the pipeline creation.
# @param [Ci::ExternalPullRequest] external_pull_request The external pull request that triggers the pipeline creation.
# @param [Ci::Bridge] bridge The bridge job that triggers the downstream pipeline creation.
# @param [String] content The content of .gitlab-ci.yml to override the default config
# contents (e.g. .gitlab-ci.yml in the repository). Mainly used for
# generating a dangling pipeline.
#
# @return [Ci::Pipeline] The created Ci::Pipeline object.
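# A minimal invocation sketch (hypothetical SHAs), as exercised by the spec:
#   Ci::CreatePipelineService
#     .new(project, user, ref: 'refs/heads/master', before: old_sha, after: new_sha)
#     .execute(:push)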
# rubocop: disable Metrics/ParameterLists
def execute(source, ignore_skip_ci: false, save_on_errors: true, trigger_request: nil, schedule: nil, merge_request: nil, external_pull_request: nil, bridge: nil, **options, &block)
@logger = build_logger
@pipeline = Ci::Pipeline.new
command = Gitlab::Ci::Pipeline::Chain::Command.new(
source: source,
origin_ref: params[:ref],
checkout_sha: params[:checkout_sha],
after_sha: params[:after],
before_sha: params[:before], # The base SHA of the source branch (i.e merge_request.diff_base_sha).
source_sha: params[:source_sha], # The HEAD SHA of the source branch (i.e merge_request.diff_head_sha).
target_sha: params[:target_sha], # The HEAD SHA of the target branch.
trigger_request: trigger_request,
schedule: schedule,
merge_request: merge_request,
external_pull_request: external_pull_request,
ignore_skip_ci: ignore_skip_ci,
save_incompleted: save_on_errors,
seeds_block: block,
variables_attributes: params[:variables_attributes],
project: project,
current_user: current_user,
push_options: params[:push_options] || {},
chat_data: params[:chat_data],
bridge: bridge,
logger: @logger,
**extra_options(**options))
# Ensure we never persist the pipeline when dry_run: true
@pipeline.readonly! if command.dry_run?
Gitlab::Ci::Pipeline::Chain::Sequence
.new(pipeline, command, SEQUENCE)
.build!
if pipeline.persisted?
Gitlab::EventStore.publish(
Ci::PipelineCreatedEvent.new(data: { pipeline_id: pipeline.id })
)
create_namespace_onboarding_action
else
# If pipeline is not persisted, try to recover IID
pipeline.reset_project_iid
end
if error_message = pipeline.full_error_messages.presence || pipeline.failure_reason.presence
ServiceResponse.error(message: error_message, payload: pipeline)
else
ServiceResponse.success(payload: pipeline)
end
ensure
@logger.commit(pipeline: pipeline, caller: self.class.name)
end
# rubocop: enable Metrics/ParameterLists
private
def commit
@commit ||= project.commit(origin_sha || origin_ref)
end
def sha
commit.try(:id)
end
def create_namespace_onboarding_action
Onboarding::PipelineCreatedWorker.perform_async(project.namespace_id)
end
def extra_options(content: nil, dry_run: false)
{ content: content, dry_run: dry_run }
end
def build_logger
Gitlab::Ci::Pipeline::Logger.new(project: project) do |l|
l.log_when do |observations|
observations.any? do |name, observation|
name.to_s.end_with?('duration_s') &&
Array(observation).max >= LOG_MAX_DURATION_THRESHOLD
end
end
l.log_when do |observations|
count = observations['pipeline_size_count']
next false unless count
count >= LOG_MAX_PIPELINE_SIZE
end
l.log_when do |observations|
duration = observations['pipeline_creation_duration_s']
next false unless duration
duration >= LOG_MAX_CREATION_THRESHOLD
end
end
end
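# Illustrative trigger for the first log_when block (hypothetical observation
# name and value): { 'pipeline_save_duration_s' => [4.2] } logs because the
# name ends in 'duration_s' and its max exceeds LOG_MAX_DURATION_THRESHOLD.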
end
end
Ci::CreatePipelineService.prepend_mod_with('Ci::CreatePipelineService')
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::CreatePipelineService, :yaml_processor_feature_flag_corectness, :clean_gitlab_redis_cache, feature_category: :continuous_integration do
include ProjectForksHelper
let_it_be_with_refind(:project) { create(:project, :repository) }
let_it_be_with_reload(:user) { project.first_owner }
let(:ref_name) { 'refs/heads/master' }
before do
stub_ci_pipeline_to_return_yaml_file
end
describe '#execute' do
# rubocop:disable Metrics/ParameterLists
def execute_service(
source: :push,
before: '00000000',
after: project.commit.id,
ref: ref_name,
trigger_request: nil,
variables_attributes: nil,
merge_request: nil,
external_pull_request: nil,
push_options: nil,
source_sha: nil,
target_sha: nil,
save_on_errors: true)
params = { ref: ref,
before: before,
after: after,
variables_attributes: variables_attributes,
push_options: push_options,
source_sha: source_sha,
target_sha: target_sha }
described_class.new(project, user, params).execute(source,
save_on_errors: save_on_errors,
trigger_request: trigger_request,
merge_request: merge_request,
external_pull_request: external_pull_request) do |pipeline|
yield(pipeline) if block_given?
end
end
# rubocop:enable Metrics/ParameterLists
context 'performance' do
it_behaves_like 'pipelines are created without N+1 SQL queries' do
let(:config1) do
<<~YAML
job1:
stage: build
script: exit 0
job2:
stage: test
script: exit 0
YAML
end
let(:config2) do
<<~YAML
job1:
stage: build
script: exit 0
job2:
stage: test
script: exit 0
job3:
stage: deploy
script: exit 0
YAML
end
let(:accepted_n_plus_ones) do
1 + # SELECT "ci_instance_variables"
1 + # INSERT INTO "ci_stages"
1 + # SELECT "ci_builds".* FROM "ci_builds"
1 + # INSERT INTO "ci_builds"
1 + # INSERT INTO "ci_builds_metadata"
1 # SELECT "taggings".* FROM "taggings"
end
end
end
context 'valid params' do
let(:pipeline) { execute_service.payload }
let(:pipeline_on_previous_commit) do
execute_service(
after: previous_commit_sha_from_ref('master')
).payload
end
it 'responds with success' do
expect(execute_service).to be_success
end
it 'creates a pipeline' do
expect(pipeline).to be_kind_of(Ci::Pipeline)
expect(pipeline).to be_valid
expect(pipeline).to be_persisted
expect(pipeline).to be_push
expect(pipeline).to eq(project.ci_pipelines.last)
expect(pipeline).to have_attributes(user: user)
expect(pipeline).to have_attributes(status: 'created')
expect(pipeline.iid).not_to be_nil
expect(pipeline.repository_source?).to be true
expect(pipeline.builds.first).to be_kind_of(Ci::Build)
expect(pipeline.yaml_errors).not_to be_present
end
it 'increments the prometheus counter' do
counter = spy('pipeline created counter')
allow(Gitlab::Ci::Pipeline::Metrics)
.to receive(:pipelines_created_counter).and_return(counter)
pipeline
expect(counter).to have_received(:increment)
end
it 'records pipeline size in a prometheus histogram' do
histogram = spy('pipeline size histogram')
allow(Gitlab::Ci::Pipeline::Metrics)
.to receive(:pipeline_size_histogram).and_return(histogram)
execute_service
expect(histogram).to have_received(:observe)
.with({ source: 'push', plan: project.actual_plan_name }, 5)
end
it 'tracks included template usage' do
expect_next_instance_of(Gitlab::Ci::Pipeline::Chain::TemplateUsage) do |instance|
expect(instance).to receive(:perform!)
end
execute_service
end
context 'when merge requests already exist for this source branch' do
let!(:merge_request_1) do
create(:merge_request, source_branch: 'feature', target_branch: "master", source_project: project)
end
let!(:merge_request_2) do
create(:merge_request, source_branch: 'feature', target_branch: "v1.1.0", source_project: project)
end
context 'when the head pipeline sha equals merge request sha' do
it 'updates head pipeline of each merge request', :sidekiq_might_not_need_inline do
head_pipeline = execute_service(ref: 'feature', after: nil).payload
expect(merge_request_1.reload.head_pipeline).to eq(head_pipeline)
expect(merge_request_2.reload.head_pipeline).to eq(head_pipeline)
end
end
end
context 'auto-cancel enabled' do
before do
project.update!(auto_cancel_pending_pipelines: 'enabled')
end
it 'does not cancel HEAD pipeline' do
pipeline
pipeline_on_previous_commit
expect(pipeline.reload).to have_attributes(status: 'created', auto_canceled_by_id: nil)
end
it 'auto cancels pending non-HEAD pipelines', :sidekiq_might_not_need_inline do
pipeline_on_previous_commit
pipeline
expect(pipeline_on_previous_commit.reload).to have_attributes(status: 'canceled', auto_canceled_by_id: pipeline.id)
end
it 'cancels running outdated pipelines', :sidekiq_inline do
pipeline_on_previous_commit.reload.run
head_pipeline = execute_service.payload
expect(pipeline_on_previous_commit.reload).to have_attributes(status: 'canceled', auto_canceled_by_id: head_pipeline.id)
end
it 'cancels created outdated pipelines', :sidekiq_might_not_need_inline do
pipeline_on_previous_commit.update!(status: 'created')
pipeline
expect(pipeline_on_previous_commit.reload).to have_attributes(status: 'canceled', auto_canceled_by_id: pipeline.id)
end
it 'does not cancel pipelines from the other branches' do
new_pipeline = execute_service(
ref: 'refs/heads/feature',
after: previous_commit_sha_from_ref('feature')
).payload
pipeline
expect(new_pipeline.reload).to have_attributes(status: 'created', auto_canceled_by_id: nil)
end
context 'when the interruptible attribute is' do
context 'not defined' do
before do
config = YAML.dump(rspec: { script: 'echo' })
stub_ci_pipeline_yaml_file(config)
end
it 'is cancelable' do
pipeline = execute_service.payload
expect(pipeline.builds.find_by(name: 'rspec').interruptible).to be_nil
end
end
context 'set to true' do
before do
config = YAML.dump(rspec: { script: 'echo', interruptible: true })
stub_ci_pipeline_yaml_file(config)
end
it 'is cancelable' do
pipeline = execute_service.payload
expect(pipeline.builds.find_by(name: 'rspec').interruptible).to be_truthy
end
end
context 'set to false' do
before do
config = YAML.dump(rspec: { script: 'echo', interruptible: false })
stub_ci_pipeline_yaml_file(config)
end
it 'is not cancelable' do
pipeline = execute_service.payload
expect(pipeline.builds.find_by(name: 'rspec').interruptible).to be_falsy
end
end
end
context 'interruptible builds' do
before do
stub_ci_pipeline_yaml_file(YAML.dump(config))
end
let(:config) do
{
stages: %w[stage1 stage2 stage3 stage4],
build_1_1: {
stage: 'stage1',
script: 'echo',
interruptible: true
},
build_1_2: {
stage: 'stage1',
script: 'echo',
interruptible: true
},
build_2_1: {
stage: 'stage2',
script: 'echo',
when: 'delayed',
start_in: '10 minutes',
interruptible: true
},
build_3_1: {
stage: 'stage3',
script: 'echo',
interruptible: false
},
build_4_1: {
stage: 'stage4',
script: 'echo'
}
}
end
it 'properly configures interruptible status' do
interruptible_status =
pipeline_on_previous_commit
.builds
.joins(:metadata)
.pluck(:name, "#{Ci::BuildMetadata.quoted_table_name}.interruptible")
expect(interruptible_status).to contain_exactly(
['build_1_1', true],
['build_1_2', true],
['build_2_1', true],
['build_3_1', false],
['build_4_1', nil]
)
end
context 'when only interruptible builds are running' do
context 'when a build explicitly marked as interruptible is running' do
it 'cancels running outdated pipelines', :sidekiq_might_not_need_inline do
pipeline_on_previous_commit
.builds
.find_by_name('build_1_2')
.run!
pipeline
expect(pipeline_on_previous_commit.reload).to have_attributes(
status: 'canceled', auto_canceled_by_id: pipeline.id)
end
end
context 'when a build that is not marked as interruptible is running' do
it 'cancels running outdated pipelines', :sidekiq_might_not_need_inline do
build_2_1 = pipeline_on_previous_commit
.builds.find_by_name('build_2_1')
build_2_1.enqueue!
build_2_1.reset.run!
pipeline
expect(pipeline_on_previous_commit.reload).to have_attributes(
status: 'canceled', auto_canceled_by_id: pipeline.id)
end
end
end
context 'when an uninterruptible build is running' do
it 'does not cancel running outdated pipelines', :sidekiq_inline do
build_3_1 = pipeline_on_previous_commit
.builds.find_by_name('build_3_1')
build_3_1.enqueue!
build_3_1.reset.run!
pipeline
expect(pipeline_on_previous_commit.reload).to have_attributes(
status: 'running', auto_canceled_by_id: nil)
end
end
context 'when a build is waiting on an interruptible scheduled task' do
it 'cancels running outdated pipelines', :sidekiq_might_not_need_inline do
allow(Ci::BuildScheduleWorker).to receive(:perform_at)
pipeline_on_previous_commit
.builds
.find_by_name('build_2_1')
.schedule!
pipeline
expect(pipeline_on_previous_commit.reload).to have_attributes(
status: 'canceled', auto_canceled_by_id: pipeline.id)
end
end
context 'when an uninterruptible build has finished' do
it 'does not cancel running outdated pipelines', :sidekiq_might_not_need_inline do
pipeline_on_previous_commit
.builds
.find_by_name('build_3_1')
.success!
pipeline
expect(pipeline_on_previous_commit.reload).to have_attributes(
status: 'running', auto_canceled_by_id: nil)
end
end
end
end
context 'auto-cancel disabled' do
before do
project.update!(auto_cancel_pending_pipelines: 'disabled')
end
it 'does not auto cancel created non-HEAD pipelines' do
pipeline_on_previous_commit
pipeline
expect(pipeline_on_previous_commit.reload)
.to have_attributes(status: 'created', auto_canceled_by_id: nil)
end
end
def previous_commit_sha_from_ref(ref)
project.commit(ref).parent.sha
end
end
context "skip tag if there is no build for it" do
it "creates commit if there is appropriate job" do
expect(execute_service.payload).to be_persisted
end
it "creates commit if there is no appropriate job but deploy job has right ref setting" do
config = YAML.dump({ deploy: { script: "ls", only: ["master"] } })
stub_ci_pipeline_yaml_file(config)
expect(execute_service.payload).to be_persisted
end
end
it 'skips creating pipeline for refs without .gitlab-ci.yml', :aggregate_failures do
stub_ci_pipeline_yaml_file(nil)
expect(Onboarding::PipelineCreatedWorker).not_to receive(:perform_async)
response = execute_service
expect(response).to be_error
expect(response.message).to eq('Missing CI config file')
expect(response.payload).not_to be_persisted
expect(Ci::Pipeline.count).to eq(0)
end
shared_examples 'a failed pipeline' do
it 'creates failed pipeline' do
stub_ci_pipeline_yaml_file(ci_yaml)
pipeline = execute_service.payload
expect(pipeline).to be_persisted
expect(pipeline.builds.any?).to be false
expect(pipeline.status).to eq('failed')
expect(pipeline.yaml_errors).not_to be_nil
end
end
context 'config evaluation' do
context 'when config is in a file in repository' do
before do
content = YAML.dump(rspec: { script: 'echo' })
stub_ci_pipeline_yaml_file(content)
end
it 'pulls it from the repository' do
pipeline = execute_service.payload
expect(pipeline).to be_repository_source
expect(pipeline.builds.map(&:name)).to eq ['rspec']
end
end
context 'when config is from Auto-DevOps' do
before do
stub_ci_pipeline_yaml_file(nil)
allow_any_instance_of(Project).to receive(:auto_devops_enabled?).and_return(true)
create(:project_auto_devops, project: project)
end
it 'pulls it from Auto-DevOps' do
pipeline = execute_service.payload
expect(pipeline).to be_auto_devops_source
expect(pipeline.builds.map(&:name)).to match_array(%w[brakeman-sast build code_quality container_scanning secret_detection semgrep-sast test])
end
end
context 'when config is not found' do
before do
stub_ci_pipeline_yaml_file(nil)
end
it 'responds with error message', :aggregate_failures do
response = execute_service
expect(response).to be_error
expect(response.message).to eq('Missing CI config file')
expect(response.payload).not_to be_persisted
end
end
context 'when an unexpected error is raised' do
before do
expect(Gitlab::Ci::YamlProcessor).to receive(:new)
.and_raise(RuntimeError, 'undefined failure')
end
it 'saves error in pipeline' do
pipeline = execute_service.payload
expect(pipeline.yaml_errors).to include('Undefined error')
end
it 'logs error' do
expect(Gitlab::ErrorTracking).to receive(:track_exception).and_call_original
execute_service
end
end
end
context 'when yaml is invalid' do
let(:ci_yaml) { 'invalid: file: fiile' }
let(:message) { 'Message' }
it_behaves_like 'a failed pipeline'
it 'increments the error metric' do
stub_ci_pipeline_yaml_file(ci_yaml)
counter = Gitlab::Metrics.counter(:gitlab_ci_pipeline_failure_reasons, 'desc')
expect { execute_service }.to change { counter.get(reason: 'config_error') }.by(1)
end
context 'when receive git commit' do
before do
allow_any_instance_of(Ci::Pipeline).to receive(:git_commit_message) { message }
end
it_behaves_like 'a failed pipeline'
end
context 'when config has ports' do
context 'in the main image' do
let(:ci_yaml) do
<<-EOS
image:
name: image:1.0
ports:
- 80
EOS
end
it_behaves_like 'a failed pipeline'
end
context 'in the job image' do
let(:ci_yaml) do
<<-EOS
image: image:1.0
test:
script: rspec
image:
name: image:1.0
ports:
- 80
EOS
end
it_behaves_like 'a failed pipeline'
end
context 'in the service' do
let(:ci_yaml) do
<<-EOS
image: image:1.0
test:
script: rspec
image: image:1.0
services:
- name: test
ports:
- 80
EOS
end
it_behaves_like 'a failed pipeline'
end
end
end
context 'when commit contains a [ci skip] directive' do
shared_examples 'creating a pipeline' do
it 'does not skip pipeline creation' do
pipeline = execute_service.payload
expect(pipeline).to be_persisted
expect(pipeline.builds.first.name).to eq("rspec")
end
end
shared_examples 'skipping a pipeline' do
it 'skips pipeline creation' do
pipeline = execute_service.payload
expect(pipeline).to be_persisted
expect(pipeline.builds.any?).to be false
expect(pipeline.status).to eq("skipped")
end
end
before do
allow_any_instance_of(Ci::Pipeline).to receive(:git_commit_message) { commit_message }
end
skip_commit_messages = [
"some message[ci skip]",
"some message[skip ci]",
"some message[CI SKIP]",
"some message[SKIP CI]",
"some message[ci_skip]",
"some message[skip_ci]",
"some message[ci-skip]",
"some message[skip-ci]"
]
skip_commit_messages.each do |skip_commit_message|
context "when the commit message is #{skip_commit_message}" do
let(:commit_message) { skip_commit_message }
it_behaves_like 'skipping a pipeline'
end
end
context 'when commit message does not contain [ci skip] nor [skip ci]' do
let(:commit_message) { 'some message' }
it_behaves_like 'creating a pipeline'
end
context 'when commit message is nil' do
let(:commit_message) { nil }
it_behaves_like 'creating a pipeline'
end
context 'when there is [ci skip] tag in commit message and yaml is invalid' do
let(:commit_message) { 'some message [ci skip]' }
let(:ci_yaml) { 'invalid: file: fiile' }
before do
stub_ci_pipeline_yaml_file(ci_yaml)
end
it_behaves_like 'skipping a pipeline'
end
end
context 'when push options contain ci.skip' do
let(:push_options) do
{ 'ci' => { 'skip' => true } }
end
it 'creates a pipeline in the skipped state' do
pipeline = execute_service(push_options: push_options).payload
# TODO: DRY these up with "skips builds creation if the commit message"
expect(pipeline).to be_persisted
expect(pipeline.builds.any?).to be false
expect(pipeline.status).to eq("skipped")
end
end
context 'when there are no jobs for this pipeline' do
before do
config = YAML.dump({ test: { script: 'ls', only: ['feature'] } })
stub_ci_pipeline_yaml_file(config)
end
it 'does not create a new pipeline', :aggregate_failures do
result = execute_service
expect(result).to be_error
expect(result.message).to eq('Pipeline will not run for the selected trigger. ' \
'The rules configuration prevented any jobs from being added to the pipeline.')
expect(result.payload).not_to be_persisted
expect(Ci::Build.all).to be_empty
expect(Ci::Pipeline.count).to eq(0)
end
describe '#iid' do
let(:internal_id) do
InternalId.find_by(project_id: project.id, usage: :ci_pipelines)
end
before do
expect_any_instance_of(Ci::Pipeline).to receive(:ensure_project_iid!)
.and_call_original
end
it 'rewinds iid', :aggregate_failures do
result = execute_service
expect(result).to be_error
expect(result.payload).not_to be_persisted
expect(internal_id.last_value).to eq(0)
end
end
end
context 'when the configuration includes ID tokens' do
it 'creates variables for the ID tokens' do
config = YAML.dump({
job_with_id_tokens: {
script: 'ls',
id_tokens: {
'TEST_ID_TOKEN' => {
aud: 'https://gitlab.com'
}
}
}
})
stub_ci_pipeline_yaml_file(config)
result = execute_service.payload
expect(result).to be_persisted
expect(result.builds.first.id_tokens).to eq({
'TEST_ID_TOKEN' => { 'aud' => 'https://gitlab.com' }
})
end
end
context 'with manual actions' do
before do
config = YAML.dump({ deploy: { script: 'ls', when: 'manual' } })
stub_ci_pipeline_yaml_file(config)
end
it 'creates a pipeline with manual actions', :sidekiq_inline do
result = execute_service.payload
expect(result).to be_persisted
expect(result.manual_actions).not_to be_empty
end
end
context 'when builds with auto-retries are configured' do
let(:pipeline) { execute_service.payload }
let(:rspec_job) { pipeline.builds.find_by(name: 'rspec') }
before do
stub_ci_pipeline_yaml_file(YAML.dump({
rspec: { script: 'rspec', retry: retry_value }
}))
rspec_job.update!(options: { retry: retry_value })
end
context 'as an integer' do
let(:retry_value) { 2 }
it 'correctly creates builds with auto-retry value configured' do
expect(pipeline).to be_persisted
end
end
context 'as hash' do
let(:retry_value) { { max: 2, when: 'runner_system_failure' } }
it 'correctly creates builds with auto-retry value configured' do
expect(pipeline).to be_persisted
end
end
end
context 'with resource group' do
context 'when resource group is defined' do
before do
config = YAML.dump(
test: { stage: 'test', script: 'ls', resource_group: resource_group_key }
)
stub_ci_pipeline_yaml_file(config)
end
let(:resource_group_key) { 'iOS' }
it 'persists the association correctly' do
result = execute_service.payload
deploy_job = result.builds.find_by_name!(:test)
resource_group = project.resource_groups.find_by_key!(resource_group_key)
expect(result).to be_persisted
expect(deploy_job.resource_group.key).to eq(resource_group_key)
expect(project.resource_groups.count).to eq(1)
expect(resource_group.processables.count).to eq(1)
expect(resource_group.resources.count).to eq(1)
expect(resource_group.resources.first.processable).to eq(nil)
end
context 'when resource group key includes predefined variables' do
let(:resource_group_key) { '$CI_COMMIT_REF_NAME-$CI_JOB_NAME' }
it 'interpolates the variables into the key correctly' do
result = execute_service.payload
expect(result).to be_persisted
expect(project.resource_groups.exists?(key: 'master-test')).to eq(true)
end
end
end
end
context 'when resource group is defined for review app deployment' do
before do
config = YAML.dump(
review_app: {
stage: 'test',
script: 'deploy',
environment: {
name: 'review/$CI_COMMIT_REF_SLUG',
on_stop: 'stop_review_app'
},
resource_group: '$CI_ENVIRONMENT_NAME'
},
stop_review_app: {
stage: 'test',
script: 'stop',
when: 'manual',
environment: {
name: 'review/$CI_COMMIT_REF_SLUG',
action: 'stop'
},
resource_group: '$CI_ENVIRONMENT_NAME'
}
)
stub_ci_pipeline_yaml_file(config)
end
it 'persists the association correctly' do
result = execute_service.payload
deploy_job = result.builds.find_by_name!(:review_app)
stop_job = result.builds.find_by_name!(:stop_review_app)
expect(result).to be_persisted
expect(deploy_job.resource_group.key).to eq('review/master')
expect(stop_job.resource_group.key).to eq('review/master')
expect(project.resource_groups.count).to eq(1)
end
it 'initializes scoped variables only once for each build' do
# Bypassing `stub_build` hack because it disturbs the expectations below.
allow_next_instances_of(Gitlab::Ci::Build::Context::Build, 2) do |build_context|
allow(build_context).to receive(:variables) { Gitlab::Ci::Variables::Collection.new }
end
expect_next_instances_of(::Ci::Build, 2) do |ci_build|
expect(ci_build).to receive(:scoped_variables).once.and_call_original
end
expect(execute_service.payload).to be_created_successfully
end
end
context 'with timeout' do
context 'when builds with custom timeouts are configured' do
before do
config = YAML.dump(rspec: { script: 'rspec', timeout: '2m 3s' })
stub_ci_pipeline_yaml_file(config)
end
it 'correctly creates builds with custom timeout value configured' do
pipeline = execute_service.payload
expect(pipeline).to be_persisted
expect(pipeline.builds.find_by(name: 'rspec').options[:job_timeout]).to eq 123
end
end
end
context 'with release' do
shared_examples_for 'a successful release pipeline' do
before do
stub_ci_pipeline_yaml_file(YAML.dump(config))
end
it 'is valid config' do
pipeline = execute_service.payload
build = pipeline.builds.first
expect(pipeline).to be_kind_of(Ci::Pipeline)
expect(pipeline).to be_valid
expect(pipeline.yaml_errors).not_to be_present
expect(pipeline).to be_persisted
expect(build).to be_kind_of(Ci::Build)
expect(build.options).to eq(config[:release].except(:stage, :only))
expect(build).to be_persisted
end
end
context 'simple example' do
it_behaves_like 'a successful release pipeline' do
let(:config) do
{
release: {
script: ["make changelog | tee release_changelog.txt"],
release: {
tag_name: "v0.06",
description: "./release_changelog.txt"
}
}
}
end
end
end
context 'example with all release metadata' do
it_behaves_like 'a successful release pipeline' do
let(:config) do
{
release: {
script: ["make changelog | tee release_changelog.txt"],
release: {
name: "Release $CI_TAG_NAME",
tag_name: "v0.06",
description: "./release_changelog.txt",
assets: {
links: [
{
name: "cool-app.zip",
url: "http://my.awesome.download.site/1.0-$CI_COMMIT_SHORT_SHA.zip"
},
{
url: "http://my.awesome.download.site/1.0-$CI_COMMIT_SHORT_SHA.exe"
}
]
}
}
}
}
end
end
end
end
shared_examples 'when ref is protected' do
let(:user) { create(:user) }
context 'when user is developer' do
before do
project.add_developer(user)
end
it 'does not create a pipeline', :aggregate_failures do
expect(execute_service.payload).not_to be_persisted
expect(Ci::Pipeline.count).to eq(0)
end
end
context 'when user is maintainer' do
let(:pipeline) { execute_service.payload }
before do
project.add_maintainer(user)
end
it 'creates a protected pipeline' do
expect(pipeline).to be_persisted
expect(pipeline).to be_protected
expect(Ci::Pipeline.count).to eq(1)
end
end
context 'when trigger belongs to no one' do
let(:user) {}
let(:trigger_request) { create(:ci_trigger_request) }
it 'does not create a pipeline', :aggregate_failures do
response = execute_service(trigger_request: trigger_request)
expect(response).to be_error
expect(response.payload).not_to be_persisted
expect(Ci::Pipeline.count).to eq(0)
end
end
context 'when trigger belongs to a developer' do
let(:user) { create(:user) }
let(:trigger) { create(:ci_trigger, owner: user) }
let(:trigger_request) { create(:ci_trigger_request, trigger: trigger) }
before do
project.add_developer(user)
end
it 'does not create a pipeline', :aggregate_failures do
response = execute_service(trigger_request: trigger_request)
expect(response).to be_error
expect(response.payload).not_to be_persisted
expect(Ci::Pipeline.count).to eq(0)
end
end
context 'when trigger belongs to a maintainer' do
let(:user) { create(:user) }
let(:trigger) { create(:ci_trigger, owner: user) }
let(:trigger_request) { create(:ci_trigger_request, trigger: trigger) }
before do
project.add_maintainer(user)
end
it 'creates a pipeline' do
expect(execute_service(trigger_request: trigger_request).payload)
.to be_persisted
expect(Ci::Pipeline.count).to eq(1)
end
end
end
context 'when ref is a protected branch' do
before do
create(:protected_branch, project: project, name: 'master')
end
it_behaves_like 'when ref is protected'
end
context 'when ref is a protected tag' do
let(:ref_name) { 'refs/tags/v1.0.0' }
before do
create(:protected_tag, project: project, name: '*')
end
it_behaves_like 'when ref is protected'
end
context 'when pipeline is running for a tag' do
before do
config = YAML.dump(
test: { script: 'test', only: ['branches'] },
deploy: { script: 'deploy', only: ['tags'] }
)
stub_ci_pipeline_yaml_file(config)
end
it 'creates a tagged pipeline' do
pipeline = execute_service(ref: 'v1.0.0').payload
expect(pipeline.tag?).to be true
end
end
context 'when pipeline is running for a nonexistant-branch' do
let(:gitlab_ci_yaml) { YAML.dump(test: { script: 'test' }) }
let(:ref_name) { 'refs/heads/nonexistant-branch' }
let(:pipeline) { execute_service.payload }
it 'does not create the pipeline' do
expect(pipeline).not_to be_created_successfully
expect(pipeline.errors[:base]).to eq(['Reference not found'])
end
context 'when there is a tag with that nonexistant-branch' do
# v1.0.0 is on the test repo as a tag
let(:ref_name) { 'refs/heads/v1.0.0' }
it 'does not create the pipeline' do
expect(pipeline).not_to be_created_successfully
expect(pipeline.errors[:base]).to eq(['Reference not found'])
end
end
end
context 'when pipeline is running for a branch with the name of both a branch and a tag' do
let(:gitlab_ci_yaml) { YAML.dump(test: { script: 'test' }) }
# v1.1.0 is on the test repo as branch and tag
let(:ref_name) { 'refs/heads/v1.1.0' }
let(:pipeline) { execute_service.payload }
it 'creates the pipeline for the branch' do
expect(pipeline).to be_created_successfully
expect(pipeline.branch?).to be true
expect(pipeline.tag?).to be false
end
end
context 'when pipeline is running for a tag with the name of both a branch and a tag' do
let(:gitlab_ci_yaml) { YAML.dump(test: { script: 'test' }) }
# v1.1.0 is on the test repo as branch and tag
let(:ref_name) { 'refs/tags/v1.1.0' }
let(:pipeline) { execute_service.payload }
it 'creates the pipeline for the tag' do
expect(pipeline).to be_created_successfully
expect(pipeline.branch?).to be false
expect(pipeline.tag?).to be true
end
end
context 'when pipeline is running for an ambiguous ref' do
let(:gitlab_ci_yaml) { YAML.dump(test: { script: 'test' }) }
# v1.1.0 is on the test repo as branch and tag
let(:ref_name) { 'v1.1.0' }
let(:pipeline) { execute_service.payload }
it 'does not create the pipeline' do
expect(pipeline).not_to be_created_successfully
expect(pipeline.errors[:base]).to eq(['Ref is ambiguous'])
end
end
context 'when pipeline variables are specified' do
subject(:pipeline) { execute_service(variables_attributes: variables_attributes).payload }
context 'with valid pipeline variables' do
let(:variables_attributes) do
[{ key: 'first', secret_value: 'world' },
{ key: 'second', secret_value: 'second_world' }]
end
it 'creates a pipeline with specified variables' do
expect(pipeline.variables.map { |var| var.slice(:key, :secret_value) })
.to eq variables_attributes.map(&:with_indifferent_access)
end
end
context 'with duplicate pipeline variables' do
let(:variables_attributes) do
[{ key: 'hello', secret_value: 'world' },
{ key: 'hello', secret_value: 'second_world' }]
end
it 'fails to create the pipeline' do
expect(pipeline).to be_failed
expect(pipeline.variables).to be_empty
expect(pipeline.errors[:base]).to eq(['Duplicate variable name: hello'])
end
end
context 'with more than one duplicate pipeline variable' do
let(:variables_attributes) do
[{ key: 'hello', secret_value: 'world' },
{ key: 'hello', secret_value: 'second_world' },
{ key: 'single', secret_value: 'variable' },
{ key: 'other', secret_value: 'value' },
{ key: 'other', secret_value: 'other value' }]
end
it 'fails to create the pipeline' do
expect(pipeline).to be_failed
expect(pipeline.variables).to be_empty
expect(pipeline.errors[:base]).to eq(['Duplicate variable names: hello, other'])
end
end
end
describe 'Pipeline for external pull requests' do
let(:response) do
execute_service(
source: source,
external_pull_request: pull_request,
ref: ref_name,
source_sha: source_sha,
target_sha: target_sha
)
end
let(:pipeline) { response.payload }
before do
stub_ci_pipeline_yaml_file(YAML.dump(config))
end
let(:ref_name) { 'refs/heads/feature' }
let(:source_sha) { project.commit(ref_name).id }
let(:target_sha) { nil }
context 'when source is external pull request' do
let(:source) { :external_pull_request_event }
context 'when config has external_pull_requests keywords' do
let(:config) do
{
build: {
stage: 'build',
script: 'echo'
},
test: {
stage: 'test',
script: 'echo',
only: ['external_pull_requests']
},
pages: {
stage: 'deploy',
script: 'echo',
except: ['external_pull_requests']
}
}
end
context 'when external pull request is specified' do
let(:pull_request) { create(:external_pull_request, project: project, source_branch: 'feature', target_branch: 'master') }
let(:ref_name) { pull_request.source_ref }
it 'creates an external pull request pipeline' do
expect(pipeline).to be_persisted
expect(pipeline).to be_external_pull_request_event
expect(pipeline.external_pull_request).to eq(pull_request)
expect(pipeline.source_sha).to eq(source_sha)
expect(pipeline.builds.order(:stage_id)
.map(&:name))
.to eq(%w[build test])
end
context 'when ref is tag' do
let(:ref_name) { 'refs/tags/v1.1.0' }
it 'does not create an external pull request pipeline', :aggregate_failures do
expect(response).to be_error
expect(response.message).to eq('Tag is not included in the list and Failed to build the pipeline!')
expect(pipeline).not_to be_persisted
expect(pipeline.errors[:tag]).to eq(['is not included in the list'])
end
end
context 'when pull request is created from fork' do
it 'does not create an external pull request pipeline'
end
context "when there are no matched jobs" do
let(:config) do
{
test: {
stage: 'test',
script: 'echo',
except: ['external_pull_requests']
}
}
end
it 'does not create a detached merge request pipeline', :aggregate_failures do
expect(response).to be_error
expect(response.message).to eq('Pipeline will not run for the selected trigger. ' \
'The rules configuration prevented any jobs from being added to the pipeline.')
expect(pipeline).not_to be_persisted
expect(pipeline.errors[:base]).to eq(['Pipeline will not run for the selected trigger. ' \
'The rules configuration prevented any jobs from being added to the pipeline.'])
end
end
end
context 'when external pull request is not specified' do
let(:pull_request) { nil }
it 'does not create an external pull request pipeline', :aggregate_failures do
expect(response).to be_error
expect(response.message).to eq("External pull request can't be blank and Failed to build the pipeline!")
expect(pipeline).not_to be_persisted
expect(pipeline.errors[:external_pull_request]).to eq(["can't be blank"])
end
end
end
context "when config does not have external_pull_requests keywords" do
let(:config) do
{
build: {
stage: 'build',
script: 'echo'
},
test: {
stage: 'test',
script: 'echo'
},
pages: {
stage: 'deploy',
script: 'echo'
}
}
end
context 'when external pull request is specified' do
let(:pull_request) do
create(:external_pull_request,
project: project,
source_branch: Gitlab::Git.ref_name(ref_name),
target_branch: 'master')
end
it 'creates an external pull request pipeline' do
expect(pipeline).to be_persisted
expect(pipeline).to be_external_pull_request_event
expect(pipeline.external_pull_request).to eq(pull_request)
expect(pipeline.source_sha).to eq(source_sha)
expect(pipeline.builds.order(:stage_id)
.map(&:name))
.to eq(%w[build test pages])
end
end
context 'when external pull request is not specified' do
let(:pull_request) { nil }
it 'does not create an external pull request pipeline', :aggregate_failures do
expect(response).to be_error
expect(response.message).to eq("External pull request can't be blank and Failed to build the pipeline!")
expect(pipeline).not_to be_persisted
expect(pipeline.errors[:base]).to eq(['Failed to build the pipeline!'])
end
end
end
end
end
describe 'Pipelines for merge requests' do
let(:response) do
execute_service(
source: source,
merge_request: merge_request,
ref: ref_name,
source_sha: source_sha,
target_sha: target_sha
)
end
let(:pipeline) { response.payload }
before do
stub_ci_pipeline_yaml_file(YAML.dump(config))
end
let(:ref_name) { 'refs/heads/feature' }
let(:source_sha) { project.commit(ref_name).id }
let(:target_sha) { nil }
context 'when source is merge request' do
let(:source) { :merge_request_event }
context "when config has merge_requests keywords" do
let(:config) do
{
build: {
stage: 'build',
script: 'echo'
},
test: {
stage: 'test',
script: 'echo',
only: ['merge_requests']
},
pages: {
stage: 'deploy',
script: 'echo',
except: ['merge_requests']
}
}
end
context 'when merge request is specified' do
let(:merge_request) do
create(:merge_request,
source_project: project,
source_branch: 'feature',
target_project: project,
target_branch: 'master')
end
let(:ref_name) { merge_request.ref_path }
it 'creates a detached merge request pipeline' do
expect(pipeline).to be_persisted
expect(pipeline).to be_merge_request_event
expect(pipeline.merge_request).to eq(merge_request)
expect(pipeline.builds.order(:stage_id).pluck(:name)).to eq(%w[test])
end
it 'persists the specified source sha' do
expect(pipeline.source_sha).to eq(source_sha)
end
it 'does not persist target sha for detached merge request pipeline' do
expect(pipeline.target_sha).to be_nil
end
it 'schedules update for the head pipeline of the merge request' do
allow(MergeRequests::UpdateHeadPipelineWorker).to receive(:perform_async)
pipeline
expect(MergeRequests::UpdateHeadPipelineWorker).to have_received(:perform_async).with('Ci::PipelineCreatedEvent', { 'pipeline_id' => pipeline.id })
end
it 'schedules a namespace onboarding create action worker' do
expect(Onboarding::PipelineCreatedWorker)
.to receive(:perform_async).with(project.namespace_id)
pipeline
end
context 'when target sha is specified' do
let(:target_sha) { merge_request.target_branch_sha }
it 'persists the target sha' do
expect(pipeline.target_sha).to eq(target_sha)
end
end
context 'when ref is tag' do
let(:ref_name) { 'refs/tags/v1.1.0' }
it 'does not create a merge request pipeline', :aggregate_failures do
expect(response).to be_error
expect(response.message).to eq('Tag is not included in the list and Failed to build the pipeline!')
expect(pipeline).not_to be_persisted
expect(pipeline.errors[:tag]).to eq(['is not included in the list'])
end
end
context 'when merge request is created from a forked project' do
let(:merge_request) do
create(:merge_request,
source_project: project,
source_branch: 'feature',
target_project: target_project,
target_branch: 'master')
end
let(:ref_name) { 'refs/heads/feature' }
let!(:project) { fork_project(target_project, nil, repository: true) }
let!(:target_project) { create(:project, :repository) }
let!(:user) { create(:user) }
before do
project.add_developer(user)
end
it 'creates a legacy detached merge request pipeline in the forked project', :sidekiq_might_not_need_inline do
expect(pipeline).to be_persisted
expect(project.ci_pipelines).to eq([pipeline])
expect(target_project.ci_pipelines).to be_empty
end
end
context "when there are no matched jobs" do
let(:config) do
{
test: {
stage: 'test',
script: 'echo',
except: ['merge_requests']
}
}
end
it 'does not create a detached merge request pipeline', :aggregate_failures do
expect(response).to be_error
expect(response.message).to eq('Pipeline will not run for the selected trigger. ' \
'The rules configuration prevented any jobs from being added to the pipeline.')
expect(pipeline).not_to be_persisted
end
end
end
end
context "when config does not have merge_requests keywords" do
let(:config) do
{
build: {
stage: 'build',
script: 'echo'
},
test: {
stage: 'test',
script: 'echo'
},
pages: {
stage: 'deploy',
script: 'echo'
}
}
end
context 'when merge request is specified' do
let(:merge_request) do
create(:merge_request,
source_project: project,
source_branch: Gitlab::Git.ref_name(ref_name),
target_project: project,
target_branch: 'master')
end
it 'does not create a detached merge request pipeline', :aggregate_failures do
expect(response).to be_error
expect(response.message).to eq('Pipeline will not run for the selected trigger. ' \
'The rules configuration prevented any jobs from being added to the pipeline.')
expect(pipeline).not_to be_persisted
end
end
end
context "when config uses regular expression for only keyword" do
let(:config) do
{
build: {
stage: 'build',
script: 'echo',
only: ["/^#{ref_name}$/"]
}
}
end
context 'when merge request is specified' do
let(:merge_request) do
create(:merge_request,
source_project: project,
source_branch: Gitlab::Git.ref_name(ref_name),
target_project: project,
target_branch: 'master')
end
it 'does not create a detached merge request pipeline', :aggregate_failures do
expect(response).to be_error
expect(response.message).to eq('Pipeline will not run for the selected trigger. ' \
'The rules configuration prevented any jobs from being added to the pipeline.')
expect(pipeline).not_to be_persisted
end
end
end
context "when config uses variables for only keyword" do
let(:config) do
{
build: {
stage: 'build',
script: 'echo',
only: {
variables: %w[$CI]
}
}
}
end
context 'when merge request is specified' do
let(:merge_request) do
create(:merge_request,
source_project: project,
source_branch: Gitlab::Git.ref_name(ref_name),
target_project: project,
target_branch: 'master')
end
it 'does not create a detached merge request pipeline', :aggregate_failures do
expect(response).to be_error
expect(response.message).to eq('Pipeline will not run for the selected trigger. ' \
'The rules configuration prevented any jobs from being added to the pipeline.')
expect(pipeline).not_to be_persisted
end
end
end
context "when config has 'except: [tags]'" do
let(:config) do
{
build: {
stage: 'build',
script: 'echo',
except: ['tags']
}
}
end
context 'when merge request is specified' do
let(:merge_request) do
create(:merge_request,
source_project: project,
source_branch: Gitlab::Git.ref_name(ref_name),
target_project: project,
target_branch: 'master')
end
it 'does not create a detached merge request pipeline', :aggregate_failures do
expect(response).to be_error
expect(response.message).to eq('Pipeline will not run for the selected trigger. ' \
'The rules configuration prevented any jobs from being added to the pipeline.')
expect(pipeline).not_to be_persisted
end
end
end
end
context 'when source is web' do
let(:source) { :web }
let(:merge_request) { nil }
context "when config has merge_requests keywords" do
let(:config) do
{
build: {
stage: 'build',
script: 'echo'
},
test: {
stage: 'test',
script: 'echo',
only: ['merge_requests']
},
pages: {
stage: 'deploy',
script: 'echo',
except: ['merge_requests']
}
}
end
it 'creates a branch pipeline' do
expect(pipeline).to be_persisted
expect(pipeline).to be_web
expect(pipeline.merge_request).to be_nil
expect(pipeline.builds.order(:stage_id).pluck(:name)).to eq(%w[build pages])
end
end
end
end
context 'when needs is used' do
let(:response) { execute_service }
let(:pipeline) { response.payload }
let(:config) do
{
build_a: {
stage: "build",
script: "ls",
only: %w[master]
},
test_a: {
stage: "test",
script: "ls",
only: %w[master feature],
needs: %w[build_a]
},
deploy: {
stage: "deploy",
script: "ls",
only: %w[tags]
}
}
end
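# Editorial note: on master both build_a and test_a match their `only` rules;
# on feature only test_a matches, so its `needs: [build_a]` cannot be
# satisfied and pipeline creation fails with the yaml error asserted below.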
before do
stub_ci_pipeline_yaml_file(YAML.dump(config))
end
context 'when pipeline on master is created' do
let(:ref_name) { 'refs/heads/master' }
it 'creates a pipeline with build_a and test_a' do
expect(pipeline).to be_persisted
expect(pipeline.builds.pluck(:name)).to contain_exactly("build_a", "test_a")
end
it 'bulk inserts all needs' do
expect(Ci::BuildNeed).to receive(:bulk_insert!).and_call_original
expect(pipeline).to be_persisted
end
end
context 'when pipeline on feature is created' do
let(:ref_name) { 'refs/heads/feature' }
shared_examples 'has errors' do
it 'contains the expected errors', :aggregate_failures do
expect(pipeline.builds).to be_empty
error_message = "'test_a' job needs 'build_a' job, but 'build_a' is not in any previous stage"
expect(pipeline.yaml_errors).to eq(error_message)
expect(pipeline.error_messages.map(&:content)).to contain_exactly(error_message)
expect(pipeline.errors[:base]).to contain_exactly(error_message)
end
end
context 'when save_on_errors is enabled' do
let(:response) { execute_service(save_on_errors: true) }
let(:pipeline) { response.payload }
it 'does create a pipeline as test_a depends on build_a', :aggregate_failures do
expect(response).to be_error
expect(response.message).to eq("'test_a' job needs 'build_a' job, but 'build_a' is not in any previous stage")
expect(pipeline).to be_persisted
end
it_behaves_like 'has errors'
end
context 'when save_on_errors is disabled' do
let(:response) { execute_service(save_on_errors: false) }
let(:pipeline) { response.payload }
it 'does not create a pipeline as test_a depends on build_a', :aggregate_failures do
expect(response).to be_error
expect(pipeline).not_to be_persisted
end
it_behaves_like 'has errors'
end
end
context 'when pipeline on v1.0.0 is created' do
let(:ref_name) { 'refs/tags/v1.0.0' }
it 'does create a pipeline only with deploy' do
expect(pipeline).to be_persisted
expect(pipeline.builds.pluck(:name)).to contain_exactly("deploy")
end
end
end
describe 'pipeline components' do
let(:components_project) do
create(:project, :repository, creator: user, namespace: user.namespace)
end
let(:component_path) do
"#{Gitlab.config.gitlab.host}/#{components_project.full_path}/[email protected]"
end
let(:template) do
<<~YAML
spec:
inputs:
stage:
suffix:
default: my-job
---
test-$[[ inputs.suffix ]]:
stage: $[[ inputs.stage ]]
script: run tests
YAML
end
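# Editorial note: the template above uses a `spec:inputs` header separated
# from the job definitions by `---`; `$[[ inputs.* ]]` placeholders are
# interpolated at include time, and `suffix` falls back to its default
# ("my-job"), which is why a "test-my-job" job is expected below.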
let(:sha) do
components_project.repository.create_file(
user,
'my-component/template.yml',
template,
message: 'Add my first CI component',
branch_name: 'master'
)
end
let(:config) do
<<~YAML
include:
- component: #{component_path}
inputs:
stage: my-stage
stages:
- my-stage
test-1:
stage: my-stage
script: run test-1
YAML
end
before do
stub_ci_pipeline_yaml_file(config)
end
context 'when there is no version with specified tag' do
before do
components_project.repository.add_tag(user, 'v0.01', sha)
end
it 'does not create a pipeline' do
response = execute_service(save_on_errors: true)
pipeline = response.payload
expect(pipeline).to be_persisted
expect(pipeline.yaml_errors)
.to include "[email protected]' - content not found"
end
end
context 'when there is a proper revision available' do
before do
components_project.repository.add_tag(user, 'v0.1', sha)
end
context 'when component is valid' do
it 'creates a pipeline using a pipeline component' do
response = execute_service(save_on_errors: true)
pipeline = response.payload
expect(pipeline).to be_persisted
expect(pipeline.yaml_errors).to be_blank
expect(pipeline.statuses.count).to eq 2
expect(pipeline.statuses.map(&:name)).to match_array %w[test-1 test-my-job]
end
end
context 'when interpolation is invalid' do
let(:template) do
<<~YAML
spec:
inputs:
stage:
---
test:
stage: $[[ inputs.stage ]]
script: rspec --suite $[[ inputs.suite ]]
YAML
end
it 'does not create a pipeline' do
response = execute_service(save_on_errors: true)
pipeline = response.payload
expect(pipeline).to be_persisted
expect(pipeline.yaml_errors)
.to include 'interpolation interrupted by errors, unknown interpolation key: `suite`'
end
end
context 'when there is a syntax error in the template' do
let(:template) do
<<~YAML
spec:
inputs:
stage:
---
:test
stage: $[[ inputs.stage ]]
YAML
end
it 'does not create a pipeline' do
response = execute_service(save_on_errors: true)
pipeline = response.payload
expect(pipeline).to be_persisted
expect(pipeline.yaml_errors)
.to include 'mapping values are not allowed'
end
end
end
end
# TODO: Remove this test section when include:with is removed as part of https://gitlab.com/gitlab-org/gitlab/-/issues/408369
describe 'pipeline components using include:with instead of include:inputs' do
let(:components_project) do
create(:project, :repository, creator: user, namespace: user.namespace)
end
let(:component_path) do
"#{Gitlab.config.gitlab.host}/#{components_project.full_path}/[email protected]"
end
let(:template) do
<<~YAML
spec:
inputs:
stage:
suffix:
default: my-job
---
test-$[[ inputs.suffix ]]:
stage: $[[ inputs.stage ]]
script: run tests
YAML
end
let(:sha) do
components_project.repository.create_file(
user,
'my-component/template.yml',
template,
message: 'Add my first CI component',
branch_name: 'master'
)
end
let(:config) do
<<~YAML
include:
- component: #{component_path}
with:
stage: my-stage
stages:
- my-stage
test-1:
stage: my-stage
script: run test-1
YAML
end
before do
stub_ci_pipeline_yaml_file(config)
end
context 'when there is no version with specified tag' do
before do
components_project.repository.add_tag(user, 'v0.01', sha)
end
it 'does not create a pipeline' do
response = execute_service(save_on_errors: true)
pipeline = response.payload
expect(pipeline).to be_persisted
expect(pipeline.yaml_errors)
.to include "[email protected]' - content not found"
end
end
context 'when there is a proper revision available' do
before do
components_project.repository.add_tag(user, 'v0.1', sha)
end
context 'when component is valid' do
it 'creates a pipeline using a pipeline component' do
response = execute_service(save_on_errors: true)
pipeline = response.payload
expect(pipeline).to be_persisted
expect(pipeline.yaml_errors).to be_blank
expect(pipeline.statuses.count).to eq 2
expect(pipeline.statuses.map(&:name)).to match_array %w[test-1 test-my-job]
end
context 'when inputs have a description' do
let(:template) do
<<~YAML
spec:
inputs:
stage:
suffix:
default: my-job
description: description
---
test-$[[ inputs.suffix ]]:
stage: $[[ inputs.stage ]]
script: run tests
YAML
end
it 'creates a pipeline' do
response = execute_service(save_on_errors: true)
pipeline = response.payload
expect(pipeline).to be_persisted
expect(pipeline.yaml_errors).to be_blank
end
end
end
context 'when interpolation is invalid' do
let(:template) do
<<~YAML
spec:
inputs:
stage:
---
test:
stage: $[[ inputs.stage ]]
script: rspec --suite $[[ inputs.suite ]]
YAML
end
it 'does not create a pipeline' do
response = execute_service(save_on_errors: true)
pipeline = response.payload
expect(pipeline).to be_persisted
expect(pipeline.yaml_errors)
.to include 'interpolation interrupted by errors, unknown interpolation key: `suite`'
end
end
context 'when there is a syntax error in the template' do
let(:template) do
<<~YAML
spec:
inputs:
stage:
---
:test
stage: $[[ inputs.stage ]]
YAML
end
it 'does not create a pipeline' do
response = execute_service(save_on_errors: true)
pipeline = response.payload
expect(pipeline).to be_persisted
expect(pipeline.yaml_errors)
.to include 'mapping values are not allowed'
end
end
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class DailyBuildGroupReportResultService
def execute(pipeline)
if DailyBuildGroupReportResult.upsert_reports(coverage_reports(pipeline))
Projects::CiFeatureUsage.insert_usage(
project_id: pipeline.project_id,
feature: :code_coverage,
default_branch: pipeline.default_branch?
)
end
end
private
def coverage_reports(pipeline)
base_attrs = {
project_id: pipeline.project_id,
ref_path: pipeline.source_ref_path,
date: pipeline.created_at.to_date,
last_pipeline_id: pipeline.id,
default_branch: pipeline.default_branch?,
group_id: pipeline.project&.group&.id
}
aggregate(pipeline.builds.with_coverage).map do |group_name, group|
base_attrs.merge(
group_name: group_name,
data: {
'coverage' => average_coverage(group)
}
)
end
end
def aggregate(builds)
builds.group_by(&:group_name)
end
def average_coverage(group)
total_coverage = group.reduce(0.0) { |sum, build| sum + build.coverage }
(total_coverage / group.size).round(2)
end
end
end
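# Illustrative example (not part of the original class): two "test" builds
# with coverage 70 and 80 are grouped under "test" by #aggregate, and
# #average_coverage returns ((70.0 + 80.0) / 2).round(2) #=> 75.0, which is
# stored as data: { 'coverage' => 75.0 }.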
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::DailyBuildGroupReportResultService, '#execute', feature_category: :continuous_integration do
let_it_be(:group) { create(:group, :private) }
let_it_be(:pipeline) { create(:ci_pipeline, project: create(:project, group: group), created_at: '2020-02-06 00:01:10') }
let_it_be(:rspec_job) { create(:ci_build, pipeline: pipeline, name: 'rspec 3/3', coverage: 80) }
let_it_be(:karma_job) { create(:ci_build, pipeline: pipeline, name: 'karma 2/2', coverage: 90) }
let_it_be(:extra_job) { create(:ci_build, pipeline: pipeline, name: 'extra', coverage: nil) }
let(:coverages) { Ci::DailyBuildGroupReportResult.all }
it 'creates daily code coverage record for each job in the pipeline that has coverage value' do
described_class.new.execute(pipeline)
Ci::DailyBuildGroupReportResult.find_by(group_name: 'rspec').tap do |coverage|
expect(coverage).to have_attributes(
project_id: pipeline.project.id,
last_pipeline_id: pipeline.id,
ref_path: pipeline.source_ref_path,
group_name: rspec_job.group_name,
data: { 'coverage' => rspec_job.coverage },
date: pipeline.created_at.to_date,
group_id: group.id
)
end
Ci::DailyBuildGroupReportResult.find_by(group_name: 'karma').tap do |coverage|
expect(coverage).to have_attributes(
project_id: pipeline.project.id,
last_pipeline_id: pipeline.id,
ref_path: pipeline.source_ref_path,
group_name: karma_job.group_name,
data: { 'coverage' => karma_job.coverage },
date: pipeline.created_at.to_date,
group_id: group.id
)
end
expect(Ci::DailyBuildGroupReportResult.find_by(group_name: 'extra')).to be_nil
end
it 'creates a project_ci_feature_usage record for the pipeline project' do
described_class.new.execute(pipeline)
expect(Projects::CiFeatureUsage.count).to eq(1)
expect(Projects::CiFeatureUsage.first).to have_attributes(
project_id: pipeline.project.id,
feature: 'code_coverage',
default_branch: false
)
end
context 'when there are multiple builds with the same group name that report coverage' do
let!(:test_job_1) { create(:ci_build, pipeline: pipeline, name: 'test 1/2', coverage: 70) }
let!(:test_job_2) { create(:ci_build, pipeline: pipeline, name: 'test 2/2', coverage: 80) }
it 'creates daily code coverage record with the average as the value' do
described_class.new.execute(pipeline)
Ci::DailyBuildGroupReportResult.find_by(group_name: 'test').tap do |coverage|
expect(coverage).to have_attributes(
project_id: pipeline.project.id,
last_pipeline_id: pipeline.id,
ref_path: pipeline.source_ref_path,
group_name: test_job_2.group_name,
data: { 'coverage' => 75.0 },
date: pipeline.created_at.to_date
)
end
end
end
context 'when there is an existing daily code coverage for the matching date, project, ref_path, and group name' do
let!(:new_pipeline) do
create(
:ci_pipeline,
project: pipeline.project,
ref: pipeline.ref,
created_at: '2020-02-06 00:02:20'
)
end
let!(:new_rspec_job) { create(:ci_build, pipeline: new_pipeline, name: 'rspec 4/4', coverage: 84) }
let!(:new_karma_job) { create(:ci_build, pipeline: new_pipeline, name: 'karma 3/3', coverage: 92) }
before do
# Create the existing daily code coverage records
described_class.new.execute(pipeline)
end
it "updates the existing record's coverage value and last_pipeline_id" do
rspec_coverage = Ci::DailyBuildGroupReportResult.find_by(group_name: 'rspec')
karma_coverage = Ci::DailyBuildGroupReportResult.find_by(group_name: 'karma')
# Bump up the coverage values
described_class.new.execute(new_pipeline)
rspec_coverage.reload
karma_coverage.reload
expect(rspec_coverage).to have_attributes(
last_pipeline_id: new_pipeline.id,
data: { 'coverage' => new_rspec_job.coverage }
)
expect(karma_coverage).to have_attributes(
last_pipeline_id: new_pipeline.id,
data: { 'coverage' => new_karma_job.coverage }
)
end
it 'does not create a new project_ci_feature_usage record for the pipeline project' do
expect { described_class.new.execute(pipeline) }.not_to change { Projects::CiFeatureUsage.count }
expect(Projects::CiFeatureUsage.first).to have_attributes(
project_id: pipeline.project.id,
feature: 'code_coverage',
default_branch: false
)
end
end
context 'when the ID of the pipeline is older than the last_pipeline_id' do
let!(:new_pipeline) do
create(
:ci_pipeline,
project: pipeline.project,
ref: pipeline.ref,
created_at: '2020-02-06 00:02:20'
)
end
let!(:new_rspec_job) { create(:ci_build, pipeline: new_pipeline, name: 'rspec 4/4', coverage: 84) }
let!(:new_karma_job) { create(:ci_build, pipeline: new_pipeline, name: 'karma 3/3', coverage: 92) }
before do
# Create the existing daily code coverage records
# but in this case, for the newer pipeline first.
described_class.new.execute(new_pipeline)
end
it 'updates the existing daily code coverage records' do
rspec_coverage = Ci::DailyBuildGroupReportResult.find_by(group_name: 'rspec')
karma_coverage = Ci::DailyBuildGroupReportResult.find_by(group_name: 'karma')
# Run another one but for the older pipeline.
# This simulates the scenario wherein the success worker
# of an older pipeline, for some network hiccup, was delayed
# and only got executed right after the newer pipeline's success worker.
# Ideally, we don't want to bump the coverage value with an older one
# but given this can be a rare edge case and can be remedied by re-running
# the pipeline we'll just let it be for now. In return, we are able to use
# Rails 6 shiny new method, upsert_all, and simplify the code a lot.
described_class.new.execute(pipeline)
rspec_coverage.reload
karma_coverage.reload
expect(rspec_coverage).to have_attributes(
last_pipeline_id: pipeline.id,
data: { 'coverage' => rspec_job.coverage }
)
expect(karma_coverage).to have_attributes(
last_pipeline_id: pipeline.id,
data: { 'coverage' => karma_job.coverage }
)
end
end
context 'when pipeline has no builds with coverage' do
let!(:new_pipeline) do
create(
:ci_pipeline,
created_at: '2020-02-06 00:02:20'
)
end
let!(:some_job) { create(:ci_build, pipeline: new_pipeline, name: 'foo') }
it 'does nothing' do
expect { described_class.new.execute(new_pipeline) }.not_to raise_error
expect(Ci::DailyBuildGroupReportResult.count).to eq(0)
expect(Projects::CiFeatureUsage.count).to eq(0)
end
end
context 'when pipeline ref_path is the project default branch' do
let(:default_branch) { 'master' }
before do
allow(pipeline.project).to receive(:default_branch).and_return(default_branch)
end
it 'sets default branch to true' do
described_class.new.execute(pipeline)
coverages.each do |coverage|
expect(coverage.default_branch).to be_truthy
end
end
it 'creates a project_ci_feature_usage record for the pipeline project for default branch' do
described_class.new.execute(pipeline)
expect(Projects::CiFeatureUsage.count).to eq(1)
expect(Projects::CiFeatureUsage.first).to have_attributes(
project_id: pipeline.project.id,
feature: 'code_coverage',
default_branch: true
)
end
end
context 'when pipeline ref_path is not the project default branch' do
it 'sets default branch to false' do
described_class.new.execute(pipeline)
coverages.each do |coverage|
expect(coverage.default_branch).to be_falsey
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class EnqueueJobService
attr_accessor :job, :current_user, :variables
def initialize(job, current_user:, variables: nil)
@job = job
@current_user = current_user
@variables = variables
end
def execute(&transition)
transition ||= ->(job) { job.enqueue! }
Gitlab::OptimisticLocking.retry_lock(job, name: 'ci_enqueue_job') do |job|
job.user = current_user
job.job_variables_attributes = variables if variables
transition.call(job)
end
ResetSkippedJobsService.new(job.project, current_user).execute(job)
job
end
end
end
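# Editorial note: Gitlab::OptimisticLocking.retry_lock re-runs the block
# (reloading the record) when ActiveRecord::StaleObjectError is raised, so
# the user and variables are re-assigned on every attempt before the
# transition is called again.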
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::EnqueueJobService, '#execute', feature_category: :continuous_integration do
let_it_be(:project) { create(:project) }
let(:user) { create(:user, developer_projects: [project]) }
let(:pipeline) { create(:ci_pipeline, project: project) }
let(:build) { create(:ci_build, :manual, pipeline: pipeline) }
let(:service) do
described_class.new(build, current_user: user)
end
subject(:execute) { service.execute }
it 'assigns the user to the job' do
expect { execute }.to change { build.reload.user }.to(user)
end
it 'calls enqueue!' do
expect(build).to receive(:enqueue!)
execute
end
it 'calls Ci::ResetSkippedJobsService' do
expect_next_instance_of(Ci::ResetSkippedJobsService) do |service|
expect(service).to receive(:execute).with(build)
end
execute
end
it 'returns the job' do
expect(execute).to eq(build)
end
context 'when variables are supplied' do
let(:job_variables) do
[{ key: 'first', secret_value: 'first' },
{ key: 'second', secret_value: 'second' }]
end
let(:service) do
described_class.new(build, current_user: user, variables: job_variables)
end
it 'assigns the variables to the job' do
execute
expect(build.reload.job_variables.map(&:key)).to contain_exactly('first', 'second')
end
end
context 'when the job transition is invalid' do
let(:bridge) { create(:ci_bridge, :failed, pipeline: pipeline, project: project) }
let(:service) do
described_class.new(bridge, current_user: user)
end
it 'raises StateMachines::InvalidTransition' do
expect { execute }.to raise_error StateMachines::InvalidTransition
end
end
context 'when a transition block is supplied' do
let(:bridge) { create(:ci_bridge, :playable, pipeline: pipeline) }
let(:service) do
described_class.new(bridge, current_user: user)
end
subject(:execute) { service.execute(&:pending!) }
it 'calls the transition block instead of enqueue!' do
expect(bridge).to receive(:pending!)
expect(bridge).not_to receive(:enqueue!)
execute
end
end
context 'when the job is manually triggered by another user' do
let(:job_variables) do
[{ key: 'third', secret_value: 'third' },
{ key: 'fourth', secret_value: 'fourth' }]
end
let(:service) do
described_class.new(build, current_user: user, variables: job_variables)
end
it 'assigns the user and variables to the job', :aggregate_failures do
called = false
service.execute do
unless called
called = true
raise ActiveRecord::StaleObjectError
end
build.enqueue!
end
build.reload
expect(called).to be true # ensure we actually entered the failure path
expect(build.user).to eq(user)
expect(build.job_variables.map(&:key)).to contain_exactly('third', 'fourth')
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class BuildUnscheduleService
def initialize(build, user)
@build = build
@user = user
end
def execute
return forbidden unless allowed?
return unprocessable_entity unless build.scheduled?
build.unschedule!
ServiceResponse.success(payload: build)
end
private
attr_reader :build, :user
def allowed?
user.can?(:update_build, build)
end
def forbidden
ServiceResponse.error(message: 'Forbidden', http_status: :forbidden)
end
def unprocessable_entity
ServiceResponse.error(message: 'Unprocessable entity', http_status: :unprocessable_entity)
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::BuildUnscheduleService, feature_category: :continuous_integration do
let_it_be(:user) { create(:user) }
let_it_be(:project) { create(:project) }
let_it_be(:pipeline) { create(:ci_pipeline, project: project) }
describe '#execute' do
subject(:execute) { described_class.new(build, user).execute }
context 'when user is authorized to unschedule the build' do
before do
project.add_maintainer(user)
end
context 'when build is scheduled' do
let!(:build) { create(:ci_build, :scheduled, pipeline: pipeline) }
it 'transits build to manual' do
response = execute
expect(response).to be_success
expect(response.payload.reload).to be_manual
end
end
context 'when build is not scheduled' do
let!(:build) { create(:ci_build, pipeline: pipeline) }
it 'responds with unprocessable entity', :aggregate_failures do
response = execute
expect(response).to be_error
expect(response.http_status).to eq(:unprocessable_entity)
end
end
end
context 'when user is not authorized to unschedule the build' do
let!(:build) { create(:ci_build, :scheduled, pipeline: pipeline) }
it 'responds with forbidden', :aggregate_failures do
response = execute
expect(response).to be_error
expect(response.http_status).to eq(:forbidden)
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class ProcessBuildService < BaseService
def execute(processable, current_status)
if valid_statuses_for_processable(processable).include?(current_status)
process(processable)
true
else
processable.skip
false
end
end
private
def process(processable)
return enqueue(processable) if processable.enqueue_immediately?
if processable.schedulable?
processable.schedule
elsif processable.action?
processable.actionize
else
enqueue(processable)
end
end
def enqueue(processable)
return processable.drop!(:failed_outdated_deployment_job) if processable.outdated_deployment?
processable.enqueue
end
def valid_statuses_for_processable(processable)
case processable.when
when 'on_success', 'manual', 'delayed'
processable.scheduling_type_dag? ? %w[success] : %w[success skipped]
when 'on_failure'
%w[failed]
when 'always'
%w[success failed skipped]
else
[]
end
end
end
end
Ci::ProcessBuildService.prepend_mod_with('Ci::ProcessBuildService')
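# Illustrative summary (derived from the class above, not part of it):
#   when: on_success/manual/delayed -> proceeds on %w[success] (DAG) or %w[success skipped]
#   when: on_failure                -> proceeds on %w[failed]
#   when: always                    -> proceeds on %w[success failed skipped]
# Any other current status causes the processable to be skipped.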
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::ProcessBuildService, '#execute', feature_category: :continuous_integration do
using RSpec::Parameterized::TableSyntax
let_it_be(:user) { create(:user) }
let_it_be(:project) { create(:project) }
let_it_be(:pipeline) { create(:ci_pipeline, ref: 'master', project: project) }
subject { described_class.new(project, user).execute(build, current_status) }
before_all do
project.add_maintainer(user)
end
shared_context 'with enqueue_immediately set' do
before do
build.set_enqueue_immediately!
end
end
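# Editorial note: `set_enqueue_immediately!` flags the processable so that
# `#process` enqueues it directly instead of scheduling or actionizing it,
# which is why retry_after_status differs from after_status for the
# manual and delayed rows below.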
context 'for single build' do
let!(:build) { create(:ci_build, *[trait].compact, :created, **conditions, pipeline: pipeline) }
where(:trait, :conditions, :current_status, :after_status, :retry_after_status) do
nil | { when: :on_success } | 'success' | 'pending' | 'pending'
nil | { when: :on_success } | 'skipped' | 'pending' | 'pending'
nil | { when: :on_success } | 'failed' | 'skipped' | 'skipped'
nil | { when: :on_failure } | 'success' | 'skipped' | 'skipped'
nil | { when: :on_failure } | 'skipped' | 'skipped' | 'skipped'
nil | { when: :on_failure } | 'failed' | 'pending' | 'pending'
nil | { when: :always } | 'success' | 'pending' | 'pending'
nil | { when: :always } | 'skipped' | 'pending' | 'pending'
nil | { when: :always } | 'failed' | 'pending' | 'pending'
:actionable | { when: :manual } | 'success' | 'manual' | 'pending'
:actionable | { when: :manual } | 'skipped' | 'manual' | 'pending'
:actionable | { when: :manual } | 'failed' | 'skipped' | 'skipped'
:schedulable | { when: :delayed } | 'success' | 'scheduled' | 'pending'
:schedulable | { when: :delayed } | 'skipped' | 'scheduled' | 'pending'
:schedulable | { when: :delayed } | 'failed' | 'skipped' | 'skipped'
end
with_them do
it 'updates the job status to after_status' do
expect { subject }.to change { build.status }.to(after_status)
end
context 'when build is set to enqueue immediately' do
include_context 'with enqueue_immediately set'
it 'updates the job status to retry_after_status' do
expect { subject }.to change { build.status }.to(retry_after_status)
end
end
end
end
context 'when build is scheduled with DAG' do
let!(:build) do
create(
:ci_build,
*[trait].compact,
:dependent,
:created,
when: build_when,
pipeline: pipeline,
needed: other_build
)
end
let!(:other_build) { create(:ci_build, :created, when: :on_success, pipeline: pipeline) }
where(:trait, :build_when, :current_status, :after_status, :retry_after_status) do
nil | :on_success | 'success' | 'pending' | 'pending'
nil | :on_success | 'skipped' | 'skipped' | 'skipped'
nil | :manual | 'success' | 'manual' | 'pending'
nil | :manual | 'skipped' | 'skipped' | 'skipped'
nil | :delayed | 'success' | 'manual' | 'pending'
nil | :delayed | 'skipped' | 'skipped' | 'skipped'
:schedulable | :delayed | 'success' | 'scheduled' | 'pending'
:schedulable | :delayed | 'skipped' | 'skipped' | 'skipped'
end
with_them do
it 'updates the job status to after_status' do
expect { subject }.to change { build.status }.to(after_status)
end
context 'when build is set to enqueue immediately' do
include_context 'with enqueue_immediately set'
it 'updates the job status to retry_after_status' do
expect { subject }.to change { build.status }.to(retry_after_status)
end
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class DisableUserPipelineSchedulesService
def execute(user)
Ci::PipelineSchedule.active.owned_by(user).each_batch do |relation|
relation.update_all(active: false)
end
end
end
end
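# Editorial note: each_batch iterates the relation in primary-key batches,
# and update_all issues a single UPDATE per batch, bypassing ActiveRecord
# callbacks and validations.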
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::DisableUserPipelineSchedulesService, feature_category: :continuous_integration do
describe '#execute' do
let(:user) { create(:user) }
subject(:service) { described_class.new.execute(user) }
context 'when user has active pipeline schedules' do
let(:owned_pipeline_schedule) { create(:ci_pipeline_schedule, active: true, owner: user) }
it 'disables all active pipeline schedules', :aggregate_failures do
expect { service }.to change { owned_pipeline_schedule.reload.active? }
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class BuildReportResultService
include Gitlab::Utils::UsageData
EVENT_NAME = 'i_testing_test_case_parsed'
def execute(build)
return unless build.has_test_reports?
test_suite = generate_test_suite_report(build)
track_test_cases(build, test_suite)
build.report_results.create!(
project_id: build.project_id,
data: tests_params(test_suite)
)
end
private
def generate_test_suite_report(build)
test_report = build.collect_test_reports!(Gitlab::Ci::Reports::TestReport.new)
test_report.get_suite(build.test_suite_name)
end
def tests_params(test_suite)
{
tests: {
name: test_suite.name,
duration: test_suite.total_time,
failed: test_suite.failed_count,
errored: test_suite.error_count,
skipped: test_suite.skipped_count,
success: test_suite.success_count,
suite_error: test_suite.suite_error
}
}
end
def track_test_cases(build, test_suite)
track_usage_event(EVENT_NAME, test_case_hashes(build, test_suite))
end
def test_case_hashes(build, test_suite)
[].tap do |hashes|
test_suite.each_test_case do |test_case|
key = "#{build.project_id}-#{test_case.key}"
hashes << Digest::SHA256.hexdigest(key)
end
end
end
end
end
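# Editorial sketch: each test case is hashed as
# Digest::SHA256.hexdigest("#{project_id}-#{test_case.key}") before being
# sent to the usage counter, so the event counts unique project/test-case
# pairs rather than raw parse events.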
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::BuildReportResultService, feature_category: :continuous_integration do
describe '#execute', :clean_gitlab_redis_shared_state do
subject(:build_report_result) { described_class.new.execute(build) }
context 'when build is finished' do
let(:build) { create(:ci_build, :success, :test_reports) }
it 'creates a build report result entry', :aggregate_failures do
expect { build_report_result }.to change { Ci::BuildReportResult.count }.by(1)
expect(build_report_result.tests_name).to eq("test")
expect(build_report_result.tests_success).to eq(2)
expect(build_report_result.tests_failed).to eq(2)
expect(build_report_result.tests_errored).to eq(0)
expect(build_report_result.tests_skipped).to eq(0)
expect(build_report_result.tests_duration).to eq(0.010284)
end
it 'tracks unique test cases parsed' do
build_report_result
unique_test_cases_parsed = Gitlab::UsageDataCounters::HLLRedisCounter.unique_events(
event_names: described_class::EVENT_NAME,
start_date: 2.weeks.ago,
end_date: 2.weeks.from_now
)
expect(unique_test_cases_parsed).to eq(4)
end
context 'and build has test report parsing errors' do
let(:build) { create(:ci_build, :success, :broken_test_reports) }
it 'creates a build report result entry with suite error', :aggregate_failures do
expect { build_report_result }.to change { Ci::BuildReportResult.count }.by(1)
expect(build_report_result.tests_name).to eq("test")
expect(build_report_result.tests_success).to eq(0)
expect(build_report_result.tests_failed).to eq(0)
expect(build_report_result.tests_errored).to eq(0)
expect(build_report_result.tests_skipped).to eq(0)
expect(build_report_result.tests_duration).to eq(0)
expect(build_report_result.suite_error).to be_present
end
it 'does not track unique test cases parsed' do
build_report_result
unique_test_cases_parsed = Gitlab::UsageDataCounters::HLLRedisCounter.unique_events(
event_names: described_class::EVENT_NAME,
start_date: 2.weeks.ago,
end_date: 2.weeks.from_now
)
expect(unique_test_cases_parsed).to eq(0)
end
end
context 'when data has already been persisted' do
it 'raises an error and does not persist the same data twice' do
expect { 2.times { described_class.new.execute(build) } }.to raise_error(ActiveRecord::RecordNotUnique)
expect(Ci::BuildReportResult.count).to eq(1)
end
end
end
context 'when build is running and test report does not exist' do
let(:build) { create(:ci_build, :running) }
it 'does not persist data' do
subject
expect(Ci::BuildReportResult.count).to eq(0)
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class DestroySecureFileService < BaseService
def execute(secure_file)
raise Gitlab::Access::AccessDeniedError unless can?(current_user, :admin_secure_files, secure_file.project)
secure_file.destroy!
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe ::Ci::DestroySecureFileService, feature_category: :continuous_integration do
let_it_be(:maintainer_user) { create(:user) }
let_it_be(:developer_user) { create(:user) }
let_it_be(:project) { create(:project) }
let_it_be(:secure_file) { create(:ci_secure_file, project: project) }
let_it_be(:project_member) { create(:project_member, :maintainer, user: maintainer_user, project: project) }
let_it_be(:project_member2) { create(:project_member, :developer, user: developer_user, project: project) }
subject { described_class.new(project, user).execute(secure_file) }
context 'user is a maintainer' do
let(:user) { maintainer_user }
it 'destroys the secure file' do
subject
expect { secure_file.reload }.to raise_error(ActiveRecord::RecordNotFound)
end
end
context 'user is a developer' do
let(:user) { developer_user }
it 'raises an exception' do
expect { subject }.to raise_error(Gitlab::Access::AccessDeniedError)
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class PrepareBuildService
attr_reader :build
def initialize(build)
@build = build
end
def execute
prerequisites.each(&:complete!)
build.enqueue_preparing!
rescue StandardError => e
Gitlab::ErrorTracking.track_exception(e, build_id: build.id)
build.drop(:unmet_prerequisites)
end
private
def prerequisites
build.prerequisites
end
end
end
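# Editorial note: any StandardError raised while completing prerequisites or
# enqueueing (including an invalid state transition from enqueue_preparing!)
# is tracked and the build is dropped with :unmet_prerequisites.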
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::PrepareBuildService, feature_category: :continuous_integration do
describe '#execute' do
let(:build) { create(:ci_build, :preparing) }
subject { described_class.new(build).execute }
before do
allow(build).to receive(:prerequisites).and_return(prerequisites)
end
shared_examples 'build enqueueing' do
it 'enqueues the build' do
expect(build).to receive(:enqueue_preparing!).once
subject
end
end
context 'build has unmet prerequisites' do
let(:prerequisite) { double(complete!: true) }
let(:prerequisites) { [prerequisite] }
it 'completes each prerequisite' do
expect(prerequisites).to all(receive(:complete!))
subject
end
include_examples 'build enqueueing'
context 'prerequisites fail to complete' do
before do
allow(build).to receive(:enqueue_preparing!)
.and_raise(StateMachines::InvalidTransition.new(build, build.class.state_machines[:status], :enqueue_preparing))
end
it 'drops the build' do
expect(build).to receive(:drop).with(:unmet_prerequisites).once
subject
end
end
context 'prerequisites raise an error' do
before do
allow(prerequisite).to receive(:complete!).and_raise Kubeclient::HttpError.new(401, 'unauthorized', nil)
end
it 'drops the build and notifies Sentry' do
expect(build).to receive(:drop).with(:unmet_prerequisites).once
expect(Gitlab::ErrorTracking).to receive(:track_exception)
.with(instance_of(Kubeclient::HttpError), hash_including(build_id: build.id))
subject
end
end
end
context 'build has no prerequisites' do
let(:prerequisites) { [] }
include_examples 'build enqueueing'
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class PipelineBridgeStatusService < ::BaseService
def execute(pipeline)
return unless pipeline.bridge_triggered?
begin
pipeline.source_bridge.inherit_status_from_downstream!(pipeline)
rescue StateMachines::InvalidTransition => e
Gitlab::ErrorTracking.track_exception(
Ci::Bridge::InvalidTransitionError.new(e.message),
bridge_id: pipeline.source_bridge.id,
downstream_pipeline_id: pipeline.id)
end
end
end
end
Ci::PipelineBridgeStatusService.prepend_mod_with('Ci::PipelineBridgeStatusService')
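# Editorial note: inherit_status_from_downstream! mirrors the downstream
# pipeline status onto the upstream bridge job; an invalid status transition
# is not re-raised, only reported to error tracking.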
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::PipelineBridgeStatusService, feature_category: :continuous_integration do
let(:user) { build(:user) }
let_it_be(:project) { create(:project) }
let(:pipeline) { build(:ci_pipeline, project: project) }
describe '#execute' do
subject { described_class.new(project, user).execute(pipeline) }
context 'when pipeline has upstream bridge' do
let(:bridge) { build(:ci_bridge) }
before do
pipeline.source_bridge = bridge
end
it 'calls inherit_status_from_downstream on upstream bridge' do
expect(bridge).to receive(:inherit_status_from_downstream!).with(pipeline)
subject
end
context 'when bridge job status raises state machine errors' do
before do
pipeline.drop!
bridge.drop!
end
it 'tracks the exception' do
expect(Gitlab::ErrorTracking)
.to receive(:track_exception)
.with(
instance_of(Ci::Bridge::InvalidTransitionError),
bridge_id: bridge.id,
downstream_pipeline_id: pipeline.id)
subject
end
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
# This class responsible for assigning
# proper pending build to runner on runner API request
class RegisterJobService
include ::Gitlab::Ci::Artifacts::Logger
attr_reader :runner, :runner_manager, :metrics
TEMPORARY_LOCK_TIMEOUT = 3.seconds
Result = Struct.new(:build, :build_json, :build_presented, :valid?)
##
# The queue depth limit number has been determined by observing the 95th
# percentile of effective queue depth on gitlab.com. This is only likely to
# affect 5% of the worst-case scenarios.
MAX_QUEUE_DEPTH = 45
def initialize(runner, runner_manager)
@runner = runner
@runner_manager = runner_manager
@metrics = ::Gitlab::Ci::Queue::Metrics.new(runner)
end
def execute(params = {})
replica_caught_up =
::Ci::Runner.sticking.find_caught_up_replica(:runner, runner.id, use_primary_on_failure: false)
@metrics.increment_queue_operation(:queue_attempt)
result = @metrics.observe_queue_time(:process, @runner.runner_type) do
process_queue(params)
end
# Since we execute this query against a replica it might lead to a false positive:
# we might receive the response "hi, we don't have any more builds for you"
# when that is not true. If our DB replica is not up-to-date with the moment the
# runner event was generated, we might still have some CI builds to be picked.
# Instead we should say to the runner: "Hi, we don't have any more builds now,
# but not everything is right anyway, so try again". The runner will retry, but
# again against a replica, and will again check whether replication lag caught up.
if !replica_caught_up && !result.build
metrics.increment_queue_operation(:queue_replication_lag)
::Ci::RegisterJobService::Result.new(nil, nil, nil, false) # rubocop:disable Cop/AvoidReturnFromBlocks
else
result
end
end
private
def process_queue(params)
valid = true
depth = 0
each_build(params) do |build|
depth += 1
@metrics.increment_queue_operation(:queue_iteration)
if depth > max_queue_depth
@metrics.increment_queue_operation(:queue_depth_limit)
valid = false
break
end
# We read builds from replicas
# It is likely that some other concurrent connection is processing
# a given build at a given moment. To avoid an expensive compute
# we perform an exclusive lease on Redis to acquire a build temporarily
unless acquire_temporary_lock(build.id)
@metrics.increment_queue_operation(:build_temporary_locked)
# We failed to acquire lock
# - our queue is not complete as some resources are locked temporarily
# - we need to re-process it again to ensure that all builds are handled
valid = false
next
end
result = process_build(build, params)
next unless result
if result.valid?
@metrics.register_success(result.build_presented)
@metrics.observe_queue_depth(:found, depth)
return result # rubocop:disable Cop/AvoidReturnFromBlocks
else
# The usage of valid: is described in
# handling of ActiveRecord::StaleObjectError
valid = false
end
end
@metrics.increment_queue_operation(:queue_conflict) unless valid
@metrics.observe_queue_depth(:conflict, depth) unless valid
@metrics.observe_queue_depth(:not_found, depth) if valid
@metrics.register_failure
Result.new(nil, nil, nil, valid)
end
# rubocop: disable CodeReuse/ActiveRecord
def each_build(params, &blk)
queue = ::Ci::Queue::BuildQueueService.new(runner)
builds = if runner.instance_type?
queue.builds_for_shared_runner
elsif runner.group_type?
queue.builds_for_group_runner
else
queue.builds_for_project_runner
end
if runner.ref_protected?
builds = queue.builds_for_protected_runner(builds)
end
# pick builds that do not have tags other than the runner's
builds = queue.builds_matching_tag_ids(builds, runner.tags.ids)
# pick builds that have at least one tag
unless runner.run_untagged?
builds = queue.builds_with_any_tags(builds)
end
build_ids = retrieve_queue(-> { queue.execute(builds) })
@metrics.observe_queue_size(-> { build_ids.size }, @runner.runner_type)
build_ids.each { |build_id| yield Ci::Build.find(build_id) }
end
# rubocop: enable CodeReuse/ActiveRecord
def retrieve_queue(queue_query_proc)
##
# We want to reset a load balancing session to discard the side
# effects of writes that could have happened prior to this moment.
#
::Gitlab::Database::LoadBalancing::Session.clear_session
@metrics.observe_queue_time(:retrieve, @runner.runner_type) do
queue_query_proc.call
end
end
def process_build(build, params)
unless build.pending?
@metrics.increment_queue_operation(:build_not_pending)
##
# If this build can not be picked because we had stale data in
# `ci_pending_builds` table, we need to respond with 409 to retry
# this operation.
#
if ::Ci::UpdateBuildQueueService.new.remove!(build)
return Result.new(nil, nil, nil, false)
end
return
end
if runner.matches_build?(build)
@metrics.increment_queue_operation(:build_can_pick)
else
@metrics.increment_queue_operation(:build_not_pick)
return
end
# In case when 2 runners try to assign the same build, second runner will be declined
# with StateMachines::InvalidTransition or StaleObjectError when doing run! or save method.
if assign_runner!(build, params)
present_build!(build)
end
rescue ActiveRecord::StaleObjectError
# We are looping to find another build that is not conflicting
# It also indicates that this build can be picked and passed to runner.
# If we don't do it, basically a bunch of runners would be competing for a build
# and thus we will generate a lot of 409. This will increase
# the number of generated requests, also will reduce significantly
# how many builds can be picked by runner in a unit of time.
# In case we hit the concurrency-access lock,
# we still have to return 409 in the end,
# to make sure that this is properly handled by runner.
@metrics.increment_queue_operation(:build_conflict_lock)
Result.new(nil, nil, nil, false)
rescue StateMachines::InvalidTransition
@metrics.increment_queue_operation(:build_conflict_transition)
Result.new(nil, nil, nil, false)
rescue StandardError => ex
@metrics.increment_queue_operation(:build_conflict_exception)
# If an error (e.g. GRPC::DeadlineExceeded) occurred constructing
# the result, consider this as a failure to be retried.
scheduler_failure!(build)
track_exception_for_build(ex, build)
# skip, and move to next one
nil
end
def max_queue_depth
MAX_QUEUE_DEPTH
end
# Force variables evaluation to occur now
def present_build!(build)
# We need to use the presenter here because Gitaly calls in the presenter
# may fail, and we need to ensure the response has been generated.
presented_build = ::Ci::BuildRunnerPresenter.new(build) # rubocop:disable CodeReuse/Presenter
log_artifacts_context(build)
log_build_dependencies_size(presented_build)
build_json = Gitlab::Json.dump(::API::Entities::Ci::JobRequest::Response.new(presented_build))
Result.new(build, build_json, presented_build, true)
end
def log_build_dependencies_size(presented_build)
return unless ::Feature.enabled?(:ci_build_dependencies_artifacts_logger, type: :ops)
presented_build.all_dependencies.then do |dependencies|
size = dependencies.sum do |build|
build.available_artifacts? ? build.artifacts_file.size : 0
end
log_build_dependencies(size: size, count: dependencies.size) if size > 0
end
end
def assign_runner!(build, params)
build.runner_id = runner.id
build.runner_session_attributes = params[:session] if params[:session].present?
failure_reason, _ = pre_assign_runner_checks.find { |_, check| check.call(build, params) }
if failure_reason
@metrics.increment_queue_operation(:runner_pre_assign_checks_failed)
build.drop!(failure_reason)
else
@metrics.increment_queue_operation(:runner_pre_assign_checks_success)
build.run!
build.runner_manager = runner_manager if runner_manager
end
!failure_reason
end
def acquire_temporary_lock(build_id)
return true if Feature.disabled?(:ci_register_job_temporary_lock, runner, type: :ops)
key = "build/register/#{build_id}"
Gitlab::ExclusiveLease
.new(key, timeout: TEMPORARY_LOCK_TIMEOUT.to_i)
.try_obtain
end
def scheduler_failure!(build)
Gitlab::OptimisticLocking.retry_lock(build, 3, name: 'register_job_scheduler_failure') do |subject|
subject.drop!(:scheduler_failure)
end
rescue StandardError => ex
build.doom!
# This requires an extra exception, otherwise we would lose the information
# about why we cannot perform `scheduler_failure`
track_exception_for_build(ex, build)
end
def track_exception_for_build(ex, build)
Gitlab::ErrorTracking.track_exception(ex,
build_id: build.id,
build_name: build.name,
build_stage: build.stage_name,
pipeline_id: build.pipeline_id,
project_id: build.project_id
)
end
def pre_assign_runner_checks
{
missing_dependency_failure: -> (build, _) { !build.has_valid_build_dependencies? },
runner_unsupported: -> (build, params) { !build.supported_runner?(params.dig(:info, :features)) },
archived_failure: -> (build, _) { build.archived? },
project_deleted: -> (build, _) { build.project.pending_delete? },
builds_disabled: -> (build, _) { !build.project.builds_enabled? },
user_blocked: -> (build, _) { build.user&.blocked? }
}
end
end
end
Ci::RegisterJobService.prepend_mod_with('Ci::RegisterJobService')
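# Editorial note (assumption about the runner API contract): a Result with
# valid? == false makes the register endpoint respond 409 Conflict so the
# runner retries, while a nil build with valid? == true means "no job
# available".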
``` | # frozen_string_literal: true
require 'spec_helper'
module Ci
RSpec.describe RegisterJobService, feature_category: :continuous_integration do
let_it_be(:group) { create(:group) }
let_it_be_with_reload(:project) { create(:project, group: group, shared_runners_enabled: false, group_runners_enabled: false) }
let_it_be_with_reload(:pipeline) { create(:ci_pipeline, project: project) }
let_it_be(:shared_runner) { create(:ci_runner, :instance) }
let!(:project_runner) { create(:ci_runner, :project, projects: [project]) }
let!(:group_runner) { create(:ci_runner, :group, groups: [group]) }
let!(:pending_job) { create(:ci_build, :pending, :queued, pipeline: pipeline) }
describe '#execute' do
subject(:execute) { described_class.new(runner, runner_manager).execute }
let(:runner_manager) { nil }
context 'checks database loadbalancing stickiness' do
let(:runner) { shared_runner }
before do
project.update!(shared_runners_enabled: false)
end
it 'result is valid if replica caught up', :aggregate_failures do
expect(ApplicationRecord.sticking).to receive(:find_caught_up_replica)
.with(:runner, runner.id, use_primary_on_failure: false)
.and_return(true)
expect { execute }.not_to change { Ci::RunnerManagerBuild.count }.from(0)
expect(execute).to be_valid
expect(execute.build).to be_nil
expect(execute.build_json).to be_nil
end
it 'result is invalid if replica did not catch up', :aggregate_failures do
expect(ApplicationRecord.sticking).to receive(:find_caught_up_replica)
.with(:runner, shared_runner.id, use_primary_on_failure: false)
.and_return(false)
expect(subject).not_to be_valid
expect(subject.build).to be_nil
expect(subject.build_json).to be_nil
end
end
shared_examples 'handles runner assignment' do
context 'runner follows tag list' do
subject(:build) { build_on(project_runner, runner_manager: project_runner_manager) }
let(:project_runner_manager) { nil }
context 'when job has tag' do
before do
pending_job.update!(tag_list: ["linux"])
pending_job.reload
pending_job.create_queuing_entry!
end
context 'and runner has matching tag' do
before do
project_runner.update!(tag_list: ["linux"])
end
context 'with no runner manager specified' do
it 'picks build' do
expect(build).to eq(pending_job)
expect(pending_job.runner_manager).to be_nil
end
end
context 'with runner manager specified' do
let(:project_runner_manager) { create(:ci_runner_machine, runner: project_runner) }
it 'picks build and assigns runner manager' do
expect(build).to eq(pending_job)
expect(pending_job.runner_manager).to eq(project_runner_manager)
end
end
end
it 'does not pick build with different tag' do
project_runner.update!(tag_list: ["win32"])
expect(build).to be_falsey
end
it 'does not pick tagged build when runner has no tags' do
pending_job.create_queuing_entry!
expect(build).to be_falsey
end
end
context 'when job has no tag' do
it 'picks build' do
expect(build).to eq(pending_job)
end
context 'when runner has tag' do
before do
project_runner.update!(tag_list: ["win32"])
end
it 'picks build' do
expect(build).to eq(pending_job)
end
end
end
end
context 'deleted projects' do
before do
project.update!(pending_delete: true)
end
context 'for shared runners' do
before do
project.update!(shared_runners_enabled: true)
end
it 'does not pick a build' do
expect(build_on(shared_runner)).to be_nil
end
end
context 'for project runner' do
subject(:build) { build_on(project_runner, runner_manager: project_runner_manager) }
let(:project_runner_manager) { nil }
context 'with no runner manager specified' do
it 'does not pick a build' do
expect(build).to be_nil
expect(pending_job.reload).to be_failed
expect(pending_job.queuing_entry).to be_nil
expect(Ci::RunnerManagerBuild.all).to be_empty
end
end
context 'with runner manager specified' do
let(:project_runner_manager) { create(:ci_runner_machine, runner: project_runner) }
it 'does not pick a build' do
expect(build).to be_nil
expect(pending_job.reload).to be_failed
expect(pending_job.queuing_entry).to be_nil
expect(Ci::RunnerManagerBuild.all).to be_empty
end
end
end
end
context 'allow shared runners' do
before do
project.update!(shared_runners_enabled: true)
pipeline.reload
pending_job.reload
pending_job.create_queuing_entry!
end
context 'when build owner has been blocked' do
let(:user) { create(:user, :blocked) }
before do
pending_job.update!(user: user)
end
context 'with no runner manager specified' do
it 'does not pick the build and drops the build' do
expect(build_on(shared_runner)).to be_falsey
expect(pending_job.reload).to be_user_blocked
end
end
context 'with runner manager specified' do
let(:runner_manager) { create(:ci_runner_machine, runner: runner) }
it 'does not pick the build and does not create join record' do
expect(build_on(shared_runner, runner_manager: runner_manager)).to be_falsey
expect(Ci::RunnerManagerBuild.all).to be_empty
end
end
end
context 'for multiple builds' do
let!(:project2) { create :project, shared_runners_enabled: true }
let!(:pipeline2) { create :ci_pipeline, project: project2 }
let!(:project3) { create :project, shared_runners_enabled: true }
let!(:pipeline3) { create :ci_pipeline, project: project3 }
let!(:build1_project1) { pending_job }
let!(:build2_project1) { create(:ci_build, :pending, :queued, pipeline: pipeline) }
let!(:build3_project1) { create(:ci_build, :pending, :queued, pipeline: pipeline) }
let!(:build1_project2) { create(:ci_build, :pending, :queued, pipeline: pipeline2) }
let!(:build2_project2) { create(:ci_build, :pending, :queued, pipeline: pipeline2) }
let!(:build1_project3) { create(:ci_build, :pending, :queued, pipeline: pipeline3) }
it 'picks builds one-by-one' do
expect(Ci::Build).to receive(:find).with(pending_job.id).and_call_original
expect(build_on(shared_runner)).to eq(build1_project1)
end
context 'when using fair scheduling' do
context 'when all builds are pending' do
it 'prefers projects without builds first' do
# it gets one build from each of the projects
expect(build_on(shared_runner)).to eq(build1_project1)
expect(build_on(shared_runner)).to eq(build1_project2)
expect(build_on(shared_runner)).to eq(build1_project3)
# then it gets a second build from each of the projects
expect(build_on(shared_runner)).to eq(build2_project1)
expect(build_on(shared_runner)).to eq(build2_project2)
# in the end the third build
expect(build_on(shared_runner)).to eq(build3_project1)
end
end
context 'when some builds transition to success' do
it 'equalises number of running builds' do
# after finishing the first build for project 1, get a second build from the same project
expect(build_on(shared_runner)).to eq(build1_project1)
build1_project1.reload.success
expect(build_on(shared_runner)).to eq(build2_project1)
expect(build_on(shared_runner)).to eq(build1_project2)
build1_project2.reload.success
expect(build_on(shared_runner)).to eq(build2_project2)
expect(build_on(shared_runner)).to eq(build1_project3)
expect(build_on(shared_runner)).to eq(build3_project1)
end
end
end
context 'when using DEFCON mode that disables fair scheduling' do
before do
stub_feature_flags(ci_queueing_disaster_recovery_disable_fair_scheduling: true)
end
context 'when all builds are pending' do
it 'returns builds in order of creation (FIFO)' do
# it gets builds in the order they were created
expect(build_on(shared_runner)).to eq(build1_project1)
expect(build_on(shared_runner)).to eq(build2_project1)
expect(build_on(shared_runner)).to eq(build3_project1)
expect(build_on(shared_runner)).to eq(build1_project2)
expect(build_on(shared_runner)).to eq(build2_project2)
expect(build_on(shared_runner)).to eq(build1_project3)
end
end
context 'when some builds transition to success' do
it 'returns builds in order of creation (FIFO)' do
expect(build_on(shared_runner)).to eq(build1_project1)
build1_project1.reload.success
expect(build_on(shared_runner)).to eq(build2_project1)
expect(build_on(shared_runner)).to eq(build3_project1)
build2_project1.reload.success
expect(build_on(shared_runner)).to eq(build1_project2)
expect(build_on(shared_runner)).to eq(build2_project2)
expect(build_on(shared_runner)).to eq(build1_project3)
end
end
end
end
context 'shared runner' do
let(:response) { described_class.new(shared_runner, nil).execute }
let(:build) { response.build }
it { expect(build).to be_kind_of(Build) }
it { expect(build).to be_valid }
it { expect(build).to be_running }
it { expect(build.runner).to eq(shared_runner) }
it { expect(Gitlab::Json.parse(response.build_json)['id']).to eq(build.id) }
end
context 'project runner' do
let(:build) { build_on(project_runner) }
it { expect(build).to be_kind_of(Build) }
it { expect(build).to be_valid }
it { expect(build).to be_running }
it { expect(build.runner).to eq(project_runner) }
end
end
context 'disallow shared runners' do
before do
project.update!(shared_runners_enabled: false)
end
context 'shared runner' do
let(:build) { build_on(shared_runner) }
it { expect(build).to be_nil }
end
context 'project runner' do
let(:build) { build_on(project_runner) }
it { expect(build).to be_kind_of(Build) }
it { expect(build).to be_valid }
it { expect(build).to be_running }
it { expect(build.runner).to eq(project_runner) }
end
end
context 'disallow when builds are disabled' do
before do
project.update!(shared_runners_enabled: true, group_runners_enabled: true)
project.project_feature.update_attribute(:builds_access_level, ProjectFeature::DISABLED)
pending_job.reload.create_queuing_entry!
end
context 'and uses shared runner' do
let(:build) { build_on(shared_runner) }
it { expect(build).to be_nil }
end
context 'and uses group runner' do
let(:build) { build_on(group_runner) }
it { expect(build).to be_nil }
end
context 'and uses project runner' do
let(:build) { build_on(project_runner) }
it 'does not pick a build' do
expect(build).to be_nil
expect(pending_job.reload).to be_failed
expect(pending_job.queuing_entry).to be_nil
end
end
end
context 'allow group runners' do
before do
project.update!(group_runners_enabled: true)
end
context 'for multiple builds' do
let!(:project2) { create(:project, group_runners_enabled: true, group: group) }
let!(:pipeline2) { create(:ci_pipeline, project: project2) }
let!(:project3) { create(:project, group_runners_enabled: true, group: group) }
let!(:pipeline3) { create(:ci_pipeline, project: project3) }
let!(:build1_project1) { pending_job }
let!(:build2_project1) { create(:ci_build, :queued, pipeline: pipeline) }
let!(:build3_project1) { create(:ci_build, :queued, pipeline: pipeline) }
let!(:build1_project2) { create(:ci_build, :queued, pipeline: pipeline2) }
let!(:build2_project2) { create(:ci_build, :queued, pipeline: pipeline2) }
let!(:build1_project3) { create(:ci_build, :queued, pipeline: pipeline3) }
# these shouldn't influence the scheduling
let!(:unrelated_group) { create(:group) }
let!(:unrelated_project) { create(:project, group_runners_enabled: true, group: unrelated_group) }
let!(:unrelated_pipeline) { create(:ci_pipeline, project: unrelated_project) }
let!(:build1_unrelated_project) { create(:ci_build, :pending, :queued, pipeline: unrelated_pipeline) }
let!(:unrelated_group_runner) { create(:ci_runner, :group, groups: [unrelated_group]) }
it 'does not consider builds from other group runners' do
queue = ::Ci::Queue::BuildQueueService.new(group_runner)
expect(queue.builds_for_group_runner.size).to eq 6
build_on(group_runner)
expect(queue.builds_for_group_runner.size).to eq 5
build_on(group_runner)
expect(queue.builds_for_group_runner.size).to eq 4
build_on(group_runner)
expect(queue.builds_for_group_runner.size).to eq 3
build_on(group_runner)
expect(queue.builds_for_group_runner.size).to eq 2
build_on(group_runner)
expect(queue.builds_for_group_runner.size).to eq 1
build_on(group_runner)
expect(queue.builds_for_group_runner.size).to eq 0
expect(build_on(group_runner)).to be_nil
end
end
context 'group runner' do
let(:build) { build_on(group_runner) }
it { expect(build).to be_kind_of(Build) }
it { expect(build).to be_valid }
it { expect(build).to be_running }
it { expect(build.runner).to eq(group_runner) }
end
end
context 'disallow group runners' do
before do
project.update!(group_runners_enabled: false)
pending_job.reload.create_queuing_entry!
end
context 'group runner' do
let(:build) { build_on(group_runner) }
it { expect(build).to be_nil }
end
end
context 'when first build is stalled' do
before do
allow_any_instance_of(described_class).to receive(:assign_runner!).and_call_original
allow_any_instance_of(described_class).to receive(:assign_runner!)
.with(pending_job, anything).and_raise(ActiveRecord::StaleObjectError)
end
subject { described_class.new(project_runner, nil).execute }
context 'with multiple builds are in queue' do
let!(:other_build) { create(:ci_build, :pending, :queued, pipeline: pipeline) }
before do
allow_any_instance_of(::Ci::Queue::BuildQueueService)
.to receive(:execute)
.and_return(Ci::Build.where(id: [pending_job, other_build]).pluck(:id))
end
it "receives second build from the queue" do
expect(subject).to be_valid
expect(subject.build).to eq(other_build)
end
end
context 'when single build is in queue' do
before do
allow_any_instance_of(::Ci::Queue::BuildQueueService)
.to receive(:execute)
.and_return(Ci::Build.where(id: pending_job).pluck(:id))
end
it "does not receive any valid result" do
expect(subject).not_to be_valid
end
end
context 'when there is no build in queue' do
before do
allow_any_instance_of(::Ci::Queue::BuildQueueService)
.to receive(:execute)
.and_return([])
end
it "does not receive builds but result is valid" do
expect(subject).to be_valid
expect(subject.build).to be_nil
end
end
end
context 'when access_level of runner is not_protected' do
let!(:project_runner) { create(:ci_runner, :project, projects: [project]) }
context 'when a job is protected' do
let!(:pending_job) { create(:ci_build, :pending, :queued, :protected, pipeline: pipeline) }
it 'picks the job' do
expect(build_on(project_runner)).to eq(pending_job)
end
end
context 'when a job is unprotected' do
let!(:pending_job) { create(:ci_build, :pending, :queued, pipeline: pipeline) }
it 'picks the job' do
expect(build_on(project_runner)).to eq(pending_job)
end
end
context 'when protected attribute of a job is nil' do
let!(:pending_job) { create(:ci_build, :pending, :queued, pipeline: pipeline) }
before do
pending_job.update_attribute(:protected, nil)
end
it 'picks the job' do
expect(build_on(project_runner)).to eq(pending_job)
end
end
end
context 'when access_level of runner is ref_protected' do
let!(:project_runner) { create(:ci_runner, :project, :ref_protected, projects: [project]) }
context 'when a job is protected' do
let!(:pending_job) { create(:ci_build, :pending, :queued, :protected, pipeline: pipeline) }
it 'picks the job' do
expect(build_on(project_runner)).to eq(pending_job)
end
end
context 'when a job is unprotected' do
let!(:pending_job) { create(:ci_build, :pending, :queued, pipeline: pipeline) }
it 'does not pick the job' do
expect(build_on(project_runner)).to be_nil
end
end
context 'when protected attribute of a job is nil' do
let!(:pending_job) { create(:ci_build, :pending, :queued, pipeline: pipeline) }
before do
pending_job.update_attribute(:protected, nil)
end
it 'does not pick the job' do
expect(build_on(project_runner)).to be_nil
end
end
end
context 'runner feature set is verified' do
let(:options) { { artifacts: { reports: { junit: "junit.xml" } } } }
let!(:pending_job) { create(:ci_build, :pending, :queued, pipeline: pipeline, options: options) }
subject { build_on(project_runner, params: params) }
context 'when feature is missing by runner' do
let(:params) { {} }
it 'does not pick the build and drops the build' do
expect(subject).to be_nil
expect(pending_job.reload).to be_failed
expect(pending_job).to be_runner_unsupported
end
end
context 'when feature is supported by runner' do
let(:params) do
{ info: { features: { upload_multiple_artifacts: true } } }
end
it 'does pick job' do
expect(subject).not_to be_nil
end
end
end
context 'when "dependencies" keyword is specified' do
let!(:pre_stage_job) do
create(:ci_build, :success, :artifacts, pipeline: pipeline, name: 'test', stage_idx: 0)
end
let!(:pending_job) do
create(:ci_build, :pending, :queued,
pipeline: pipeline, stage_idx: 1,
options: { script: ["bash"], dependencies: dependencies })
end
let(:dependencies) { %w[test] }
subject { build_on(project_runner) }
it 'picks a build with a dependency' do
picked_build = build_on(project_runner)
expect(picked_build).to be_present
end
context 'when there are multiple dependencies with artifacts' do
let!(:pre_stage_job_second) do
create(:ci_build, :success, :artifacts, pipeline: pipeline, name: 'deploy', stage_idx: 0)
end
let(:dependencies) { %w[test deploy] }
it 'logs build artifacts size' do
build_on(project_runner)
artifacts_size = [pre_stage_job, pre_stage_job_second].sum do |job|
job.job_artifacts_archive.size
end
expect(artifacts_size).to eq 107464 * 2
expect(Gitlab::ApplicationContext.current).to include({
'meta.artifacts_dependencies_size' => artifacts_size,
'meta.artifacts_dependencies_count' => 2
})
end
end
shared_examples 'not pick' do
it 'does not pick the build and drops the build' do
expect(subject).to be_nil
expect(pending_job.reload).to be_failed
expect(pending_job).to be_missing_dependency_failure
end
end
shared_examples 'validation is active' do
context 'when the dependent job has not been completed yet' do
let!(:pre_stage_job) do
create(:ci_build, :pending, :queued, :manual, pipeline: pipeline, name: 'test', stage_idx: 0)
end
it { is_expected.to eq(pending_job) }
end
context 'when artifacts of the dependent job have expired' do
let!(:pre_stage_job) do
create(:ci_build, :success, :expired, pipeline: pipeline, name: 'test', stage_idx: 0)
end
context 'when the pipeline is locked' do
before do
pipeline.artifacts_locked!
end
it { is_expected.to eq(pending_job) }
end
context 'when the pipeline is unlocked' do
before do
pipeline.unlocked!
end
it_behaves_like 'not pick'
end
end
context 'when artifacts of the dependent job have been erased' do
let!(:pre_stage_job) do
create(:ci_build, :success, pipeline: pipeline, name: 'test', stage_idx: 0, erased_at: 1.minute.ago)
end
it_behaves_like 'not pick'
end
context 'when job object is stale' do
let!(:pre_stage_job) do
create(:ci_build, :success, :expired, pipeline: pipeline, name: 'test', stage_idx: 0)
end
before do
pipeline.unlocked!
allow_next_instance_of(Ci::Build) do |build|
expect(build).to receive(:drop!)
.and_raise(ActiveRecord::StaleObjectError.new(pending_job, :drop!))
end
end
it 'does not drop nor pick' do
expect(subject).to be_nil
end
end
end
shared_examples 'validation is not active' do
context 'when the dependent job has not been completed yet' do
let!(:pre_stage_job) do
create(:ci_build, :pending, :queued, :manual, pipeline: pipeline, name: 'test', stage_idx: 0)
end
it { expect(subject).to eq(pending_job) }
end
context 'when artifacts of the dependent job have expired' do
let!(:pre_stage_job) do
create(:ci_build, :success, :expired, pipeline: pipeline, name: 'test', stage_idx: 0)
end
it { expect(subject).to eq(pending_job) }
end
context 'when artifacts of the dependent job have been erased' do
let!(:pre_stage_job) do
create(:ci_build, :success, pipeline: pipeline, name: 'test', stage_idx: 0, erased_at: 1.minute.ago)
end
it { expect(subject).to eq(pending_job) }
end
end
it_behaves_like 'validation is active'
end
context 'when build is degenerated' do
let!(:pending_job) { create(:ci_build, :pending, :queued, :degenerated, pipeline: pipeline) }
subject { build_on(project_runner) }
it 'does not pick the build and drops the build' do
expect(subject).to be_nil
pending_job.reload
expect(pending_job).to be_failed
expect(pending_job).to be_archived_failure
end
end
context 'when build has data integrity problem' do
let!(:pending_job) do
create(:ci_build, :pending, :queued, pipeline: pipeline)
end
before do
pending_job.update_columns(options: "string")
end
subject { build_on(project_runner) }
it 'does drop the build and logs both failures' do
expect(Gitlab::ErrorTracking).to receive(:track_exception)
.with(anything, a_hash_including(build_id: pending_job.id))
.twice
.and_call_original
expect(subject).to be_nil
pending_job.reload
expect(pending_job).to be_failed
expect(pending_job).to be_data_integrity_failure
end
end
context 'when build fails to be run!' do
let!(:pending_job) do
create(:ci_build, :pending, :queued, pipeline: pipeline)
end
before do
expect_any_instance_of(Ci::Build).to receive(:run!)
.and_raise(RuntimeError, 'scheduler error')
end
subject { build_on(project_runner) }
it 'does drop the build and logs failure' do
expect(Gitlab::ErrorTracking).to receive(:track_exception)
.with(anything, a_hash_including(build_id: pending_job.id))
.once
.and_call_original
expect(subject).to be_nil
pending_job.reload
expect(pending_job).to be_failed
expect(pending_job).to be_scheduler_failure
end
end
context 'when an exception is raised during a persistent ref creation' do
before do
allow_any_instance_of(Ci::PersistentRef).to receive(:exist?) { false }
allow_any_instance_of(Ci::PersistentRef).to receive(:create_ref) { raise ArgumentError }
end
subject { build_on(project_runner) }
it 'picks the build' do
expect(subject).to eq(pending_job)
pending_job.reload
expect(pending_job).to be_running
end
end
context 'when only some builds can be matched by runner' do
let!(:project_runner) { create(:ci_runner, :project, projects: [project], tag_list: %w[matching]) }
let!(:pending_job) { create(:ci_build, :pending, :queued, pipeline: pipeline, tag_list: %w[matching]) }
before do
# create additional matching and non-matching jobs
create_list(:ci_build, 2, :pending, :queued, pipeline: pipeline, tag_list: %w[matching])
create(:ci_build, :pending, :queued, pipeline: pipeline, tag_list: %w[non-matching])
end
it 'observes queue size of only matching jobs' do
# pending_job + 2 x matching ones
expect(Gitlab::Ci::Queue::Metrics.queue_size_total).to receive(:observe)
.with({ runner_type: project_runner.runner_type }, 3)
expect(build_on(project_runner)).to eq(pending_job)
end
it 'observes queue processing time by the runner type' do
expect(Gitlab::Ci::Queue::Metrics.queue_iteration_duration_seconds)
.to receive(:observe)
.with({ runner_type: project_runner.runner_type }, anything)
expect(Gitlab::Ci::Queue::Metrics.queue_retrieval_duration_seconds)
.to receive(:observe)
.with({ runner_type: project_runner.runner_type }, anything)
expect(build_on(project_runner)).to eq(pending_job)
end
end
context 'when ci_register_job_temporary_lock is enabled' do
before do
stub_feature_flags(ci_register_job_temporary_lock: true)
allow(Gitlab::Ci::Queue::Metrics.queue_operations_total).to receive(:increment)
end
context 'when a build is temporarily locked' do
let(:service) { described_class.new(project_runner, nil) }
before do
service.send(:acquire_temporary_lock, pending_job.id)
end
it 'skips this build and marks queue as invalid' do
expect(Gitlab::Ci::Queue::Metrics.queue_operations_total).to receive(:increment)
.with(operation: :queue_iteration)
expect(Gitlab::Ci::Queue::Metrics.queue_operations_total).to receive(:increment)
.with(operation: :build_temporary_locked)
expect(service.execute).not_to be_valid
end
context 'when there is another build in queue' do
let!(:next_pending_job) { create(:ci_build, :pending, :queued, pipeline: pipeline) }
it 'skips this build and picks another build' do
expect(Gitlab::Ci::Queue::Metrics.queue_operations_total).to receive(:increment)
.with(operation: :queue_iteration).twice
expect(Gitlab::Ci::Queue::Metrics.queue_operations_total).to receive(:increment)
.with(operation: :build_temporary_locked)
result = service.execute
expect(result.build).to eq(next_pending_job)
expect(result).to be_valid
end
end
end
end
end
context 'when using pending builds table' do
let!(:runner) { create(:ci_runner, :project, projects: [project], tag_list: %w[conflict]) }
include_examples 'handles runner assignment'
context 'when conflicting data is stored in the denormalized table' do
let!(:pending_job) { create(:ci_build, :pending, :queued, pipeline: pipeline, tag_list: %w[conflict]) }
before do
pending_job.update_column(:status, :running)
end
it 'removes queuing entry upon build assignment attempt' do
expect(pending_job.reload).to be_running
expect(pending_job.queuing_entry).to be_present
expect(execute).not_to be_valid
expect(pending_job.reload.queuing_entry).not_to be_present
end
end
end
end
describe '#register_success' do
let!(:current_time) { Time.zone.local(2018, 4, 5, 14, 0, 0) }
let!(:attempt_counter) { double('Gitlab::Metrics::NullMetric') }
let!(:job_queue_duration_seconds) { double('Gitlab::Metrics::NullMetric') }
before do
allow(Time).to receive(:now).and_return(current_time)
# Stub tested metrics
allow(Gitlab::Ci::Queue::Metrics)
.to receive(:attempt_counter)
.and_return(attempt_counter)
allow(Gitlab::Ci::Queue::Metrics)
.to receive(:job_queue_duration_seconds)
.and_return(job_queue_duration_seconds)
project.update!(shared_runners_enabled: true)
pending_job.update!(created_at: current_time - 3600, queued_at: current_time - 1800)
end
shared_examples 'attempt counter collector' do
it 'increments attempt counter' do
allow(job_queue_duration_seconds).to receive(:observe)
expect(attempt_counter).to receive(:increment)
build_on(runner)
end
end
shared_examples 'jobs queueing time histogram collector' do
it 'counts job queuing time histogram with expected labels' do
allow(attempt_counter).to receive(:increment)
expect(job_queue_duration_seconds).to receive(:observe)
.with({ shared_runner: expected_shared_runner,
jobs_running_for_project: expected_jobs_running_for_project_first_job,
shard: expected_shard }, 1800)
build_on(runner)
end
context 'when project already has running jobs' do
let(:build2) { create(:ci_build, :running, pipeline: pipeline, runner: shared_runner) }
let(:build3) { create(:ci_build, :running, pipeline: pipeline, runner: shared_runner) }
before do
::Ci::RunningBuild.upsert_shared_runner_build!(build2)
::Ci::RunningBuild.upsert_shared_runner_build!(build3)
end
it 'counts job queuing time histogram with expected labels' do
allow(attempt_counter).to receive(:increment)
expect(job_queue_duration_seconds).to receive(:observe)
.with({ shared_runner: expected_shared_runner,
jobs_running_for_project: expected_jobs_running_for_project_third_job,
shard: expected_shard }, 1800)
build_on(runner)
end
end
end
shared_examples 'metrics collector' do
it_behaves_like 'attempt counter collector'
it_behaves_like 'jobs queueing time histogram collector'
end
context 'when shared runner is used' do
before do
pending_job.reload
pending_job.create_queuing_entry!
end
let(:runner) { create(:ci_runner, :instance, tag_list: %w[tag1 tag2]) }
let(:expected_shared_runner) { true }
let(:expected_shard) { ::Gitlab::Ci::Queue::Metrics::DEFAULT_METRICS_SHARD }
let(:expected_jobs_running_for_project_first_job) { '0' }
let(:expected_jobs_running_for_project_third_job) { '2' }
it_behaves_like 'metrics collector'
context 'when metrics_shard tag is defined' do
let(:runner) { create(:ci_runner, :instance, tag_list: %w[tag1 metrics_shard::shard_tag tag2]) }
let(:expected_shard) { 'shard_tag' }
it_behaves_like 'metrics collector'
end
context 'when multiple metrics_shard tags are defined' do
let(:runner) { create(:ci_runner, :instance, tag_list: %w[tag1 metrics_shard::shard_tag metrics_shard::shard_tag_2 tag2]) }
let(:expected_shard) { 'shard_tag' }
it_behaves_like 'metrics collector'
end
context 'when max running jobs bucket size is exceeded' do
before do
stub_const('Project::INSTANCE_RUNNER_RUNNING_JOBS_MAX_BUCKET', 1)
end
let(:expected_jobs_running_for_project_third_job) { '1+' }
it_behaves_like 'metrics collector'
end
context 'when pending job with queued_at=nil is used' do
before do
pending_job.update!(queued_at: nil)
end
it_behaves_like 'attempt counter collector'
it "doesn't count job queuing time histogram" do
allow(attempt_counter).to receive(:increment)
expect(job_queue_duration_seconds).not_to receive(:observe)
build_on(runner)
end
end
end
context 'when project runner is used' do
let(:runner) { create(:ci_runner, :project, projects: [project], tag_list: %w[tag1 metrics_shard::shard_tag tag2]) }
let(:expected_shared_runner) { false }
let(:expected_shard) { ::Gitlab::Ci::Queue::Metrics::DEFAULT_METRICS_SHARD }
let(:expected_jobs_running_for_project_first_job) { '+Inf' }
let(:expected_jobs_running_for_project_third_job) { '+Inf' }
it_behaves_like 'metrics collector'
end
end
context 'when runner_session params are' do
it 'present sets runner session configuration in the build' do
runner_session_params = { session: { 'url' => 'https://example.com' } }
expect(build_on(project_runner, params: runner_session_params).runner_session.attributes)
.to include(runner_session_params[:session])
end
it 'not present it does not configure the runner session' do
expect(build_on(project_runner).runner_session).to be_nil
end
end
context 'when max queue depth is reached' do
let!(:pending_job) { create(:ci_build, :pending, :queued, :degenerated, pipeline: pipeline) }
let!(:pending_job_2) { create(:ci_build, :pending, :queued, :degenerated, pipeline: pipeline) }
let!(:pending_job_3) { create(:ci_build, :pending, :queued, pipeline: pipeline) }
before do
stub_const("#{described_class}::MAX_QUEUE_DEPTH", 2)
end
it 'returns 409 conflict' do
expect(Ci::Build.pending.unstarted.count).to eq 3
result = described_class.new(project_runner, nil).execute
expect(result).not_to be_valid
expect(result.build).to be_nil
expect(result.build_json).to be_nil
end
end
def build_on(runner, runner_manager: nil, params: {})
described_class.new(runner, runner_manager).execute(params).build
end
end
end
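A minimal usage sketch for `Ci::RegisterJobService`, assuming `runner` is an existing `Ci::Runner` polling for work (the empty params hash stands in for a real registration payload):
```ruby
# Attempt to pick one queued build for the runner; no runner manager is given.
result = Ci::RegisterJobService.new(runner, nil).execute({})
result.valid? # => false when a stale replica or a queue conflict was detected
result.build  # => the picked Ci::Build, or nil when nothing matched
```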
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class ParseAnnotationsArtifactService < ::BaseService
include ::Gitlab::Utils::StrongMemoize
include ::Gitlab::EncodingHelper
SizeLimitError = Class.new(StandardError)
ParserError = Class.new(StandardError)
def execute(artifact)
return error('Artifact is not annotations file type', :bad_request) unless artifact&.annotations?
return error("Annotations Artifact Too Big. Maximum Allowable Size: #{annotations_size_limit}", :bad_request) if
artifact.file.size > annotations_size_limit
annotations = parse!(artifact)
Ci::JobAnnotation.bulk_upsert!(annotations, unique_by: %i[partition_id job_id name])
success
rescue SizeLimitError, ParserError, Gitlab::Json.parser_error, ActiveRecord::RecordInvalid => error
error(error.message, :bad_request)
end
private
def parse!(artifact)
annotations = []
artifact.each_blob do |blob|
# Windows powershell may output UTF-16LE files, so convert the whole file
# to UTF-8 before proceeding.
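# For example, a hypothetical UTF-16LE blob "\xFF\xFE{\x00}\x00" is
# re-encoded below into the plain UTF-8 string "{}", with the byte
# order mark stripped.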
blob = strip_bom(encode_utf8_with_replacement_character(blob))
blob_json = Gitlab::Json.parse(blob)
raise ParserError, 'Annotations files must be a JSON object' unless blob_json.is_a?(Hash)
blob_json.each do |key, value|
annotations.push(Ci::JobAnnotation.new(job: artifact.job, name: key, data: value))
if annotations.size > annotations_num_limit
raise SizeLimitError,
"Annotations files cannot have more than #{annotations_num_limit} annotation lists"
end
end
end
annotations
end
def annotations_num_limit
project.actual_limits.ci_job_annotations_num
end
strong_memoize_attr :annotations_num_limit
def annotations_size_limit
project.actual_limits.ci_job_annotations_size
end
strong_memoize_attr :annotations_size_limit
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::ParseAnnotationsArtifactService, feature_category: :build_artifacts do
let_it_be(:project) { create(:project) }
let_it_be_with_reload(:build) { create(:ci_build, project: project) }
let(:service) { described_class.new(project, nil) }
describe '#execute' do
subject { service.execute(artifact) }
context 'when build has an annotations artifact' do
let_it_be(:artifact) { create(:ci_job_artifact, :annotations, job: build) }
context 'when artifact does not have the specified blob' do
before do
allow(artifact).to receive(:each_blob)
end
it 'parses nothing' do
expect(subject[:status]).to eq(:success)
expect(build.job_annotations).to be_empty
end
end
context 'when artifact has the specified blob' do
let(:blob) { data.to_json }
before do
allow(artifact).to receive(:each_blob).and_yield(blob)
end
context 'when valid annotations are given' do
let(:data) do
{
external_links: [
{
external_link: {
label: 'URL 1',
url: 'https://url1.example.com/'
}
},
{
external_link: {
label: 'URL 2',
url: 'https://url2.example.com/'
}
}
]
}
end
it 'parses the artifact' do
subject
expect(build.job_annotations.as_json).to contain_exactly(
hash_including('name' => 'external_links', 'data' => [
hash_including('external_link' => hash_including('label' => 'URL 1', 'url' => 'https://url1.example.com/')),
hash_including('external_link' => hash_including('label' => 'URL 2', 'url' => 'https://url2.example.com/'))
])
)
end
end
context 'when valid annotations are given and annotation list name is the same' do
before do
build.job_annotations.create!(name: 'external_links', data: [
{
external_link: {
label: 'URL 1',
url: 'https://url1.example.com/'
}
}
])
end
let(:data) do
{
external_links: [
{
external_link: {
label: 'URL 2',
url: 'https://url2.example.com/'
}
}
]
}
end
it 'parses the artifact' do
subject
expect(build.job_annotations.as_json).to contain_exactly(
hash_including('name' => 'external_links', 'data' => [
hash_including('external_link' => hash_including('label' => 'URL 2', 'url' => 'https://url2.example.com/'))
])
)
end
end
context 'when invalid JSON is given' do
let(:blob) { 'Invalid JSON!' }
it 'returns error' do
expect(subject[:status]).to eq(:error)
expect(subject[:http_status]).to eq(:bad_request)
end
end
context 'when root is not an object' do
let(:data) { [] }
it 'returns error' do
expect(subject[:status]).to eq(:error)
expect(subject[:message]).to eq('Annotations files must be a JSON object')
expect(subject[:http_status]).to eq(:bad_request)
end
end
context 'when item is not a valid annotation list' do
let(:data) { { external_links: {} } }
it 'returns error' do
expect(subject[:status]).to eq(:error)
expect(subject[:message]).to eq('Validation failed: Data must be a valid json schema')
expect(subject[:http_status]).to eq(:bad_request)
end
end
context 'when more annotation lists than the limit are specified' do
let(:data) do
{
external_links_1: [
{
external_link: {
label: 'URL',
url: 'https://example.com/'
}
}
],
external_links_2: [
{
external_link: {
label: 'URL',
url: 'https://example.com/'
}
}
]
}
end
before do
allow(service).to receive(:annotations_num_limit).and_return(1)
end
it 'returns error' do
expect(subject[:status]).to eq(:error)
expect(subject[:message]).to eq(
"Annotations files cannot have more than #{service.send(:annotations_num_limit)} annotation lists")
expect(subject[:http_status]).to eq(:bad_request)
end
end
end
context 'when artifact size is too big' do
before do
allow(artifact.file).to receive(:size) { service.send(:annotations_size_limit) + 1.kilobyte }
end
it 'returns error' do
expect(subject[:status]).to eq(:error)
expect(subject[:message]).to eq(
"Annotations Artifact Too Big. Maximum Allowable Size: #{service.send(:annotations_size_limit)}")
expect(subject[:http_status]).to eq(:bad_request)
end
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class ParseDotenvArtifactService < ::BaseService
include ::Gitlab::Utils::StrongMemoize
include ::Gitlab::EncodingHelper
SizeLimitError = Class.new(StandardError)
ParserError = Class.new(StandardError)
def execute(artifact)
validate!(artifact)
variables = parse!(artifact)
Ci::JobVariable.bulk_insert!(variables)
success
rescue SizeLimitError, ParserError, ActiveRecord::RecordInvalid, ActiveRecord::RecordNotUnique => error
Gitlab::ErrorTracking.track_exception(error, job_id: artifact.job_id)
error(error.message, :bad_request)
end
private
def validate!(artifact)
unless artifact&.dotenv?
raise ArgumentError, 'Artifact is not dotenv file type'
end
unless artifact.file.size < dotenv_size_limit
raise SizeLimitError,
"Dotenv Artifact Too Big. Maximum Allowable Size: #{dotenv_size_limit}"
end
end
def parse!(artifact)
variables = {}
artifact.each_blob do |blob|
# Windows powershell may output UTF-16LE files, so convert the whole file
# to UTF-8 before proceeding.
blob = strip_bom(encode_utf8_with_replacement_character(blob))
blob.each_line do |line|
key, value = scan_line!(line)
variables[key] = Ci::JobVariable.new(
job_id: artifact.job_id,
source: :dotenv,
key: key,
value: value,
raw: false
)
end
end
if variables.size > dotenv_variable_limit
raise SizeLimitError,
"Dotenv files cannot have more than #{dotenv_variable_limit} variables"
end
variables.values
end
def scan_line!(line)
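# The non-greedy first capture splits on the first "=" only, so a line
# like "KEY=VALUE=WITH=EQUALS" yields ["KEY", "VALUE=WITH=EQUALS"].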
result = line.scan(/^(.*?)=(.*)$/).last
raise ParserError, 'Invalid Format' if result.nil?
result.each(&:strip!)
end
def dotenv_variable_limit
strong_memoize(:dotenv_variable_limit) { project.actual_limits.dotenv_variables }
end
def dotenv_size_limit
strong_memoize(:dotenv_size_limit) { project.actual_limits.dotenv_size }
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::ParseDotenvArtifactService, feature_category: :build_artifacts do
let_it_be(:project) { create(:project) }
let_it_be(:pipeline) { create(:ci_pipeline, project: project) }
let(:build) { create(:ci_build, pipeline: pipeline, project: project) }
let(:service) { described_class.new(project, nil) }
describe '#execute' do
subject { service.execute(artifact) }
context 'when build has a dotenv artifact' do
let!(:artifact) { create(:ci_job_artifact, :dotenv, job: build) }
it 'parses the artifact' do
expect(subject[:status]).to eq(:success)
expect(build.job_variables.as_json(only: [:key, :value])).to contain_exactly(
hash_including('key' => 'KEY1', 'value' => 'VAR1'),
hash_including('key' => 'KEY2', 'value' => 'VAR2'))
end
context 'when dotenv variables conflict with manual variables' do
before do
create(:ci_job_variable, job: build, key: 'KEY1')
end
it 'returns an error message that there is a duplicate variable' do
subject
expect(subject[:status]).to eq(:error)
expect(subject[:message]).to include("Key (key, job_id)=(KEY1, #{build.id}) already exists.")
expect(subject[:http_status]).to eq(:bad_request)
end
end
context 'when dotenv file contains duplicate variables' do
let!(:artifact) { create(:ci_job_artifact, :dotenv, job: build) }
let(:blob) do
<<~EOS
KEY1=VAR1
KEY2=VAR2
KEY2=VAR3
KEY1=VAR4
EOS
end
before do
allow(artifact).to receive(:each_blob).and_yield(blob)
end
it 'uses the latest value for each key' do
subject
expect(subject[:status]).to eq(:success)
expect(build.job_variables.as_json(only: [:key, :value])).to contain_exactly(
hash_including('key' => 'KEY1', 'value' => 'VAR4'),
hash_including('key' => 'KEY2', 'value' => 'VAR3'))
end
end
context 'when parse error happens' do
before do
allow(service).to receive(:scan_line!) { raise described_class::ParserError, 'Invalid Format' }
end
it 'returns error' do
expect(Gitlab::ErrorTracking).to receive(:track_exception)
.with(described_class::ParserError, job_id: build.id)
expect(subject[:status]).to eq(:error)
expect(subject[:message]).to eq('Invalid Format')
expect(subject[:http_status]).to eq(:bad_request)
end
end
context 'when artifact size is too big' do
before do
allow(artifact.file).to receive(:size) { 10.kilobytes }
end
it 'returns error' do
expect(subject[:status]).to eq(:error)
expect(subject[:message]).to eq("Dotenv Artifact Too Big. Maximum Allowable Size: #{service.send(:dotenv_size_limit)}")
expect(subject[:http_status]).to eq(:bad_request)
end
end
context 'when artifact has the specified blob' do
before do
allow(artifact).to receive(:each_blob).and_yield(blob)
end
context 'when a white space trails the key' do
let(:blob) { 'KEY1 =VAR1' }
it 'trims the trailing space' do
subject
expect(build.job_variables.as_json(only: [:key, :value])).to contain_exactly(
hash_including('key' => 'KEY1', 'value' => 'VAR1'))
end
end
context 'when multiple key/value pairs exist in one line' do
let(:blob) { 'KEY=VARCONTAINING=EQLS' }
it 'parses the dotenv data' do
subject
expect(build.job_variables.as_json(only: [:key, :value])).to contain_exactly(
hash_including('key' => 'KEY', 'value' => 'VARCONTAINING=EQLS'))
end
end
context 'when key contains UNICODE' do
let(:blob) { '🛹=skateboard' }
it 'returns error' do
expect(subject[:status]).to eq(:error)
expect(subject[:message]).to eq("Validation failed: Key can contain only letters, digits and '_'.")
expect(subject[:http_status]).to eq(:bad_request)
end
end
context 'when value contains UNICODE' do
let(:blob) { 'skateboard=🛹' }
it 'parses the dotenv data' do
subject
expect(build.job_variables.as_json(only: [:key, :value])).to contain_exactly(
hash_including('key' => 'skateboard', 'value' => '🛹'))
end
end
context 'when key contains a space' do
let(:blob) { 'K E Y 1=VAR1' }
it 'returns error' do
expect(subject[:status]).to eq(:error)
expect(subject[:message]).to eq("Validation failed: Key can contain only letters, digits and '_'.")
expect(subject[:http_status]).to eq(:bad_request)
end
end
context 'when value contains a space' do
let(:blob) { 'KEY1=V A R 1' }
it 'parses the dotenv data' do
subject
expect(build.job_variables.as_json(only: [:key, :value])).to contain_exactly(
hash_including('key' => 'KEY1', 'value' => 'V A R 1'))
end
end
context 'when value is double quoted' do
let(:blob) { 'KEY1="VAR1"' }
it 'parses the value as-is' do
subject
expect(build.job_variables.as_json(only: [:key, :value])).to contain_exactly(
hash_including('key' => 'KEY1', 'value' => '"VAR1"'))
end
end
context 'when value is single quoted' do
let(:blob) { "KEY1='VAR1'" }
it 'parses the value as-is' do
subject
expect(build.job_variables.as_json(only: [:key, :value])).to contain_exactly(
hash_including('key' => 'KEY1', 'value' => "'VAR1'"))
end
end
context 'when value has whitespace in double quotes' do
let(:blob) { 'KEY1=" VAR1 "' }
it 'parses the value as-is' do
subject
expect(build.job_variables.as_json(only: [:key, :value])).to contain_exactly(
hash_including('key' => 'KEY1', 'value' => '" VAR1 "'))
end
end
context 'when key is missing' do
let(:blob) { '=VAR1' }
it 'returns error' do
expect(subject[:status]).to eq(:error)
expect(subject[:message]).to match(/Key can't be blank/)
expect(subject[:http_status]).to eq(:bad_request)
end
end
context 'when value is missing' do
let(:blob) { 'KEY1=' }
it 'parses the dotenv data' do
subject
expect(build.job_variables.as_json(only: [:key, :value])).to contain_exactly(
hash_including('key' => 'KEY1', 'value' => ''))
end
end
context 'when it is not in dotenv format' do
let(:blob) { "{ 'KEY1': 'VAR1' }" }
it 'returns error' do
expect(subject[:status]).to eq(:error)
expect(subject[:message]).to eq('Invalid Format')
expect(subject[:http_status]).to eq(:bad_request)
end
end
context 'when blob is encoded in UTF-16 LE' do
let(:blob) { File.read(Rails.root.join('spec/fixtures/build_artifacts/dotenv_utf16_le.txt')) }
it 'parses the dotenv data' do
subject
expect(build.job_variables.as_json(only: [:key, :value])).to contain_exactly(
hash_including('key' => 'MY_ENV_VAR', 'value' => 'true'),
hash_including('key' => 'TEST2', 'value' => 'false'))
end
end
context 'when more variables than the limit are specified in dotenv' do
let(:blob) do
StringIO.new.tap do |s|
(service.send(:dotenv_variable_limit) + 1).times do |i|
s << "KEY#{i}=VAR#{i}\n"
end
end.string
end
it 'returns error' do
expect(subject[:status]).to eq(:error)
expect(subject[:message]).to eq("Dotenv files cannot have more than #{service.send(:dotenv_variable_limit)} variables")
expect(subject[:http_status]).to eq(:bad_request)
end
end
context 'when variables are cross-referenced in dotenv' do
let(:blob) do
<<~EOS
KEY1=VAR1
KEY2=${KEY1}_Test
EOS
end
it 'does not support variable expansion in dotenv parser' do
subject
expect(build.job_variables.as_json(only: [:key, :value])).to contain_exactly(
hash_including('key' => 'KEY1', 'value' => 'VAR1'),
hash_including('key' => 'KEY2', 'value' => '${KEY1}_Test'))
end
end
context 'when there is an empty line' do
let(:blob) do
<<~EOS
KEY1=VAR1

KEY2=VAR2
EOS
end
it 'does not support empty lines in dotenv parser' do
subject
expect(subject[:status]).to eq(:error)
expect(subject[:message]).to eq('Invalid Format')
expect(subject[:http_status]).to eq(:bad_request)
end
end
context 'when there is a comment' do
let(:blob) do
<<~EOS
KEY1=VAR1 # This is variable
EOS
end
it 'does not support comments in dotenv parser' do
subject
expect(build.job_variables.as_json(only: [:key, :value])).to contain_exactly(
hash_including('key' => 'KEY1', 'value' => 'VAR1 # This is variable'))
end
end
end
end
context 'when build does not have a dotenv artifact' do
let!(:artifact) {}
it 'raises an error' do
expect { subject }.to raise_error(ArgumentError)
end
end
end
end
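A minimal usage sketch for the service under test, reusing the factories from the spec above (illustrative only):
```ruby
# Parse a build's dotenv report artifact into Ci::JobVariable records.
artifact = create(:ci_job_artifact, :dotenv, job: build)
result = Ci::ParseDotenvArtifactService.new(project, nil).execute(artifact)
result[:status] # => :success; build.job_variables now holds the parsed keys
```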
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
# TODO: a couple of points with this approach:
# + reuses existing architecture and reactive caching
# - it's not a report comparison and some comparing features must be turned off.
# see CompareReportsBaseService for more notes.
# issue: https://gitlab.com/gitlab-org/gitlab/issues/34224
class GenerateTerraformReportsService < CompareReportsBaseService
def execute(base_pipeline, head_pipeline)
{
status: :parsed,
key: key(base_pipeline, head_pipeline),
data: head_pipeline.terraform_reports.plans
}
rescue StandardError => e
Gitlab::ErrorTracking.track_exception(e, project_id: project.id)
{
status: :error,
key: key(base_pipeline, head_pipeline),
status_reason: _('An error occurred while fetching terraform reports.')
}
end
def latest?(base_pipeline, head_pipeline, data)
data&.fetch(:key, nil) == key(base_pipeline, head_pipeline)
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::GenerateTerraformReportsService, feature_category: :infrastructure_as_code do
let_it_be(:project) { create(:project, :repository) }
describe '#execute' do
let_it_be(:merge_request) { create(:merge_request, :with_terraform_reports, source_project: project) }
subject { described_class.new(project, nil, id: merge_request.id) }
context 'when head pipeline has terraform reports' do
it 'returns status and data' do
pipeline = merge_request.head_pipeline
result = subject.execute(nil, pipeline)
pipeline.builds.each do |build|
expect(result).to match(
status: :parsed,
data: match(
a_hash_including(build.id.to_s => hash_including(
'create' => 0,
'delete' => 0,
'update' => 1,
'job_name' => build.name
))
),
key: an_instance_of(Array)
)
end
end
end
context 'when head pipeline has corrupted terraform reports' do
it 'returns a report with error messages' do
build = create(:ci_build, pipeline: merge_request.head_pipeline, project: project)
create(:ci_job_artifact, :terraform_with_corrupted_data, job: build, project: project)
result = subject.execute(nil, merge_request.head_pipeline)
expect(result).to match(
status: :parsed,
data: match(
a_hash_including(build.id.to_s => hash_including(
'tf_report_error' => :invalid_json_format
))
),
key: an_instance_of(Array)
)
end
end
context 'when head pipeline is corrupted' do
it 'returns status and error message' do
result = subject.execute(nil, nil)
expect(result).to match(
a_hash_including(
status: :error,
status_reason: 'An error occurred while fetching terraform reports.'
)
)
end
end
end
end
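A minimal usage sketch, assuming `merge_request` has a head pipeline with terraform report artifacts (as in the spec above):
```ruby
result = Ci::GenerateTerraformReportsService
  .new(project, nil, id: merge_request.id)
  .execute(nil, merge_request.head_pipeline)
result[:status] # => :parsed, with plan data keyed by build id
```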
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class AbortPipelinesService
# NOTE: This call fails pipelines in bulk without running callbacks.
# Only for pipeline abandonment scenarios (examples: project delete)
def execute(pipelines, failure_reason)
pipelines.cancelable.each_batch(of: 100) do |pipeline_batch|
now = Time.current
basic_attributes = { status: :failed }
all_attributes = basic_attributes.merge(failure_reason: failure_reason, finished_at: now)
bulk_fail_for(Ci::Stage, pipeline_batch, basic_attributes)
bulk_fail_for(CommitStatus, pipeline_batch, all_attributes)
pipeline_batch.update_all(all_attributes)
end
ServiceResponse.success(message: 'Pipelines stopped')
end
private
def bulk_fail_for(klass, pipelines, attributes)
klass.in_pipelines(pipelines)
.cancelable
.in_batches(of: 150) # rubocop:disable Cop/InBatches
.update_all(attributes)
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::AbortPipelinesService, feature_category: :continuous_integration do
let_it_be(:user) { create(:user) }
let_it_be(:project) { create(:project, namespace: user.namespace) }
let_it_be(:cancelable_pipeline, reload: true) { create(:ci_pipeline, :running, project: project, user: user) }
let_it_be(:manual_pipeline, reload: true) { create(:ci_pipeline, status: :manual, project: project, user: user) }
let_it_be(:other_users_pipeline, reload: true) { create(:ci_pipeline, :running, project: project, user: create(:user)) } # not this user's pipeline
let_it_be(:cancelable_build, reload: true) { create(:ci_build, :running, pipeline: cancelable_pipeline) }
let_it_be(:non_cancelable_build, reload: true) { create(:ci_build, :success, pipeline: cancelable_pipeline) }
let_it_be(:cancelable_stage, reload: true) { create(:ci_stage, name: 'stageA', status: :running, pipeline: cancelable_pipeline, project: project) }
let_it_be(:non_cancelable_stage, reload: true) { create(:ci_stage, name: 'stageB', status: :success, pipeline: cancelable_pipeline, project: project) }
let_it_be(:manual_pipeline_cancelable_build, reload: true) { create(:ci_build, :created, pipeline: manual_pipeline) }
let_it_be(:manual_pipeline_non_cancelable_build, reload: true) { create(:ci_build, :manual, pipeline: manual_pipeline) }
let_it_be(:manual_pipeline_cancelable_stage, reload: true) { create(:ci_stage, name: 'stageA', status: :created, pipeline: manual_pipeline, project: project) }
let_it_be(:manual_pipeline_non_cancelable_stage, reload: true) { create(:ci_stage, name: 'stageB', status: :success, pipeline: manual_pipeline, project: project) }
describe '#execute' do
def expect_correct_pipeline_cancellations
expect(cancelable_pipeline.finished_at).not_to be_nil
expect(cancelable_pipeline).to be_failed
expect(manual_pipeline.finished_at).not_to be_nil
expect(manual_pipeline).to be_failed
end
def expect_correct_stage_cancellations
expect(cancelable_pipeline.stages - [non_cancelable_stage]).to all(be_failed)
expect(manual_pipeline.stages - [manual_pipeline_non_cancelable_stage]).to all(be_failed)
expect(non_cancelable_stage).not_to be_failed
expect(manual_pipeline_non_cancelable_stage).not_to be_failed
end
def expect_correct_build_cancellations
expect(cancelable_build).to be_failed
expect(cancelable_build.finished_at).not_to be_nil
expect(manual_pipeline_cancelable_build).to be_failed
expect(manual_pipeline_cancelable_build.finished_at).not_to be_nil
expect(non_cancelable_build).not_to be_failed
expect(manual_pipeline_non_cancelable_build).not_to be_failed
end
def expect_correct_cancellations
expect_correct_pipeline_cancellations
expect_correct_stage_cancellations
expect_correct_build_cancellations
end
context 'with project pipelines' do
def abort_project_pipelines
described_class.new.execute(project.all_pipelines, :project_deleted)
end
it 'fails all running pipelines and related jobs' do
expect(abort_project_pipelines).to be_success
expect_correct_cancellations
expect(other_users_pipeline.status).to eq('failed')
expect(other_users_pipeline.failure_reason).to eq('project_deleted')
expect(other_users_pipeline.stages.map(&:status)).to all(eq('failed'))
end
it 'avoids N+1 queries' do
control_count = ActiveRecord::QueryRecorder.new { abort_project_pipelines }.count
pipelines = create_list(:ci_pipeline, 5, :running, project: project)
create_list(:ci_build, 5, :running, pipeline: pipelines.first)
expect { abort_project_pipelines }.not_to exceed_query_limit(control_count)
end
context 'with live build logs' do
before do
create(:ci_build_trace_chunk, build: cancelable_build)
end
it 'makes failed builds with stale trace visible' do
expect(Ci::Build.with_stale_live_trace.count).to eq 0
travel_to(2.days.ago) do
abort_project_pipelines
end
expect(Ci::Build.with_stale_live_trace.count).to eq 1
end
end
end
end
end
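A minimal usage sketch, assuming `project` is being deleted and its pipelines should be abandoned in bulk:
```ruby
result = Ci::AbortPipelinesService.new.execute(project.all_pipelines, :project_deleted)
result.success? # => true
result.message  # => "Pipelines stopped"
```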
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
class CompareAccessibilityReportsService < CompareReportsBaseService
def comparer_class
Gitlab::Ci::Reports::AccessibilityReportsComparer
end
def serializer_class
AccessibilityReportsComparerSerializer
end
def get_report(pipeline)
pipeline&.accessibility_reports
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::CompareAccessibilityReportsService, feature_category: :continuous_integration do
let(:service) { described_class.new(project) }
let(:project) { create(:project, :repository) }
describe '#execute' do
subject { service.execute(base_pipeline, head_pipeline) }
context 'when head pipeline has accessibility reports' do
let(:base_pipeline) { nil }
let(:head_pipeline) { create(:ci_pipeline, :with_accessibility_reports, project: project) }
it 'returns status and data' do
expect(subject[:status]).to eq(:parsed)
expect(subject[:data]).to match_schema('entities/accessibility_reports_comparer')
end
end
context 'when base and head pipelines have accessibility reports' do
let(:base_pipeline) { create(:ci_pipeline, :with_accessibility_reports, project: project) }
let(:head_pipeline) { create(:ci_pipeline, :with_accessibility_reports, project: project) }
it 'returns status and data' do
expect(subject[:status]).to eq(:parsed)
expect(subject[:data]).to match_schema('entities/accessibility_reports_comparer')
end
end
end
end
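A minimal usage sketch, assuming both pipelines carry accessibility report artifacts:
```ruby
result = Ci::CompareAccessibilityReportsService.new(project).execute(base_pipeline, head_pipeline)
result[:status] # => :parsed
result[:data]   # => comparison payload built by AccessibilityReportsComparerSerializer
```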
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
module JobTokenScope
class AddProjectService < ::BaseService
include EditScopeValidations
def execute(target_project, direction: :inbound)
validate_edit!(project, target_project, current_user)
link = allowlist(direction)
.add!(target_project, user: current_user)
ServiceResponse.success(payload: { project_link: link })
rescue ActiveRecord::RecordNotUnique
ServiceResponse.error(message: "Target project is already in the job token scope")
rescue ActiveRecord::RecordInvalid => e
ServiceResponse.error(message: e.message)
rescue EditScopeValidations::ValidationError => e
ServiceResponse.error(message: e.message)
end
private
def allowlist(direction)
Ci::JobToken::Allowlist.new(project, direction: direction)
end
end
end
end
Ci::JobTokenScope::AddProjectService.prepend_mod_with('Ci::JobTokenScope::AddProjectService')
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::JobTokenScope::AddProjectService, feature_category: :continuous_integration do
let(:service) { described_class.new(project, current_user) }
let_it_be(:project) { create(:project, ci_outbound_job_token_scope_enabled: true).tap(&:save!) }
let_it_be(:target_project) { create(:project) }
let_it_be(:current_user) { create(:user) }
shared_examples 'adds project' do |context|
it 'adds the project to the scope' do
expect do
expect(result).to be_success
end.to change { Ci::JobToken::ProjectScopeLink.count }.by(1)
end
end
describe '#execute' do
subject(:result) { service.execute(target_project) }
it_behaves_like 'editable job token scope' do
context 'when user has permissions on source and target projects' do
let(:resulting_direction) { result.payload.fetch(:project_link)&.direction }
before do
project.add_maintainer(current_user)
target_project.add_developer(current_user)
end
it_behaves_like 'adds project'
context 'when token scope is disabled' do
before do
project.ci_cd_settings.update!(job_token_scope_enabled: false)
end
it_behaves_like 'adds project'
it 'creates an inbound link by default' do
expect(resulting_direction).to eq('inbound')
end
context 'when direction is specified' do
subject(:result) { service.execute(target_project, direction: direction) }
context 'when the direction is outbound' do
let(:direction) { :outbound }
specify { expect(resulting_direction).to eq('outbound') }
end
context 'when the direction is inbound' do
let(:direction) { :inbound }
specify { expect(resulting_direction).to eq('inbound') }
end
end
end
end
context 'when target project is same as the source project' do
before do
project.add_maintainer(current_user)
end
let(:target_project) { project }
it_behaves_like 'returns error', "Validation failed: Target project can't be the same as the source project"
end
end
end
end
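A minimal usage sketch, assuming `maintainer` can admin the source project (names are illustrative):
```ruby
response = Ci::JobTokenScope::AddProjectService
  .new(project, maintainer)
  .execute(target_project, direction: :inbound)
response.payload[:project_link] # => the created Ci::JobToken::ProjectScopeLink
```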
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
module JobTokenScope
class RemoveProjectService < ::BaseService
include EditScopeValidations
def execute(target_project, direction)
validate_edit!(project, target_project, current_user)
if project == target_project
return ServiceResponse.error(message: "Source project cannot be removed from the job token scope")
end
link = ::Ci::JobToken::ProjectScopeLink
.with_access_direction(direction)
.for_source_and_target(project, target_project)
unless link
return ServiceResponse.error(message: "Target project is not in the job token scope")
end
if link.destroy
ServiceResponse.success
else
ServiceResponse.error(message: link.errors.full_messages.to_sentence, payload: { project_link: link })
end
rescue EditScopeValidations::ValidationError => e
ServiceResponse.error(message: e.message, reason: :insufficient_permissions)
end
end
end
end
Ci::JobTokenScope::RemoveProjectService.prepend_mod_with('Ci::JobTokenScope::RemoveProjectService')
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::JobTokenScope::RemoveProjectService, feature_category: :continuous_integration do
let(:service) { described_class.new(project, current_user) }
let_it_be(:project) { create(:project, ci_outbound_job_token_scope_enabled: true).tap(&:save!) }
let_it_be(:target_project) { create(:project) }
let_it_be(:current_user) { create(:user) }
let_it_be(:link) do
create(:ci_job_token_project_scope_link,
source_project: project,
target_project: target_project)
end
shared_examples 'removes project' do |context|
it 'removes the project from the scope' do
expect do
expect(result).to be_success
end.to change { Ci::JobToken::ProjectScopeLink.count }.by(-1)
end
end
describe '#execute' do
subject(:result) { service.execute(target_project, :outbound) }
it_behaves_like 'editable job token scope' do
context 'when user has permissions on source and target project' do
before do
project.add_maintainer(current_user)
target_project.add_developer(current_user)
end
it_behaves_like 'removes project'
context 'when token scope is disabled' do
before do
project.ci_cd_settings.update!(job_token_scope_enabled: false)
end
it_behaves_like 'removes project'
end
end
context 'when target project is same as the source project' do
before do
project.add_maintainer(current_user)
end
let(:target_project) { project }
it_behaves_like 'returns error', "Source project cannot be removed from the job token scope"
end
context 'when target project is not in the job token scope' do
let_it_be(:target_project) { create(:project, :public) }
before do
project.add_maintainer(current_user)
end
it_behaves_like 'returns error', 'Target project is not in the job token scope'
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
module StuckBuilds
class DropScheduledService
include DropHelpers
BUILD_SCHEDULED_OUTDATED_TIMEOUT = 1.hour
def execute
Gitlab::AppLogger.info "#{self.class}: Cleaning scheduled, timed-out builds"
drop(scheduled_timed_out_builds, failure_reason: :stale_schedule)
end
private
def scheduled_timed_out_builds
Ci::Build.scheduled.scheduled_at_before(BUILD_SCHEDULED_OUTDATED_TIMEOUT.ago)
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::StuckBuilds::DropScheduledService, feature_category: :runner_fleet do
let_it_be(:runner) { create :ci_runner }
let!(:job) { create :ci_build, :scheduled, scheduled_at: scheduled_at, runner: runner }
subject(:service) { described_class.new }
context 'when job is scheduled' do
context 'for more than an hour ago' do
let(:scheduled_at) { 2.hours.ago }
it_behaves_like 'job is dropped with failure reason', 'stale_schedule'
end
context 'for less than 1 hour ago' do
let(:scheduled_at) { 30.minutes.ago }
it_behaves_like 'job is unchanged'
end
end
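# Builds in any other state must be left untouched, no matter how stale their
# scheduled_at timestamp is.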
%w[success skipped failed canceled running pending].each do |status|
context "when job is #{status}" do
before do
job.update!(status: status)
end
context 'and scheduled for more than an hour ago' do
let(:scheduled_at) { 2.hours.ago }
it_behaves_like 'job is unchanged'
end
context 'and scheduled for less than 1 hour ago' do
let(:scheduled_at) { 30.minutes.ago }
it_behaves_like 'job is unchanged'
end
end
end
context 'when there are no stale scheduled builds' do
let(:job) {}
it 'does not raise an error' do
expect { service.execute }.not_to raise_error
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
module StuckBuilds
class DropPendingService
include DropHelpers
BUILD_PENDING_OUTDATED_TIMEOUT = 1.day
BUILD_PENDING_STUCK_TIMEOUT = 1.hour
def execute
Gitlab::AppLogger.info "#{self.class}: Cleaning pending timed-out builds"
drop(
pending_builds(BUILD_PENDING_OUTDATED_TIMEOUT.ago),
failure_reason: :stuck_or_timeout_failure
)
drop_stuck(
pending_builds(BUILD_PENDING_STUCK_TIMEOUT.ago),
failure_reason: :stuck_or_timeout_failure
)
end
private
# rubocop: disable CodeReuse/ActiveRecord
# We're adding the ordering clause by `created_at` and `project_id`
# because we want to force the query planner to use the
# `ci_builds_gitlab_monitor_metrics` index all the time.
def pending_builds(timeout)
Ci::Build
.pending
.created_at_before(timeout)
.updated_at_before(timeout)
.order(created_at: :asc, project_id: :asc)
end
# rubocop: enable CodeReuse/ActiveRecord
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::StuckBuilds::DropPendingService, feature_category: :runner_fleet do
let_it_be(:runner) { create(:ci_runner) }
let_it_be(:pipeline) { create(:ci_empty_pipeline) }
let_it_be_with_reload(:job) do
create(:ci_build, pipeline: pipeline, runner: runner)
end
let(:created_at) {}
let(:updated_at) {}
subject(:service) { described_class.new }
before do
job_attributes = { status: status }
job_attributes[:created_at] = created_at if created_at
job_attributes[:updated_at] = updated_at if updated_at
job_attributes.compact!
job.update!(job_attributes)
end
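# Two thresholds apply: any pending build untouched for over a day is dropped
# outright, while builds reporting stuck? are dropped after just an hour via
# drop_stuck, hence stuck? is stubbed per context below.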
context 'when job is pending' do
let(:status) { 'pending' }
context 'when job is not stuck' do
before do
allow_next_found_instance_of(Ci::Build) do |build|
allow(build).to receive(:stuck?).and_return(false)
end
end
context 'when job was updated_at more than 1 day ago' do
let(:updated_at) { 1.5.days.ago }
context 'when created_at is the same as updated_at' do
let(:created_at) { 1.5.days.ago }
it_behaves_like 'job is dropped with failure reason', 'stuck_or_timeout_failure'
end
context 'when created_at is before updated_at' do
let(:created_at) { 3.days.ago }
it_behaves_like 'job is dropped with failure reason', 'stuck_or_timeout_failure'
end
end
context 'when job was updated less than 1 day ago' do
let(:updated_at) { 6.hours.ago }
context 'when created_at is the same as updated_at' do
let(:created_at) { 1.5.days.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is before updated_at' do
let(:created_at) { 3.days.ago }
it_behaves_like 'job is unchanged'
end
end
context 'when job was updated more than 1 hour ago' do
let(:updated_at) { 2.hours.ago }
context 'when created_at is the same as updated_at' do
let(:created_at) { 2.hours.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is before updated_at' do
let(:created_at) { 3.days.ago }
it_behaves_like 'job is unchanged'
end
end
end
context 'when job is stuck' do
before do
allow_next_found_instance_of(Ci::Build) do |build|
allow(build).to receive(:stuck?).and_return(true)
end
end
context 'when job was updated_at more than 1 hour ago' do
let(:updated_at) { 1.5.hours.ago }
context 'when created_at is the same as updated_at' do
let(:created_at) { 1.5.hours.ago }
it_behaves_like 'job is dropped with failure reason', 'stuck_or_timeout_failure'
end
context 'when created_at is before updated_at' do
let(:created_at) { 3.days.ago }
it_behaves_like 'job is dropped with failure reason', 'stuck_or_timeout_failure'
end
end
context 'when job was updated less than 1 hour ago' do
let(:updated_at) { 30.minutes.ago }
context 'when created_at is the same as updated_at' do
let(:created_at) { 30.minutes.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is before updated_at' do
let(:created_at) { 2.days.ago }
it_behaves_like 'job is unchanged'
end
end
end
end
context 'when job is running' do
let(:status) { 'running' }
context 'when job was updated_at more than an hour ago' do
let(:updated_at) { 2.hours.ago }
it_behaves_like 'job is unchanged'
end
context 'when job was updated less than 1 hour ago' do
let(:updated_at) { 30.minutes.ago }
it_behaves_like 'job is unchanged'
end
end
%w[success skipped failed canceled].each do |status|
context "when job is #{status}" do
let(:status) { status }
let(:updated_at) { 2.days.ago }
context 'when created_at is the same as updated_at' do
let(:created_at) { 2.days.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is before updated_at' do
let(:created_at) { 3.days.ago }
it_behaves_like 'job is unchanged'
end
end
end
context 'for deleted project' do
let(:status) { 'running' }
let(:updated_at) { 2.days.ago }
before do
job.project.update!(pending_delete: true)
end
it_behaves_like 'job is unchanged'
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
module StuckBuilds
class DropRunningService
include DropHelpers
BUILD_RUNNING_OUTDATED_TIMEOUT = 1.hour
def execute
Gitlab::AppLogger.info "#{self.class}: Cleaning running, timed-out builds"
drop(running_timed_out_builds, failure_reason: :stuck_or_timeout_failure)
end
private
def running_timed_out_builds
if Feature.enabled?(:ci_new_query_for_running_stuck_jobs)
Ci::Build
.running
.created_at_before(BUILD_RUNNING_OUTDATED_TIMEOUT.ago)
.updated_at_before(BUILD_RUNNING_OUTDATED_TIMEOUT.ago)
.order(created_at: :asc, project_id: :asc) # rubocop:disable CodeReuse/ActiveRecord
else
Ci::Build.running.updated_at_before(BUILD_RUNNING_OUTDATED_TIMEOUT.ago)
end
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::StuckBuilds::DropRunningService, feature_category: :runner_fleet do
let!(:runner) { create :ci_runner }
let!(:job) { create(:ci_build, runner: runner, created_at: created_at, updated_at: updated_at, status: status) }
subject(:service) { described_class.new }
around do |example|
freeze_time { example.run }
end
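# Freezing time keeps the created_at/updated_at offsets stable relative to
# BUILD_RUNNING_OUTDATED_TIMEOUT while each example runs.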
shared_examples 'running builds' do
context 'when job is running' do
let(:status) { 'running' }
let(:outdated_time) { described_class::BUILD_RUNNING_OUTDATED_TIMEOUT.ago - 30.minutes }
let(:fresh_time) { described_class::BUILD_RUNNING_OUTDATED_TIMEOUT.ago + 30.minutes }
context 'when job is outdated' do
let(:created_at) { outdated_time }
let(:updated_at) { outdated_time }
it_behaves_like 'job is dropped with failure reason', 'stuck_or_timeout_failure'
end
context 'when job is fresh' do
let(:created_at) { fresh_time }
let(:updated_at) { fresh_time }
it_behaves_like 'job is unchanged'
end
context 'when job freshly updated' do
let(:created_at) { outdated_time }
let(:updated_at) { fresh_time }
it_behaves_like 'job is unchanged'
end
end
end
include_examples 'running builds'
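# The same expectations must hold when the feature flag switches the service
# to the legacy query that filters on updated_at only.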
context 'when new query flag is disabled' do
before do
stub_feature_flags(ci_new_query_for_running_stuck_jobs: false)
end
include_examples 'running builds'
end
%w[success skipped failed canceled scheduled pending].each do |status|
context "when job is #{status}" do
let(:status) { status }
let(:updated_at) { 2.days.ago }
context 'when created_at is the same as updated_at' do
let(:created_at) { 2.days.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is before updated_at' do
let(:created_at) { 3.days.ago }
it_behaves_like 'job is unchanged'
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
module ResourceGroups
class AssignResourceFromResourceGroupService < ::BaseService
# rubocop: disable CodeReuse/ActiveRecord
def execute(resource_group)
release_resource_from_stale_jobs(resource_group)
free_resources = resource_group.resources.free.count
resource_group.upcoming_processables.take(free_resources).each do |upcoming|
Gitlab::OptimisticLocking.retry_lock(upcoming, name: 'enqueue_waiting_for_resource') do |processable|
processable.enqueue_waiting_for_resource
end
end
end
# rubocop: enable CodeReuse/ActiveRecord
private
def release_resource_from_stale_jobs(resource_group)
resource_group.resources.stale_processables.find_each do |processable|
resource_group.release_resource_from(processable)
end
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::ResourceGroups::AssignResourceFromResourceGroupService, feature_category: :continuous_integration do
include ConcurrentHelpers
let_it_be(:project) { create(:project) }
let_it_be(:user) { create(:user) }
let(:service) { described_class.new(project, user) }
describe '#execute' do
subject { service.execute(resource_group) }
let(:resource_group) { create(:ci_resource_group, project: project) }
let!(:build) { create(:ci_build, :waiting_for_resource, project: project, user: user, resource_group: resource_group) }
context 'when there is an available resource' do
it 'requests resource' do
subject
expect(build.reload).to be_pending
expect(build.resource).to be_present
end
context 'when requesting the resource fails' do
before do
allow_next_instance_of(Ci::Build) do |build|
allow(build).to receive(:enqueue_waiting_for_resource) { false }
end
end
it 'has a build waiting for resource' do
subject
expect(build).to be_waiting_for_resource
end
end
context 'when the build has already retained a resource' do
before do
resource_group.assign_resource_to(build)
build.update_column(:status, :pending)
end
it 'has a pending build' do
subject
expect(build).to be_pending
end
end
context 'when process mode is oldest_first' do
let(:resource_group) { create(:ci_resource_group, process_mode: :oldest_first, project: project) }
it 'requests resource' do
subject
expect(build.reload).to be_pending
expect(build.resource).to be_present
end
context 'when the other job exists in the newer pipeline' do
let!(:build_2) { create(:ci_build, :waiting_for_resource, project: project, user: user, resource_group: resource_group) }
it 'requests resource for the job in the oldest pipeline' do
subject
expect(build.reload).to be_pending
expect(build.resource).to be_present
expect(build_2.reload).to be_waiting_for_resource
expect(build_2.resource).to be_nil
end
end
context 'when build is not `waiting_for_resource` state' do
let!(:build) { create(:ci_build, :created, project: project, user: user, resource_group: resource_group) }
it 'attempts to request a resource' do
expect_next_found_instance_of(Ci::Build) do |job|
expect(job).to receive(:enqueue_waiting_for_resource).and_call_original
end
subject
end
it 'does not change the job status' do
subject
expect(build.reload).to be_created
expect(build.resource).to be_nil
end
end
end
context 'when process mode is newest_first' do
let(:resource_group) { create(:ci_resource_group, process_mode: :newest_first, project: project) }
it 'requests resource' do
subject
expect(build.reload).to be_pending
expect(build.resource).to be_present
end
context 'when the other job exists in the newer pipeline' do
let!(:build_2) { create(:ci_build, :waiting_for_resource, project: project, user: user, resource_group: resource_group) }
it 'requests resource for the job in the newest pipeline' do
subject
expect(build.reload).to be_waiting_for_resource
expect(build.resource).to be_nil
expect(build_2.reload).to be_pending
expect(build_2.resource).to be_present
end
end
context 'when build is not `waiting_for_resource` state' do
let!(:build) { create(:ci_build, :created, project: project, user: user, resource_group: resource_group) }
it 'attempts to request a resource' do
expect_next_found_instance_of(Ci::Build) do |job|
expect(job).to receive(:enqueue_waiting_for_resource).and_call_original
end
subject
end
it 'does not change the job status' do
subject
expect(build.reload).to be_created
expect(build.resource).to be_nil
end
end
end
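# Running the service concurrently exercises the Gitlab::OptimisticLocking
# retry path; whatever the interleaving, the build must end up pending.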
context 'when parallel services are running' do
it 'can run the same command in parallel' do
parallel_num = 4
blocks = Array.new(parallel_num).map do
-> { subject }
end
run_parallel(blocks)
expect(build.reload).to be_pending
end
end
end
context 'when there are no available resources' do
let!(:other_build) { create(:ci_build) }
before do
resource_group.assign_resource_to(other_build)
end
it 'does not request resource' do
expect_any_instance_of(Ci::Build).not_to receive(:enqueue_waiting_for_resource)
subject
expect(build.reload).to be_waiting_for_resource
end
context 'when there is a stale build assigned to a resource' do
before do
other_build.doom!
other_build.update_column(:updated_at, 10.minutes.ago)
end
it 'releases the resource from the stale build and assigns it to the waiting build' do
subject
expect(build.reload).to be_pending
expect(build.resource).to be_present
end
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
module PipelineProcessing
class AtomicProcessingService
include Gitlab::Utils::StrongMemoize
include ExclusiveLeaseGuard
attr_reader :pipeline
DEFAULT_LEASE_TIMEOUT = 1.minute
BATCH_SIZE = 20
def initialize(pipeline)
@pipeline = pipeline
@collection = AtomicProcessingService::StatusCollection.new(pipeline)
end
def execute
return unless pipeline.needs_processing?
# Run the process only if we can obtain an exclusive lease; returns nil if lease is unavailable
success = try_obtain_lease { process! }
if success
# If any jobs changed from stopped to alive status during pipeline processing, we must
# re-reset their dependent jobs; see https://gitlab.com/gitlab-org/gitlab/-/issues/388539.
new_alive_jobs.group_by(&:user).each do |user, jobs|
log_running_reset_skipped_jobs_service(jobs)
ResetSkippedJobsService.new(project, user).execute(jobs)
end
# Re-schedule if we need further processing
PipelineProcessWorker.perform_async(pipeline.id) if pipeline.needs_processing?
end
success
end
private
def process!
update_stages!
update_pipeline!
update_jobs_processed!
Ci::ExpirePipelineCacheService.new.execute(pipeline)
true
end
def update_stages!
pipeline.stages.ordered.each { |stage| update_stage!(stage) }
end
def update_stage!(stage)
# Update jobs for a given stage in bulk/slices
@collection
.created_job_ids_in_stage(stage.position)
.in_groups_of(BATCH_SIZE, false) { |ids| update_jobs!(ids) }
status = @collection.status_of_stage(stage.position)
stage.set_status(status)
end
def update_jobs!(ids)
created_jobs = pipeline
.current_processable_jobs
.id_in(ids)
.with_project_preload
.created
.ordered_by_stage
.select_with_aggregated_needs(project)
created_jobs.each { |job| update_job!(job) }
end
def update_pipeline!
pipeline.set_status(@collection.status_of_all)
end
def update_jobs_processed!
processing = @collection.processing_jobs
processing.each_slice(BATCH_SIZE) do |slice|
pipeline.all_jobs.match_id_and_lock_version(slice)
.update_as_processed!
end
end
def update_job!(job)
previous_status = status_of_previous_jobs(job)
# We do not continue to process the job if the previous status is not completed
return unless Ci::HasStatus::COMPLETED_STATUSES.include?(previous_status)
::Deployments::CreateForJobService.new.execute(job)
Gitlab::OptimisticLocking.retry_lock(job, name: 'atomic_processing_update_job') do |subject|
Ci::ProcessBuildService.new(project, subject.user)
.execute(subject, previous_status)
# update internal representation of job
# to make the status change of job to be taken into account during further processing
@collection.set_job_status(job.id, job.status, job.lock_version)
end
end
def status_of_previous_jobs(job)
if job.scheduling_type_dag?
# job uses DAG, get status of all dependent needs
@collection.status_of_jobs(job.aggregated_needs_names.to_a)
else
# job uses Stages, get status of prior stage
@collection.status_of_jobs_prior_to_stage(job.stage_idx.to_i)
end
end
# Gets the jobs that changed from stopped to alive status since the initial status collection
# was evaluated. We determine this by checking if their current status is no longer stopped.
def new_alive_jobs
initial_stopped_job_names = @collection.stopped_job_names
return [] if initial_stopped_job_names.empty?
new_collection = AtomicProcessingService::StatusCollection.new(pipeline)
new_alive_job_names = initial_stopped_job_names - new_collection.stopped_job_names
return [] if new_alive_job_names.empty?
pipeline
.current_jobs
.by_name(new_alive_job_names)
.preload(:user) # rubocop: disable CodeReuse/ActiveRecord
.to_a
end
def project
pipeline.project
end
def lease_key
"#{super}::pipeline_id:#{pipeline.id}"
end
def lease_timeout
DEFAULT_LEASE_TIMEOUT
end
def lease_taken_log_level
:info
end
def log_running_reset_skipped_jobs_service(jobs)
Gitlab::AppJsonLogger.info(
class: self.class.name.to_s,
message: 'Running ResetSkippedJobsService on new alive jobs',
project_id: project.id,
pipeline_id: pipeline.id,
user_id: jobs.first.user.id,
jobs_count: jobs.count
)
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::PipelineProcessing::AtomicProcessingService, feature_category: :continuous_integration do
include RepoHelpers
include ExclusiveLeaseHelpers
describe 'Pipeline Processing Service Tests With Yaml' do
let_it_be(:project) { create(:project, :repository) }
let_it_be(:user) { project.first_owner }
where(:test_file_path) do
Dir.glob(Rails.root.join('spec/services/ci/pipeline_processing/test_cases/*.yml'))
end
with_them do
let(:test_file) { YAML.load_file(test_file_path) }
let(:pipeline) { Ci::CreatePipelineService.new(project, user, ref: 'master').execute(:pipeline).payload }
before do
stub_ci_pipeline_yaml_file(YAML.dump(test_file['config']))
end
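# Each YAML fixture supplies a pipeline config, the expected state right after
# creation ('init'), and an ordered list of 'transitions' pairing an event
# with the state expected once all resulting async jobs have drained.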
it 'follows transitions' do
expect(pipeline).to be_persisted
Sidekiq::Worker.drain_all # ensure that all async jobs are executed
check_expectation(test_file.dig('init', 'expect'), "init")
test_file['transitions'].each_with_index do |transition, idx|
process_events(transition)
Sidekiq::Worker.drain_all # ensure that all async jobs are executed
check_expectation(transition['expect'], "transition:#{idx}")
end
end
private
def check_expectation(expectation, message)
expect(current_state.deep_stringify_keys).to eq(expectation), message
end
def current_state
# reload pipeline and all relations
pipeline.reload
{
pipeline: pipeline.status,
stages: pipeline.stages.pluck(:name, :status).to_h,
jobs: pipeline.latest_statuses.pluck(:name, :status).to_h
}
end
def process_events(transition)
if transition['jobs']
event_on_jobs(transition['event'], transition['jobs'])
else
event_on_pipeline(transition['event'])
end
end
def event_on_jobs(event, job_names)
jobs = pipeline.latest_statuses.by_name(job_names).to_a
expect(jobs.count).to eq(job_names.count) # ensure that we have the same counts
jobs.each do |job|
case event
when 'play'
job.play(user)
when 'retry'
::Ci::RetryJobService.new(project, user).execute(job)
else
job.public_send("#{event}!")
end
end
end
def event_on_pipeline(event)
if event == 'retry'
pipeline.retry_failed(user)
else
pipeline.public_send("#{event}!")
end
end
end
end
describe 'Pipeline Processing Service' do
let(:project) { create(:project, :repository) }
let(:user) { project.first_owner }
let(:pipeline) do
create(:ci_empty_pipeline, ref: 'master', project: project)
end
context 'when simple pipeline is defined' do
before do
create_build('linux', stage_idx: 0)
create_build('mac', stage_idx: 0)
create_build('rspec', stage_idx: 1)
create_build('rubocop', stage_idx: 1)
create_build('deploy', stage_idx: 2)
end
it 'processes a pipeline', :sidekiq_inline do
expect(process_pipeline).to be_truthy
succeed_pending
expect(builds.success.count).to eq(2)
succeed_pending
expect(builds.success.count).to eq(4)
succeed_pending
expect(builds.success.count).to eq(5)
end
it 'does not process pipeline if existing stage is running' do
expect(process_pipeline).to be_truthy
expect(builds.pending.count).to eq(2)
expect(process_pipeline).to be_falsey
expect(builds.pending.count).to eq(2)
end
end
context 'custom stage with first job allowed to fail' do
before do
create_build('clean_job', stage_idx: 0, allow_failure: true)
create_build('test_job', stage_idx: 1, allow_failure: true)
end
it 'automatically triggers a next stage when build finishes', :sidekiq_inline do
expect(process_pipeline).to be_truthy
expect(builds_statuses).to eq ['pending']
fail_running_or_pending
expect(builds_statuses).to eq %w[failed pending]
fail_running_or_pending
expect(pipeline.reload).to be_success
end
end
context 'when optional manual actions are defined', :sidekiq_inline do
before do
create_build('build', stage_idx: 0)
create_build('test', stage_idx: 1)
create_build('test_failure', stage_idx: 2, when: 'on_failure')
create_build('deploy', stage_idx: 3)
create_build('production', stage_idx: 3, when: 'manual', allow_failure: true)
create_build('cleanup', stage_idx: 4, when: 'always')
create_build('clear:cache', stage_idx: 4, when: 'manual', allow_failure: true)
end
context 'when builds are successful' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names).to eq ['build']
expect(builds_statuses).to eq ['pending']
succeed_running_or_pending
expect(builds_names).to eq %w[build test]
expect(builds_statuses).to eq %w[success pending]
succeed_running_or_pending
expect(builds_names).to eq %w[build test deploy production]
expect(builds_statuses).to eq %w[success success pending manual]
succeed_running_or_pending
expect(builds_names).to eq %w[build test deploy production cleanup clear:cache]
expect(builds_statuses).to eq %w[success success success manual pending manual]
succeed_running_or_pending
expect(builds_statuses).to eq %w[success success success manual success manual]
expect(pipeline.reload.status).to eq 'success'
end
end
context 'when test job fails' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names).to eq ['build']
expect(builds_statuses).to eq ['pending']
succeed_running_or_pending
expect(builds_names).to eq %w[build test]
expect(builds_statuses).to eq %w[success pending]
fail_running_or_pending
expect(builds_names).to eq %w[build test test_failure]
expect(builds_statuses).to eq %w[success failed pending]
succeed_running_or_pending
expect(builds_names).to eq %w[build test test_failure cleanup]
expect(builds_statuses).to eq %w[success failed success pending]
succeed_running_or_pending
expect(builds_statuses).to eq %w[success failed success success]
expect(pipeline.reload.status).to eq 'failed'
end
end
context 'when test and test_failure jobs fail' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names).to eq ['build']
expect(builds_statuses).to eq ['pending']
succeed_running_or_pending
expect(builds_names).to eq %w[build test]
expect(builds_statuses).to eq %w[success pending]
fail_running_or_pending
expect(builds_names).to eq %w[build test test_failure]
expect(builds_statuses).to eq %w[success failed pending]
fail_running_or_pending
expect(builds_names).to eq %w[build test test_failure cleanup]
expect(builds_statuses).to eq %w[success failed failed pending]
succeed_running_or_pending
expect(builds_names).to eq %w[build test test_failure cleanup]
expect(builds_statuses).to eq %w[success failed failed success]
expect(pipeline.reload.status).to eq('failed')
end
end
context 'when deploy job fails' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names).to eq ['build']
expect(builds_statuses).to eq ['pending']
succeed_running_or_pending
expect(builds_names).to eq %w[build test]
expect(builds_statuses).to eq %w[success pending]
succeed_running_or_pending
expect(builds_names).to eq %w[build test deploy production]
expect(builds_statuses).to eq %w[success success pending manual]
fail_running_or_pending
expect(builds_names).to eq %w[build test deploy production cleanup]
expect(builds_statuses).to eq %w[success success failed manual pending]
succeed_running_or_pending
expect(builds_statuses).to eq %w[success success failed manual success]
expect(pipeline.reload).to be_failed
end
end
context 'when build is canceled in the second stage' do
it 'does not schedule builds after build has been canceled' do
expect(process_pipeline).to be_truthy
expect(builds_names).to eq ['build']
expect(builds_statuses).to eq ['pending']
succeed_running_or_pending
expect(builds.running_or_pending).not_to be_empty
expect(builds_names).to eq %w[build test]
expect(builds_statuses).to eq %w[success pending]
cancel_running_or_pending
expect(builds.running_or_pending).to be_empty
expect(builds_names).to eq %w[build test]
expect(builds_statuses).to eq %w[success canceled]
expect(pipeline.reload).to be_canceled
end
end
context 'when listing optional manual actions' do
it 'returns only for skipped builds' do
# currently all builds are created
expect(process_pipeline).to be_truthy
expect(manual_actions).to be_empty
# succeed stage build
succeed_running_or_pending
expect(manual_actions).to be_empty
# succeed stage test
succeed_running_or_pending
expect(manual_actions).to be_one # production
# succeed stage deploy
succeed_running_or_pending
expect(manual_actions).to be_many # production and clear cache
end
end
end
context 'when delayed jobs are defined', :sidekiq_inline do
context 'when the scenario is a timed incremental rollout' do
before do
create_build('build', stage_idx: 0)
create_build('rollout10%', **delayed_options, stage_idx: 1)
create_build('rollout100%', **delayed_options, stage_idx: 2)
create_build('cleanup', stage_idx: 3)
allow(Ci::BuildScheduleWorker).to receive(:perform_at)
end
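# Stubbing Ci::BuildScheduleWorker keeps delayed builds in the 'scheduled'
# state until the spec promotes them explicitly via enqueue_scheduled.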
context 'when builds are successful' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
travel_to 2.minutes.from_now do
enqueue_scheduled('rollout10%')
end
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'scheduled' })
travel_to 2.minutes.from_now do
enqueue_scheduled('rollout100%')
end
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'success', 'cleanup': 'pending' })
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'success', 'cleanup': 'success' })
expect(pipeline.reload.status).to eq 'success'
end
end
context 'when build job fails' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
fail_running_or_pending
expect(builds_names_and_statuses).to eq({ 'build': 'failed' })
expect(pipeline.reload.status).to eq 'failed'
end
end
context 'when rollout 10% is unscheduled' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
unschedule
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'manual' })
expect(pipeline.reload.status).to eq 'manual'
end
context 'when user plays rollout 10%' do
it 'schedules rollout100%' do
process_pipeline
succeed_pending
unschedule
play_manual_action('rollout10%')
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'scheduled' })
expect(pipeline.reload.status).to eq 'scheduled'
end
end
end
context 'when rollout 10% fails' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
travel_to 2.minutes.from_now do
enqueue_scheduled('rollout10%')
end
fail_running_or_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'failed' })
expect(pipeline.reload.status).to eq 'failed'
end
context 'when user retries rollout 10%' do
it 'does not schedule rollout10% again' do
process_pipeline
succeed_pending
enqueue_scheduled('rollout10%')
fail_running_or_pending
retry_build('rollout10%')
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'pending' })
expect(pipeline.reload.status).to eq 'running'
end
end
end
context 'when rollout 10% is played immediately' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
play_manual_action('rollout10%')
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'pending' })
expect(pipeline.reload.status).to eq 'running'
end
end
end
context 'when only one scheduled job exists in a pipeline' do
before do
create_build('delayed', **delayed_options, stage_idx: 0)
allow(Ci::BuildScheduleWorker).to receive(:perform_at)
end
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'delayed': 'scheduled' })
expect(pipeline.reload.status).to eq 'scheduled'
end
end
context 'when there are two delayed jobs in a stage' do
before do
create_build('delayed1', **delayed_options, stage_idx: 0)
create_build('delayed2', **delayed_options, stage_idx: 0)
create_build('job', stage_idx: 1)
allow(Ci::BuildScheduleWorker).to receive(:perform_at)
end
it 'blocks the stage until all scheduled jobs finished' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'delayed1': 'scheduled', 'delayed2': 'scheduled' })
travel_to 2.minutes.from_now do
enqueue_scheduled('delayed1')
end
expect(builds_names_and_statuses).to eq({ 'delayed1': 'pending', 'delayed2': 'scheduled' })
expect(pipeline.reload.status).to eq 'running'
end
end
context 'when a delayed job is allowed to fail' do
before do
create_build('delayed', **delayed_options, allow_failure: true, stage_idx: 0)
create_build('job', stage_idx: 1)
allow(Ci::BuildScheduleWorker).to receive(:perform_at)
end
it 'blocks the stage and continues after it failed' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'delayed': 'scheduled' })
travel_to 2.minutes.from_now do
enqueue_scheduled('delayed')
end
fail_running_or_pending
expect(builds_names_and_statuses).to eq({ 'delayed': 'failed', 'job': 'pending' })
expect(pipeline.reload.status).to eq 'pending'
end
end
end
context 'when an exception is raised during a persistent ref creation' do
before do
successful_build('test', stage_idx: 0)
allow_next_instance_of(Ci::PersistentRef) do |instance|
allow(instance).to receive(:delete_refs) { raise ArgumentError }
end
end
it 'processes the pipeline' do
expect { process_pipeline }.not_to raise_error
end
end
context 'when there are manual action in earlier stages' do
context 'when first stage has only optional manual actions' do
before do
create_build('build', stage_idx: 0, when: 'manual', allow_failure: true)
create_build('check', stage_idx: 1)
create_build('test', stage_idx: 2)
process_pipeline
end
it 'starts from the second stage' do
expect(all_builds_statuses).to eq %w[manual pending created]
end
end
context 'when second stage has only optional manual actions' do
before do
create_build('check', stage_idx: 0)
create_build('build', stage_idx: 1, when: 'manual', allow_failure: true)
create_build('test', stage_idx: 2)
process_pipeline
end
it 'skips second stage and continues on third stage', :sidekiq_inline do
expect(all_builds_statuses).to eq(%w[pending created created])
builds.first.success
expect(all_builds_statuses).to eq(%w[success manual pending])
end
end
end
context 'when there are only manual actions in stages' do
before do
create_build('image', stage_idx: 0, when: 'manual', allow_failure: true)
create_build('build', stage_idx: 1, when: 'manual', allow_failure: true)
create_build('deploy', stage_idx: 2, when: 'manual')
create_build('check', stage_idx: 3)
process_pipeline
end
it 'processes all jobs until blocking actions encountered' do
expect(all_builds_statuses).to eq(%w[manual manual manual created])
expect(all_builds_names).to eq(%w[image build deploy check])
expect(pipeline.reload).to be_blocked
end
end
context 'when there is only one manual action' do
before do
create_build('deploy', stage_idx: 0, when: 'manual', allow_failure: true)
process_pipeline
end
it 'skips the pipeline' do
expect(pipeline.reload).to be_skipped
end
context 'when the action was played' do
before do
play_manual_action('deploy')
end
it 'queues the action and pipeline', :sidekiq_inline do
expect(all_builds_statuses).to eq(%w[pending])
expect(pipeline.reload).to be_pending
end
end
end
context 'when blocking manual actions are defined', :sidekiq_inline do
before do
create_build('code:test', stage_idx: 0)
create_build('staging:deploy', stage_idx: 1, when: 'manual')
create_build('staging:test', stage_idx: 2, when: 'on_success')
create_build('production:deploy', stage_idx: 3, when: 'manual')
create_build('production:test', stage_idx: 4, when: 'always')
end
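# Manual builds created here without allow_failure: true are blocking: the
# pipeline stops in the 'manual' status until they are played.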
context 'when first stage succeeds' do
it 'blocks pipeline on stage with first manual action' do
process_pipeline
expect(builds_names).to eq %w[code:test]
expect(builds_statuses).to eq %w[pending]
expect(pipeline.reload.status).to eq 'pending'
succeed_running_or_pending
expect(builds_names).to eq %w[code:test staging:deploy]
expect(builds_statuses).to eq %w[success manual]
expect(pipeline.reload).to be_manual
end
end
context 'when first stage fails' do
it 'does not take blocking action into account' do
process_pipeline
expect(builds_names).to eq %w[code:test]
expect(builds_statuses).to eq %w[pending]
expect(pipeline.reload.status).to eq 'pending'
fail_running_or_pending
expect(builds_names).to eq %w[code:test production:test]
expect(builds_statuses).to eq %w[failed pending]
succeed_running_or_pending
expect(builds_statuses).to eq %w[failed success]
expect(pipeline.reload).to be_failed
end
end
context 'when pipeline is promoted sequentially up to the end' do
before do
# Users need the ability to merge into a branch in order to trigger
# protected manual actions.
#
create(:protected_branch, :developers_can_merge, name: 'master', project: project)
end
it 'properly processes entire pipeline' do
process_pipeline
expect(builds_names).to eq %w[code:test]
expect(builds_statuses).to eq %w[pending]
succeed_running_or_pending
expect(builds_names).to eq %w[code:test staging:deploy]
expect(builds_statuses).to eq %w[success manual]
expect(pipeline.reload).to be_manual
play_manual_action('staging:deploy')
expect(builds_statuses).to eq %w[success pending]
succeed_running_or_pending
expect(builds_names).to eq %w[code:test staging:deploy staging:test]
expect(builds_statuses).to eq %w[success success pending]
succeed_running_or_pending
expect(builds_names).to eq %w[code:test staging:deploy staging:test
production:deploy]
expect(builds_statuses).to eq %w[success success success manual]
expect(pipeline.reload).to be_manual
expect(pipeline.reload).to be_blocked
expect(pipeline.reload).not_to be_active
expect(pipeline.reload).not_to be_complete
play_manual_action('production:deploy')
expect(builds_statuses).to eq %w[success success success pending]
expect(pipeline.reload).to be_running
succeed_running_or_pending
expect(builds_names).to eq %w[code:test staging:deploy staging:test
production:deploy production:test]
expect(builds_statuses).to eq %w[success success success success pending]
expect(pipeline.reload).to be_running
succeed_running_or_pending
expect(builds_names).to eq %w[code:test staging:deploy staging:test
production:deploy production:test]
expect(builds_statuses).to eq %w[success success success success success]
expect(pipeline.reload).to be_success
end
end
end
context 'when second stage has only on_failure jobs', :sidekiq_inline do
before do
create_build('check', stage_idx: 0)
create_build('build', stage_idx: 1, when: 'on_failure')
create_build('test', stage_idx: 2)
process_pipeline
end
it 'skips second stage and continues on third stage' do
expect(all_builds_statuses).to eq(%w[pending created created])
builds.first.success
expect(all_builds_statuses).to eq(%w[success skipped pending])
end
end
context 'when failed build in the middle stage is retried', :sidekiq_inline do
context 'when failed build is the only unsuccessful build in the stage' do
before do
create_build('build:1', stage_idx: 0)
create_build('build:2', stage_idx: 0)
create_build('test:1', stage_idx: 1)
create_build('test:2', stage_idx: 1)
create_build('deploy:1', stage_idx: 2)
create_build('deploy:2', stage_idx: 2)
end
it 'does trigger builds in the next stage' do
expect(process_pipeline).to be_truthy
expect(builds_names).to eq ['build:1', 'build:2']
succeed_running_or_pending
expect(builds_names).to eq ['build:1', 'build:2', 'test:1', 'test:2']
pipeline.builds.find_by(name: 'test:1').success!
pipeline.builds.find_by(name: 'test:2').drop!
expect(builds_names).to eq ['build:1', 'build:2', 'test:1', 'test:2']
Ci::RetryJobService.new(pipeline.project, user).execute(pipeline.builds.find_by(name: 'test:2'))[:job].reset.success!
expect(builds_names).to eq ['build:1', 'build:2', 'test:1', 'test:2',
'test:2', 'deploy:1', 'deploy:2']
end
end
end
context 'when builds with auto-retries are configured', :sidekiq_inline do
before do
create_build('build:1', stage_idx: 0, user: user, options: { script: 'aa', retry: 2 })
create_build('test:1', stage_idx: 1, user: user, when: :on_failure)
create_build('test:2', stage_idx: 1, user: user, options: { script: 'aa', retry: 1 })
end
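# retry: 2 permits up to two automatic retries, so failing the first attempt
# should enqueue a fresh build with the same name rather than fail the stage.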
it 'automatically retries builds in a valid order' do
expect(process_pipeline).to be_truthy
fail_running_or_pending
expect(builds_names).to eq %w[build:1 build:1]
expect(builds_statuses).to eq %w[failed pending]
succeed_running_or_pending
expect(builds_names).to eq %w[build:1 build:1 test:2]
expect(builds_statuses).to eq %w[failed success pending]
succeed_running_or_pending
expect(builds_names).to eq %w[build:1 build:1 test:2]
expect(builds_statuses).to eq %w[failed success success]
expect(pipeline.reload).to be_success
end
end
context 'when pipeline with needs is created', :sidekiq_inline do
let!(:linux_build) { create_build('linux:build', stage: 'build', stage_idx: 0) }
let!(:mac_build) { create_build('mac:build', stage: 'build', stage_idx: 0) }
let!(:linux_rspec) { create_build('linux:rspec', stage: 'test', stage_idx: 1, scheduling_type: :dag) }
let!(:linux_rubocop) { create_build('linux:rubocop', stage: 'test', stage_idx: 1, scheduling_type: :dag) }
let!(:mac_rspec) { create_build('mac:rspec', stage: 'test', stage_idx: 1, scheduling_type: :dag) }
let!(:mac_rubocop) { create_build('mac:rubocop', stage: 'test', stage_idx: 1, scheduling_type: :dag) }
let!(:deploy) { create_build('deploy', stage: 'deploy', stage_idx: 2) }
let!(:linux_rspec_on_build) { create(:ci_build_need, build: linux_rspec, name: 'linux:build') }
let!(:linux_rubocop_on_build) { create(:ci_build_need, build: linux_rubocop, name: 'linux:build') }
let!(:mac_rspec_on_build) { create(:ci_build_need, build: mac_rspec, name: 'mac:build') }
let!(:mac_rubocop_on_build) { create(:ci_build_need, build: mac_rubocop, name: 'mac:build') }
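# With scheduling_type: :dag, a job becomes pending as soon as the builds it
# needs have succeeded, so stage order alone no longer gates execution.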
it 'runs jobs out of order when linux:* finishes first' do
expect(process_pipeline).to be_truthy
expect(stages).to eq(%w[pending created created])
expect(builds.pending).to contain_exactly(linux_build, mac_build)
# we follow the single path of linux
linux_build.reset.success!
expect(stages).to eq(%w[running pending created])
expect(builds.success).to contain_exactly(linux_build)
expect(builds.pending).to contain_exactly(mac_build, linux_rspec, linux_rubocop)
linux_rspec.reset.success!
expect(stages).to eq(%w[running running created])
expect(builds.success).to contain_exactly(linux_build, linux_rspec)
expect(builds.pending).to contain_exactly(mac_build, linux_rubocop)
linux_rubocop.reset.success!
expect(stages).to eq(%w[running running created])
expect(builds.success).to contain_exactly(linux_build, linux_rspec, linux_rubocop)
expect(builds.pending).to contain_exactly(mac_build)
mac_build.reset.success!
mac_rspec.reset.success!
mac_rubocop.reset.success!
expect(stages).to eq(%w[success success pending])
expect(builds.success).to contain_exactly(
linux_build, linux_rspec, linux_rubocop, mac_build, mac_rspec, mac_rubocop)
expect(builds.pending).to contain_exactly(deploy)
end
context 'when one of the jobs is run on a failure' do
let!(:linux_notify) { create_build('linux:notify', stage: 'deploy', stage_idx: 2, when: 'on_failure', scheduling_type: :dag) }
let!(:linux_notify_on_build) { create(:ci_build_need, build: linux_notify, name: 'linux:build') }
context 'when another job in build phase fails first' do
it 'does skip linux:notify' do
expect(process_pipeline).to be_truthy
mac_build.reset.drop!
linux_build.reset.success!
expect(linux_notify.reset).to be_skipped
end
end
context 'when linux:build job fails first' do
it 'does run linux:notify' do
expect(process_pipeline).to be_truthy
linux_build.reset.drop!
expect(linux_notify.reset).to be_pending
end
end
end
context 'when there is a job scheduled with dag but no need (needs: [])' do
let!(:deploy_pages) { create_build('deploy_pages', stage: 'deploy', stage_idx: 2, scheduling_type: :dag) }
it 'runs deploy_pages without waiting for prior stages' do
expect(process_pipeline).to be_truthy
expect(stages).to eq(%w[pending created pending])
expect(builds.pending).to contain_exactly(linux_build, mac_build, deploy_pages)
linux_build.reset.success!
deploy_pages.reset.success!
expect(stages).to eq(%w[running pending running])
expect(builds.success).to contain_exactly(linux_build, deploy_pages)
expect(builds.pending).to contain_exactly(mac_build, linux_rspec, linux_rubocop)
linux_rspec.reset.success!
linux_rubocop.reset.success!
mac_build.reset.success!
mac_rspec.reset.success!
mac_rubocop.reset.success!
expect(stages).to eq(%w[success success running])
expect(builds.pending).to contain_exactly(deploy)
end
end
end
context 'when a needed job is skipped', :sidekiq_inline do
let!(:linux_build) { create_build('linux:build', stage: 'build', stage_idx: 0) }
let!(:linux_rspec) { create_build('linux:rspec', stage: 'test', stage_idx: 1) }
let!(:deploy) { create_build('deploy', stage: 'deploy', stage_idx: 2, scheduling_type: :dag) }
before do
create(:ci_build_need, build: deploy, name: 'linux:build')
end
it 'skips the jobs depending on it' do
expect(process_pipeline).to be_truthy
expect(stages).to eq(%w[pending created created])
expect(all_builds.pending).to contain_exactly(linux_build)
linux_build.reset.drop!
expect(stages).to eq(%w[failed skipped skipped])
expect(all_builds.failed).to contain_exactly(linux_build)
expect(all_builds.skipped).to contain_exactly(linux_rspec, deploy)
end
end
context 'when a needed job is manual', :sidekiq_inline do
let!(:linux_build) { create_build('linux:build', stage: 'build', stage_idx: 0, when: 'manual', allow_failure: true) }
let!(:deploy) { create_build('deploy', stage: 'deploy', stage_idx: 1, scheduling_type: :dag) }
before do
create(:ci_build_need, build: deploy, name: 'linux:build')
end
it 'skips the DAG deploy job' do
expect(process_pipeline).to be_truthy
expect(stages).to eq(%w[skipped skipped])
expect(all_builds.manual).to contain_exactly(linux_build)
expect(all_builds.skipped).to contain_exactly(deploy)
end
end
context 'when jobs change from stopped to alive status during pipeline processing' do
around do |example|
Sidekiq::Testing.fake! { example.run }
end
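# With Sidekiq in fake mode nothing runs automatically; the specs invoke
# process_pipeline explicitly so that job status changes can be interleaved
# between processing runs to reproduce the race condition.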
let(:config) do
<<-YAML
stages: [test, deploy]
manual1:
stage: test
when: manual
script: exit 0
manual2:
stage: test
when: manual
script: exit 0
test1:
stage: test
needs: [manual1]
script: exit 0
test2:
stage: test
needs: [manual2]
script: exit 0
deploy1:
stage: deploy
needs: [manual1, manual2]
script: exit 0
deploy2:
stage: deploy
needs: [test2]
script: exit 0
YAML
end
let(:pipeline) do
Ci::CreatePipelineService.new(project, user, { ref: 'master' }).execute(:push).payload
end
let(:manual1) { all_builds.find_by(name: 'manual1') }
let(:manual2) { all_builds.find_by(name: 'manual2') }
let(:statuses_0) do
{ 'manual1': 'created', 'manual2': 'created', 'test1': 'created', 'test2': 'created', 'deploy1': 'created', 'deploy2': 'created' }
end
let(:statuses_1) do
{ 'manual1': 'manual', 'manual2': 'manual', 'test1': 'skipped', 'test2': 'skipped', 'deploy1': 'skipped', 'deploy2': 'skipped' }
end
let(:statuses_2) do
{ 'manual1': 'pending', 'manual2': 'pending', 'test1': 'skipped', 'test2': 'skipped', 'deploy1': 'skipped', 'deploy2': 'skipped' }
end
let(:statuses_3) do
{ 'manual1': 'pending', 'manual2': 'pending', 'test1': 'created', 'test2': 'created', 'deploy1': 'created', 'deploy2': 'created' }
end
let(:log_info) do
{
class: described_class.name.to_s,
message: 'Running ResetSkippedJobsService on new alive jobs',
project_id: project.id,
pipeline_id: pipeline.id,
user_id: user.id,
jobs_count: 2
}
end
before do
stub_ci_pipeline_yaml_file(config)
pipeline # Create the pipeline
end
# Since this is a test for a race condition, we are calling internal method `enqueue!`
# instead of `play` and stubbing `new_alive_jobs` of the service class.
it 'runs ResetSkippedJobsService on the new alive jobs and logs event' do
# Initial control without any pipeline processing
expect(all_builds_names_and_statuses).to eq(statuses_0)
process_pipeline
# Initial control after the first pipeline processing
expect(all_builds_names_and_statuses).to eq(statuses_1)
# Change the manual jobs from stopped to alive status.
# We don't use `play` to avoid running `ResetSkippedJobsService`.
manual1.enqueue!
manual2.enqueue!
# Statuses after playing the manual jobs
expect(all_builds_names_and_statuses).to eq(statuses_2)
mock_play_jobs_during_processing([manual1, manual2])
expect(Ci::ResetSkippedJobsService).to receive(:new).once.and_call_original
process_pipeline
expect(all_builds_names_and_statuses).to eq(statuses_3)
end
it 'logs event' do
expect(Gitlab::AppJsonLogger).to receive(:info).once.with(log_info)
mock_play_jobs_during_processing([manual1, manual2])
process_pipeline
end
context 'when the new alive jobs belong to different users' do
let_it_be(:user2) { create(:user) }
before do
process_pipeline # First pipeline processing
# Change the manual jobs from stopped to alive status
manual1.enqueue!
manual2.enqueue!
manual2.update!(user: user2)
mock_play_jobs_during_processing([manual1, manual2])
end
it 'runs ResetSkippedJobsService on the new alive jobs' do
# Statuses after playing the manual jobs
expect(all_builds_names_and_statuses).to eq(statuses_2)
# Since there are two different users, we expect this service to be called twice.
expect(Ci::ResetSkippedJobsService).to receive(:new).twice.and_call_original
process_pipeline
expect(all_builds_names_and_statuses).to eq(statuses_3)
end
# In this scenario, the new alive jobs (manual1 and manual2) have different users.
# We can only know for certain the assigned user of dependent jobs that are exclusive
# to either manual1 or manual2. Otherwise, the assigned user will depend on which of
# the new alive jobs get processed first by ResetSkippedJobsService.
it 'assigns the correct user to the dependent jobs' do
test1 = all_builds.find_by(name: 'test1')
test2 = all_builds.find_by(name: 'test2')
expect(test1.user).to eq(user)
expect(test2.user).to eq(user)
process_pipeline
expect(test1.reset.user).to eq(user)
expect(test2.reset.user).to eq(user2)
end
it 'logs event' do
expect(Gitlab::AppJsonLogger).to receive(:info).once.with(log_info.merge(jobs_count: 1))
expect(Gitlab::AppJsonLogger).to receive(:info).once.with(log_info.merge(user_id: user2.id, jobs_count: 1))
mock_play_jobs_during_processing([manual1, manual2])
process_pipeline
end
end
end
context 'when a bridge job has parallel:matrix config', :sidekiq_inline do
let(:parent_config) do
<<-EOY
test:
stage: test
script: echo test
deploy:
stage: deploy
trigger:
include: .child.yml
parallel:
matrix:
- PROVIDER: ovh
STACK: [monitoring, app]
EOY
end
let(:child_config) do
<<-EOY
test:
stage: test
script: echo test
EOY
end
let(:pipeline) do
Ci::CreatePipelineService.new(project, user, { ref: 'master' }).execute(:push).payload
end
let(:project_files) do
{
'.gitlab-ci.yml' => parent_config,
'.child.yml' => child_config
}
end
around do |example|
create_and_delete_files(project, project_files) do
example.run
end
end
it 'creates pipeline with bridges, then passes the matrix variables to downstream jobs' do
expect(all_builds_names).to contain_exactly('test', 'deploy: [ovh, monitoring]', 'deploy: [ovh, app]')
expect(all_builds_statuses).to contain_exactly('pending', 'created', 'created')
succeed_pending
# bridge jobs directly transition to success
expect(all_builds_statuses).to contain_exactly('success', 'success', 'success')
bridge1 = all_builds.find_by(name: 'deploy: [ovh, monitoring]')
bridge2 = all_builds.find_by(name: 'deploy: [ovh, app]')
downstream_job1 = bridge1.downstream_pipeline.all_jobs.first
downstream_job2 = bridge2.downstream_pipeline.all_jobs.first
expect(downstream_job1.scoped_variables.to_hash).to include('PROVIDER' => 'ovh', 'STACK' => 'monitoring')
expect(downstream_job2.scoped_variables.to_hash).to include('PROVIDER' => 'ovh', 'STACK' => 'app')
end
end
context 'when a bridge job has invalid downstream project', :sidekiq_inline do
let(:config) do
<<-EOY
test:
stage: test
script: echo test
deploy:
stage: deploy
trigger:
project: invalid-project
EOY
end
let(:pipeline) do
Ci::CreatePipelineService.new(project, user, { ref: 'master' }).execute(:push).payload
end
before do
stub_ci_pipeline_yaml_file(config)
end
it 'creates a pipeline, then fails the bridge job' do
expect(all_builds_names).to contain_exactly('test', 'deploy')
expect(all_builds_statuses).to contain_exactly('pending', 'created')
succeed_pending
expect(all_builds_names).to contain_exactly('test', 'deploy')
expect(all_builds_statuses).to contain_exactly('success', 'failed')
end
end
context 'when the dependency is stage-independent', :sidekiq_inline do
let(:config) do
<<-EOY
stages: [A, B]
A1:
stage: A
script: exit 0
when: manual
A2:
stage: A
script: exit 0
needs: [A1]
B:
stage: B
needs: [A2]
script: exit 0
EOY
end
let(:pipeline) do
Ci::CreatePipelineService.new(project, user, { ref: 'master' }).execute(:push).payload
end
before do
stub_ci_pipeline_yaml_file(config)
end
it 'processes subsequent jobs in the correct order when playing first job' do
expect(all_builds_names).to eq(%w[A1 A2 B])
expect(all_builds_statuses).to eq(%w[manual skipped skipped])
play_manual_action('A1')
expect(all_builds_names).to eq(%w[A1 A2 B])
expect(all_builds_statuses).to eq(%w[pending created created])
end
end
context 'when the exclusive lease is taken' do
let(:lease_key) { "ci/pipeline_processing/atomic_processing_service::pipeline_id:#{pipeline.id}" }
it 'skips pipeline processing' do
create_build('linux', stage_idx: 0)
stub_exclusive_lease_taken(lease_key)
expect(Gitlab::AppJsonLogger).to receive(:info).with(a_hash_including(message: /^Cannot obtain an exclusive lease/))
expect(process_pipeline).to be_falsy
end
end
describe 'deployments creation' do
let(:config) do
<<-YAML
stages: [stage-0, stage-1, stage-2, stage-3, stage-4]
test:
stage: stage-0
script: exit 0
review:
stage: stage-1
environment:
name: review
action: start
script: exit 0
staging:
stage: stage-2
environment:
name: staging
action: start
script: exit 0
when: manual
allow_failure: false
canary:
stage: stage-3
environment:
name: canary
action: start
script: exit 0
when: manual
production-a:
stage: stage-4
environment:
name: production-a
action: start
script: exit 0
when: manual
production-b:
stage: stage-4
environment:
name: production-b
action: start
script: exit 0
when: manual
needs: [canary]
YAML
end
let(:pipeline) do
Ci::CreatePipelineService.new(project, user, { ref: 'master' }).execute(:push).payload
end
let(:test_job) { all_builds.find_by(name: 'test') }
let(:review_deploy_job) { all_builds.find_by(name: 'review') }
let(:staging_deploy_job) { all_builds.find_by(name: 'staging') }
let(:canary_deploy_job) { all_builds.find_by(name: 'canary') }
let(:production_a_deploy_job) { all_builds.find_by(name: 'production-a') }
let(:production_b_deploy_job) { all_builds.find_by(name: 'production-b') }
before do
create(:environment, name: 'review', project: project)
create(:environment, name: 'staging', project: project)
create(:environment, name: 'canary', project: project)
create(:environment, name: 'production-a', project: project)
create(:environment, name: 'production-b', project: project)
stub_ci_pipeline_yaml_file(config)
pipeline # create the pipeline
end
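# Deployment records are created while a deploy job is being processed (via
# Deployments::CreateForJobService in the service), not when the job finishes,
# which is what the stage-by-stage counts below verify.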
it 'creates deployment records for the deploy jobs', :aggregate_failures do
# processes the 'test' job, not creating a Deployment record
expect { process_pipeline }.not_to change { Deployment.count }
succeed_pending
expect(test_job.status).to eq 'success'
# processes automatic 'review' deploy job, creating a Deployment record
expect { process_pipeline }.to change { Deployment.count }.by(1)
succeed_pending
expect(review_deploy_job.status).to eq 'success'
# processes manual 'staging' deploy job, creating a Deployment record
# the subsequent manual deploy jobs ('canary', 'production-a', 'production-b')
# are not yet processed because 'staging' is set as `allow_failure: false`
expect { process_pipeline }.to change { Deployment.count }.by(1)
play_manual_action('staging')
succeed_pending
expect(staging_deploy_job.reload.status).to eq 'success'
# processes manual 'canary' deployment job
# the subsequent manual deploy jobs ('production-a' and 'production-b')
# are also processed because 'canary' is set by default as `allow_failure: true`
# 'production-b' has `needs: [canary]`, but it is still processed
# overall, 3 Deployment records are created
expect { process_pipeline }.to change { Deployment.count }.by(3)
expect(canary_deploy_job.status).to eq 'manual'
expect(production_a_deploy_job.status).to eq 'manual'
expect(production_b_deploy_job.status).to eq 'skipped'
# play and succeed the manual 'canary' and 'production-a' jobs
play_manual_action('canary')
play_manual_action('production-a')
succeed_pending
expect(canary_deploy_job.reload.status).to eq 'success'
expect(production_a_deploy_job.reload.status).to eq 'success'
expect(production_b_deploy_job.reload.status).to eq 'created'
# process the manual 'production-b' job again, no Deployment record is created
# because it has already been created when 'production-b' was first processed
expect { process_pipeline }.not_to change { Deployment.count }
expect(production_b_deploy_job.reload.status).to eq 'manual'
end
end
private
def all_builds
pipeline.all_jobs.order(:stage_idx, :id)
end
def builds
all_builds.where.not(status: [:created, :skipped])
end
def stages
pipeline.reset.stages.map(&:status)
end
def builds_names
builds.pluck(:name)
end
def builds_names_and_statuses
builds.each_with_object({}) do |b, h|
h[b.name.to_sym] = b.status
end
end
def all_builds_names_and_statuses
all_builds.each_with_object({}) do |b, h|
h[b.name.to_sym] = b.status
end
end
def all_builds_names
all_builds.pluck(:name)
end
def builds_statuses
builds.pluck(:status)
end
def all_builds_statuses
all_builds.pluck(:status)
end
def succeed_pending
builds.pending.each do |build|
build.reset.success
end
end
def succeed_running_or_pending
pipeline.builds.running_or_pending.each do |build|
build.reset.success
end
end
def fail_running_or_pending
pipeline.builds.running_or_pending.each do |build|
build.reset.drop
end
end
def cancel_running_or_pending
pipeline.builds.running_or_pending.each do |build|
build.reset.cancel
end
end
def play_manual_action(name)
builds.find_by(name: name).play(user)
end
def enqueue_scheduled(name)
builds.scheduled.find_by(name: name).enqueue!
end
def retry_build(name)
Ci::RetryJobService.new(project, user).execute(builds.find_by(name: name))
end
def manual_actions
pipeline.manual_actions.reload
end
def create_build(name, **opts)
create(:ci_build, :created, pipeline: pipeline, name: name, **with_stage_opts(opts))
end
def successful_build(name, **opts)
create(:ci_build, :success, pipeline: pipeline, name: name, **with_stage_opts(opts))
end
def with_stage_opts(opts)
{ stage: "stage-#{opts[:stage_idx].to_i}" }.merge(opts)
end
def delayed_options
{ when: 'delayed', options: { script: %w[echo], start_in: '1 minute' } }
end
def unschedule
pipeline.builds.scheduled.map(&:unschedule)
end
end
private
def process_pipeline
described_class.new(pipeline).execute
end
# A status collection is initialized at the start of pipeline processing and then again at the
# end of processing. Here we simulate "playing" the given jobs during pipeline processing by
# stubbing stopped_job_names so that they appear to have been stopped at the beginning of
# processing and then later changed to alive status at the end.
def mock_play_jobs_during_processing(jobs)
collection = Ci::PipelineProcessing::AtomicProcessingService::StatusCollection.new(pipeline)
allow(collection).to receive(:stopped_job_names).and_return(jobs.map(&:name), [])
# Return the same collection object for every instance of StatusCollection
allow(Ci::PipelineProcessing::AtomicProcessingService::StatusCollection).to receive(:new)
.and_return(collection)
end
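# A minimal usage sketch for the helper above (hypothetical, for illustration
# only -- the job names come from the deployment example earlier in this file):
#
#   mock_play_jobs_during_processing([canary_deploy_job, production_a_deploy_job])
#   process_pipeline
#   # the played jobs count as stopped during processing, then alive at the end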
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
module PipelineProcessing
class AtomicProcessingService
class StatusCollection
include Gitlab::Utils::StrongMemoize
attr_reader :pipeline
def initialize(pipeline)
@pipeline = pipeline
@stage_jobs = {}
@prior_stage_jobs = {}
end
# This method updates the internal status for the given ID
def set_job_status(id, status, lock_version)
job = all_jobs_by_id[id]
return unless job
job[:status] = status
job[:lock_version] = lock_version
end
# This method gets the composite status of all jobs
def status_of_all
status_for_array(all_jobs)
end
# This method gets the composite status of jobs at a given stage
def status_of_stage(stage_position)
strong_memoize("status_of_stage_#{stage_position}") do
stage_jobs = all_jobs_grouped_by_stage_position[stage_position].to_a
status_for_array(stage_jobs.flatten)
end
end
# This method gets the composite status of jobs with the given names
def status_of_jobs(names)
jobs = all_jobs_by_name.slice(*names)
status_for_array(jobs.values, dag: true)
end
# This method gets the composite status of jobs before the given stage
def status_of_jobs_prior_to_stage(stage_position)
strong_memoize("status_of_jobs_prior_to_stage_#{stage_position}") do
stage_jobs = all_jobs_grouped_by_stage_position
.select { |position, _| position < stage_position }
status_for_array(stage_jobs.values.flatten)
end
end
# This method gets the IDs of created jobs at a given stage
def created_job_ids_in_stage(stage_position)
all_jobs_grouped_by_stage_position[stage_position]
.to_a
.select { |job| job[:status] == 'created' }
.map { |job| job[:id] }
end
# This method returns a list of all jobs that are to be processed
def processing_jobs
all_jobs.lazy.reject { |job| job[:processed] }
end
# This method returns the names of jobs that have a stopped status
def stopped_job_names
all_jobs.select { |job| job[:status].in?(Ci::HasStatus::STOPPED_STATUSES) }.pluck(:name) # rubocop: disable CodeReuse/ActiveRecord
end
private
# We use these columns to perform an efficient calculation of a status
JOB_ATTRS = [
:id, :name, :status, :allow_failure,
:stage_idx, :processed, :lock_version
].freeze
def status_for_array(jobs, dag: false)
result = Gitlab::Ci::Status::Composite
.new(jobs, dag: dag, project: pipeline.project)
.status
result || 'success'
end
def all_jobs_grouped_by_stage_position
strong_memoize(:all_jobs_by_order) do
all_jobs.group_by { |job| job[:stage_idx].to_i }
end
end
def all_jobs_by_id
strong_memoize(:all_jobs_by_id) do
all_jobs.index_by { |row| row[:id] }
end
end
def all_jobs_by_name
strong_memoize(:jobs_by_name) do
all_jobs.index_by { |row| row[:name] }
end
end
# rubocop: disable CodeReuse/ActiveRecord
def all_jobs
# We fetch all relevant data in one go.
#
# This is more efficient than relying on PostgreSQL to calculate composite status for us
#
# Since we need to reprocess everything, we can fetch all of the jobs and do the processing ourselves.
strong_memoize(:all_jobs) do
raw_jobs = pipeline
.current_jobs
.ordered_by_stage
.pluck(*JOB_ATTRS)
raw_jobs.map do |row|
JOB_ATTRS.zip(row).to_h
end
end
end
# rubocop: enable CodeReuse/ActiveRecord
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::PipelineProcessing::AtomicProcessingService::StatusCollection,
feature_category: :continuous_integration do
using RSpec::Parameterized::TableSyntax
let_it_be(:pipeline) { create(:ci_pipeline) }
let_it_be(:build_stage) { create(:ci_stage, name: 'build', pipeline: pipeline) }
let_it_be(:test_stage) { create(:ci_stage, name: 'test', pipeline: pipeline) }
let_it_be(:deploy_stage) { create(:ci_stage, name: 'deploy', pipeline: pipeline) }
let_it_be(:build_a) do
create(:ci_build, :success, name: 'build-a', ci_stage: build_stage, stage_idx: 0, pipeline: pipeline)
end
let_it_be(:build_b) do
create(:ci_build, :failed, name: 'build-b', ci_stage: build_stage, stage_idx: 0, pipeline: pipeline)
end
let_it_be(:test_a) do
create(:ci_build, :running, name: 'test-a', ci_stage: test_stage, stage_idx: 1, pipeline: pipeline)
end
let_it_be(:test_b) do
create(:ci_build, :pending, name: 'test-b', ci_stage: test_stage, stage_idx: 1, pipeline: pipeline)
end
let_it_be(:deploy) do
create(:ci_build, :created, name: 'deploy', ci_stage: deploy_stage, stage_idx: 2, pipeline: pipeline)
end
let(:collection) { described_class.new(pipeline) }
describe '#set_job_status' do
it 'updates the existing status of a job' do
collection.set_job_status(test_a.id, 'success', 100)
expect(collection.status_of_jobs(['test-a'])).to eq('success')
end
it 'ignores a missing job' do
expect { collection.set_job_status(-1, 'failed', 100) }.not_to raise_error
end
end
describe '#status_of_all' do
it 'returns composite status of the collection' do
expect(collection.status_of_all).to eq('running')
end
end
describe '#status_of_jobs' do
where(:names, :status) do
%w[build-a] | 'success'
%w[build-a build-b] | 'failed'
%w[build-a test-a] | 'running'
end
with_them do
it 'returns composite status of given names' do
expect(collection.status_of_jobs(names)).to eq(status)
end
end
end
describe '#status_of_jobs_prior_to_stage' do
where(:stage, :status) do
0 | 'success'
1 | 'failed'
2 | 'running'
end
with_them do
it 'returns composite status for jobs in prior stages' do
expect(collection.status_of_jobs_prior_to_stage(stage)).to eq(status)
end
end
end
describe '#status_of_stage' do
where(:stage, :status) do
0 | 'failed'
1 | 'running'
2 | 'created'
end
with_them do
it 'returns composite status for jobs at a given stage' do
expect(collection.status_of_stage(stage)).to eq(status)
end
end
end
describe '#created_job_ids_in_stage' do
it 'returns IDs of jobs at a given stage position' do
expect(collection.created_job_ids_in_stage(0)).to be_empty
expect(collection.created_job_ids_in_stage(1)).to be_empty
expect(collection.created_job_ids_in_stage(2)).to contain_exactly(deploy.id)
end
end
describe '#processing_jobs' do
it 'returns jobs marked as processing' do
expect(collection.processing_jobs.map { |job| job[:id] })
.to contain_exactly(build_a.id, build_b.id, test_a.id, test_b.id, deploy.id)
end
end
describe '#stopped_job_names' do
it 'returns names of jobs that have a stopped status' do
expect(collection.stopped_job_names)
.to contain_exactly(build_a.name, build_b.name)
end
end
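# Illustrative note (not part of the original spec): composite status
# resolution is delegated to Gitlab::Ci::Status::Composite, where an alive
# status such as 'running' takes precedence over finished ones, e.g.:
#
#   collection.set_job_status(test_b.id, 'failed', 1)
#   collection.status_of_all # => 'running' while test-a is still running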
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
module Catalog
module Resources
class ValidateService
MINIMUM_AMOUNT_OF_COMPONENTS = 1
def initialize(project, ref)
@project = project
@ref = ref
@errors = []
end
def execute
verify_presence_project_readme
verify_presence_project_description
scan_directory_for_components
if errors.empty?
ServiceResponse.success
else
ServiceResponse.error(message: errors.join(', '))
end
end
private
attr_reader :project, :ref, :errors
def verify_presence_project_readme
return if project_has_readme?
errors << 'Project must have a README'
end
def verify_presence_project_description
return if project.description.present?
errors << 'Project must have a description'
end
def scan_directory_for_components
return if Ci::Catalog::ComponentsProject.new(project).fetch_component_paths(ref,
limit: MINIMUM_AMOUNT_OF_COMPONENTS).any?
errors << 'Project must contain components. Ensure you are using the correct directory structure'
end
def project_has_readme?
project.repository.blob_data_at(ref, 'README.md')
end
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::Catalog::Resources::ValidateService, feature_category: :pipeline_composition do
describe '#execute' do
context 'when a project has a README, a description, and at least one component' do
it 'is valid' do
project = create(:project, :catalog_resource_with_components)
response = described_class.new(project, project.default_branch).execute
expect(response).to be_success
end
end
context 'when a project has neither a description nor a README nor components' do
it 'is not valid' do
project = create(:project, :small_repo)
response = described_class.new(project, project.default_branch).execute
expect(response.message).to eq(
'Project must have a README, ' \
'Project must have a description, ' \
'Project must contain components. Ensure you are using the correct directory structure')
end
end
context 'when a project has components but has neither a description nor a README' do
it 'is not valid' do
project = create(:project, :small_repo, files: { 'templates/dast/template.yml' => 'image: alpine' })
response = described_class.new(project, project.default_branch).execute
expect(response.message).to eq('Project must have a README, Project must have a description')
end
end
context 'when a project has a description but has neither a README nor components' do
it 'is not valid' do
project = create(:project, :small_repo, description: 'project with no README and no components')
response = described_class.new(project, project.default_branch).execute
expect(response.message).to eq(
'Project must have a README, ' \
'Project must contain components. Ensure you are using the correct directory structure')
end
end
context 'when a project has a README but has neither a description nor components' do
it 'is not valid' do
project = create(:project, :repository)
response = described_class.new(project, project.default_branch).execute
expect(response.message).to eq(
'Project must have a description, ' \
'Project must contain components. Ensure you are using the correct directory structure')
end
end
context 'when a project has components and a description but no README' do
it 'is not valid' do
project = create(:project, :small_repo, description: 'desc', files: { 'templates/dast.yml' => 'image: alpine' })
response = described_class.new(project, project.default_branch).execute
expect(response.message).to eq('Project must have a README')
end
end
context 'when a project has components and a README but no description' do
it 'is not valid' do
project = create(:project, :custom_repo,
files: { 'templates/dast.yml' => 'image: alpine', 'README.md' => 'readme' })
response = described_class.new(project, project.default_branch).execute
expect(response.message).to eq('Project must have a description')
end
end
context 'when a project has a description and a README but no components' do
it 'is not valid' do
project = create(:project, :readme, description: 'project with no README and no components')
response = described_class.new(project, project.default_branch).execute
expect(response.message).to eq(
'Project must contain components. Ensure you are using the correct directory structure')
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
module Catalog
module Resources
class ReleaseService
def initialize(release)
@release = release
@project = release.project
@errors = []
end
def execute
validate_catalog_resource
create_version
if errors.empty?
ServiceResponse.success
else
ServiceResponse.error(message: errors.join(', '))
end
end
private
attr_reader :project, :errors, :release
def validate_catalog_resource
response = Ci::Catalog::Resources::ValidateService.new(project, release.sha).execute
return if response.success?
errors << response.message
end
def create_version
return if errors.present?
response = Ci::Catalog::Resources::Versions::CreateService.new(release).execute
return if response.success?
errors << response.message
end
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::Catalog::Resources::ReleaseService, feature_category: :pipeline_composition do
describe '#execute' do
context 'with a valid catalog resource and release' do
it 'validates the catalog resource and creates a version' do
project = create(:project, :catalog_resource_with_components)
catalog_resource = create(:ci_catalog_resource, project: project)
release = create(:release, project: project, sha: project.repository.root_ref_sha)
response = described_class.new(release).execute
version = Ci::Catalog::Resources::Version.last
expect(response).to be_success
expect(version.release).to eq(release)
expect(version.catalog_resource).to eq(catalog_resource)
expect(version.catalog_resource.project).to eq(project)
end
end
context 'when the validation of the catalog resource fails' do
it 'returns an error and does not create a version' do
project = create(:project, :repository)
create(:ci_catalog_resource, project: project)
release = create(:release, project: project, sha: project.repository.root_ref_sha)
response = described_class.new(release).execute
expect(Ci::Catalog::Resources::Version.count).to be(0)
expect(response).to be_error
expect(response.message).to eq(
'Project must have a description, ' \
'Project must contain components. Ensure you are using the correct directory structure')
end
end
context 'when the creation of a version fails' do
it 'returns an error and does not create a version' do
project =
create(
:project, :custom_repo,
description: 'Component project',
files: {
'templates/secret-detection.yml' => 'image: agent: coop',
'README.md' => 'Read me'
}
)
create(:ci_catalog_resource, project: project)
release = create(:release, project: project, sha: project.repository.root_ref_sha)
response = described_class.new(release).execute
expect(Ci::Catalog::Resources::Version.count).to be(0)
expect(response).to be_error
expect(response.message).to include('mapping values are not allowed in this context')
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
module Catalog
module Resources
class DestroyService
include Gitlab::Allowable
attr_reader :project, :current_user
def initialize(project, user)
@current_user = user
@project = project
end
def execute(catalog_resource)
raise Gitlab::Access::AccessDeniedError unless can?(current_user, :add_catalog_resource,
project)
catalog_resource.destroy!
ServiceResponse.success(message: 'Catalog Resource destroyed')
end
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::Catalog::Resources::DestroyService, feature_category: :pipeline_composition do
let_it_be(:project) { create(:project, :catalog_resource_with_components) }
let_it_be(:catalog_resource) { create(:ci_catalog_resource, project: project) }
let_it_be(:user) { create(:user) }
let(:service) { described_class.new(project, user) }
before do
stub_licensed_features(ci_namespace_catalog: true)
end
describe '#execute' do
context 'with an unauthorized user' do
it 'raises an AccessDeniedError' do
expect { service.execute(catalog_resource) }.to raise_error(Gitlab::Access::AccessDeniedError)
end
end
context 'with an authorized user' do
before_all do
project.add_owner(user)
end
it 'destroys a catalog resource' do
expect(project.catalog_resource).to eq(catalog_resource)
response = service.execute(catalog_resource)
expect(project.reload.catalog_resource).to be_nil
expect(response.status).to be(:success)
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
module Catalog
module Resources
class CreateService
include Gitlab::Allowable
attr_reader :project, :current_user
def initialize(project, user)
@current_user = user
@project = project
end
def execute
raise Gitlab::Access::AccessDeniedError unless can?(current_user, :add_catalog_resource, project)
catalog_resource = Ci::Catalog::Resource.new(project: project)
if catalog_resource.valid?
catalog_resource.save!
ServiceResponse.success(payload: catalog_resource)
else
ServiceResponse.error(message: catalog_resource.errors.full_messages.join(', '))
end
end
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::Catalog::Resources::CreateService, feature_category: :pipeline_composition do
let_it_be(:project) { create(:project, :catalog_resource_with_components) }
let_it_be(:user) { create(:user) }
let(:service) { described_class.new(project, user) }
before do
stub_licensed_features(ci_namespace_catalog: true)
end
describe '#execute' do
context 'with an unauthorized user' do
it 'raises an AccessDeniedError' do
expect { service.execute }.to raise_error(Gitlab::Access::AccessDeniedError)
end
end
context 'with an authorized user' do
before_all do
project.add_owner(user)
end
context 'and a valid project' do
it 'creates a catalog resource' do
response = service.execute
expect(response.payload.project).to eq(project)
end
end
context 'with an invalid catalog resource' do
it 'does not save the catalog resource' do
catalog_resource = instance_double(::Ci::Catalog::Resource,
valid?: false,
errors: instance_double(ActiveModel::Errors, full_messages: ['not valid']))
allow(::Ci::Catalog::Resource).to receive(:new).and_return(catalog_resource)
response = service.execute
expect(response.message).to eq('not valid')
end
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
module Catalog
module Resources
module Versions
class CreateService
def initialize(release)
@project = release.project
@release = release
@errors = []
@version = nil
@components_project = Ci::Catalog::ComponentsProject.new(project)
end
def execute
build_catalog_resource_version
fetch_and_build_components if Feature.enabled?(:ci_catalog_create_metadata, project)
publish_catalog_resource!
if errors.empty?
ServiceResponse.success
else
ServiceResponse.error(message: errors.flatten.first(10).join(', '))
end
end
private
attr_reader :project, :errors, :release, :components_project
def build_catalog_resource_version
return error('Project is not a catalog resource') unless project.catalog_resource
@version = Ci::Catalog::Resources::Version.new(
release: release,
catalog_resource: project.catalog_resource,
project: project
)
end
def fetch_and_build_components
return if errors.present?
max_components = Ci::Catalog::ComponentsProject::COMPONENTS_LIMIT
component_paths = components_project.fetch_component_paths(release.sha, limit: max_components + 1)
if component_paths.size > max_components
return error("Release cannot contain more than #{max_components} components")
end
build_components(component_paths)
end
def build_components(component_paths)
paths_with_oids = component_paths.map { |path| [release.sha, path] }
blobs = project.repository.blobs_at(paths_with_oids)
blobs.each do |blob|
metadata = extract_metadata(blob)
build_catalog_resource_component(metadata)
end
rescue ::Gitlab::Config::Loader::FormatError => e
error(e)
end
def extract_metadata(blob)
{
name: components_project.extract_component_name(blob.path),
inputs: components_project.extract_inputs(blob.data),
path: blob.path
}
end
def build_catalog_resource_component(metadata)
return if errors.present?
component = @version.components.build(
name: metadata[:name],
project: @version.project,
inputs: metadata[:inputs],
catalog_resource: @version.catalog_resource,
path: metadata[:path],
created_at: Time.current
)
return if component.valid?
error("Build component error: #{component.errors.full_messages.join(', ')}")
end
def publish_catalog_resource!
return if errors.present?
::Ci::Catalog::Resources::Version.transaction do
BulkInsertableAssociations.with_bulk_insert do
@version.save!
end
project.catalog_resource.publish!
end
end
def error(message)
errors << message
end
end
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::Catalog::Resources::Versions::CreateService, feature_category: :pipeline_composition do
describe '#execute' do
let(:files) do
{
'templates/secret-detection.yml' => "spec:\n inputs:\n website:\n---\nimage: alpine_1",
'templates/dast/template.yml' => 'image: alpine_2',
'templates/blank-yaml.yml' => '',
'templates/dast/sub-folder/template.yml' => 'image: alpine_3',
'templates/template.yml' => "spec:\n inputs:\n environment:\n---\nimage: alpine_6",
'tests/test.yml' => 'image: alpine_7',
'README.md' => 'Read me'
}
end
let(:project) do
create(
:project, :custom_repo,
description: 'Simple and Complex components',
files: files
)
end
let(:release) { create(:release, project: project, sha: project.repository.root_ref_sha) }
let!(:catalog_resource) { create(:ci_catalog_resource, project: project) }
context 'when the project is not a catalog resource' do
it 'does not create a version' do
project = create(:project, :repository)
release = create(:release, project: project, sha: project.repository.root_ref_sha)
response = described_class.new(release).execute
expect(response).to be_error
expect(response.message).to include('Project is not a catalog resource')
end
end
context 'when the catalog resource has different types of components and a release' do
it 'creates a version for the release' do
response = described_class.new(release).execute
expect(response).to be_success
version = Ci::Catalog::Resources::Version.last
expect(version.release).to eq(release)
expect(version.catalog_resource).to eq(catalog_resource)
expect(version.catalog_resource.project).to eq(project)
end
it 'marks the catalog resource as published' do
described_class.new(release).execute
expect(catalog_resource.reload.state).to eq('published')
end
context 'when the ci_catalog_create_metadata feature flag is disabled' do
before do
stub_feature_flags(ci_catalog_create_metadata: false)
end
it 'does not create components' do
expect(Ci::Catalog::Resources::Component).not_to receive(:bulk_insert!)
expect(project.ci_components.count).to eq(0)
response = described_class.new(release).execute
expect(response).to be_success
expect(project.ci_components.count).to eq(0)
end
end
context 'when the ci_catalog_create_metadata feature flag is enabled' do
context 'when there are more than 10 components' do
let(:files) do
{
'templates/secret11.yml' => '',
'templates/secret10.yml' => '',
'templates/secret8.yml' => '',
'templates/secret7.yml' => '',
'templates/secret6.yml' => '',
'templates/secret5.yml' => '',
'templates/secret4.yml' => '',
'templates/secret3.yml' => '',
'templates/secret2.yml' => '',
'templates/secret1.yml' => '',
'templates/secret0.yml' => '',
'README.md' => 'Read me'
}
end
it 'does not create components' do
response = described_class.new(release).execute
expect(response).to be_error
expect(response.message).to include('Release cannot contain more than 10 components')
expect(project.ci_components.count).to eq(0)
end
end
it 'bulk inserts all the components' do
expect(Ci::Catalog::Resources::Component).to receive(:bulk_insert!).and_call_original
described_class.new(release).execute
end
it 'creates components for the catalog resource' do
expect(project.ci_components.count).to eq(0)
response = described_class.new(release).execute
expect(response).to be_success
version = Ci::Catalog::Resources::Version.last
expect(project.ci_components.count).to eq(4)
expect(project.ci_components.first.name).to eq('blank-yaml')
expect(project.ci_components.first.project).to eq(version.project)
expect(project.ci_components.first.inputs).to eq({})
expect(project.ci_components.first.catalog_resource).to eq(version.catalog_resource)
expect(project.ci_components.first.version).to eq(version)
expect(project.ci_components.first.path).to eq('templates/blank-yaml.yml')
expect(project.ci_components.second.name).to eq('dast')
expect(project.ci_components.second.project).to eq(version.project)
expect(project.ci_components.second.inputs).to eq({})
expect(project.ci_components.second.catalog_resource).to eq(version.catalog_resource)
expect(project.ci_components.second.version).to eq(version)
expect(project.ci_components.second.path).to eq('templates/dast/template.yml')
expect(project.ci_components.third.name).to eq('secret-detection')
expect(project.ci_components.third.project).to eq(version.project)
expect(project.ci_components.third.inputs).to eq({ "website" => nil })
expect(project.ci_components.third.catalog_resource).to eq(version.catalog_resource)
expect(project.ci_components.third.version).to eq(version)
expect(project.ci_components.third.path).to eq('templates/secret-detection.yml')
expect(project.ci_components.fourth.name).to eq('template')
expect(project.ci_components.fourth.project).to eq(version.project)
expect(project.ci_components.fourth.inputs).to eq({ "environment" => nil })
expect(project.ci_components.fourth.catalog_resource).to eq(version.catalog_resource)
expect(project.ci_components.fourth.version).to eq(version)
expect(project.ci_components.fourth.path).to eq('templates/template.yml')
end
end
end
context 'with invalid data' do
let_it_be(:files) do
{
'templates/secret-detection.yml' => 'some: invalid: syntax',
'README.md' => 'Read me'
}
end
it 'returns an error' do
response = described_class.new(release).execute
expect(response).to be_error
expect(response.message).to include('mapping values are not allowed in this context')
end
end
context 'when one or more components are invalid' do
let_it_be(:files) do
{
'templates/secret-detection.yml' => "spec:\n inputs:\n - website\n---\nimage: alpine_1",
'README.md' => 'Read me'
}
end
it 'returns an error' do
response = described_class.new(release).execute
expect(response).to be_error
expect(response.message).to include('Inputs must be a valid json schema')
end
end
end
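# Flow sketch, mirroring the service's private methods (for orientation):
# build_catalog_resource_version -> fetch_and_build_components (behind the
# ci_catalog_create_metadata flag, capped at COMPONENTS_LIMIT) ->
# publish_catalog_resource!, which saves the version and its components in a
# single transaction via BulkInsertableAssociations.with_bulk_insert.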
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
module Pipelines
class AddJobService
include ::Gitlab::ExclusiveLeaseHelpers
attr_reader :pipeline
def initialize(pipeline)
@pipeline = pipeline
raise ArgumentError, "Pipeline must be persisted for this service to be used" unless pipeline.persisted?
end
def execute!(job, &block)
assign_pipeline_attributes(job)
in_lock("ci:pipelines:#{pipeline.id}:add-job", ttl: LOCK_TIMEOUT, sleep_sec: LOCK_SLEEP, retries: LOCK_RETRIES) do
Ci::Pipeline.transaction do
yield(job)
job.update_older_statuses_retried!
end
end
ServiceResponse.success(payload: { job: job })
rescue StandardError => e
ServiceResponse.error(message: e.message, payload: { job: job })
end
private
LOCK_TIMEOUT = 1.minute
LOCK_SLEEP = 0.5.seconds
LOCK_RETRIES = 20
def assign_pipeline_attributes(job)
job.pipeline = pipeline
job.project = pipeline.project
job.ref = pipeline.ref
job.partition_id = pipeline.partition_id
# update metadata since it might have been lazily initialised before this call
# metadata is present on `Ci::Processable`
if job.respond_to?(:metadata) && job.metadata
job.metadata.project = pipeline.project
job.metadata.partition_id = pipeline.partition_id
end
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::Pipelines::AddJobService, feature_category: :continuous_integration do
include ExclusiveLeaseHelpers
let_it_be_with_reload(:pipeline) { create(:ci_pipeline) }
let(:job) { build(:ci_build) }
subject(:service) { described_class.new(pipeline) }
context 'when the pipeline is not persisted' do
let(:pipeline) { build(:ci_pipeline) }
it 'raises error' do
expect { service }.to raise_error(ArgumentError, 'Pipeline must be persisted for this service to be used')
end
end
describe '#execute!' do
subject(:execute) do
service.execute!(job) do |job|
job.save!
end
end
it 'assigns pipeline attributes to the job' do
expect do
execute
end.to change { job.slice(:pipeline, :project, :ref) }.to(
pipeline: pipeline, project: pipeline.project, ref: pipeline.ref
).and change { job.metadata.project }.to(pipeline.project)
end
it 'assigns partition_id to job and metadata' do
pipeline.partition_id = ci_testing_partition_id
expect { execute }
.to change(job, :partition_id).to(pipeline.partition_id)
.and change { job.metadata.partition_id }.to(pipeline.partition_id)
end
it 'returns a service response with the job as payload' do
expect(execute).to be_success
expect(execute.payload[:job]).to eq(job)
end
it 'calls update_older_statuses_retried!' do
expect(job).to receive(:update_older_statuses_retried!)
execute
end
context 'when the block raises an error' do
subject(:execute) do
service.execute!(job) do |job|
raise "this is an error"
end
end
it 'returns a service response with the error and the job as payload' do
expect(execute).to be_error
expect(execute.payload[:job]).to eq(job)
expect(execute.message).to eq('this is an error')
end
end
context 'exclusive lock' do
let(:lock_uuid) { 'test' }
let(:lock_key) { "ci:pipelines:#{pipeline.id}:add-job" }
let(:lock_timeout) { 1.minute }
before do
# "Please stub a default value first if message might be received with other args as well."
allow(Gitlab::ExclusiveLease).to receive(:new).and_call_original
end
it 'uses exclusive lock' do
lease = stub_exclusive_lease(lock_key, lock_uuid, timeout: lock_timeout)
expect(lease).to receive(:try_obtain)
expect(lease).to receive(:cancel)
expect(execute).to be_success
expect(execute.payload[:job]).to eq(job)
end
end
end
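# Locking sketch, drawn from the service constants (no extra assertions):
# every job addition runs inside
#
#   in_lock("ci:pipelines:#{pipeline.id}:add-job",
#           ttl: 1.minute, sleep_sec: 0.5.seconds, retries: 20) do
#     Ci::Pipeline.transaction { ... }
#   end
#
# so concurrent additions to the same pipeline are serialized.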
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
module Pipelines
class HookService
include Gitlab::Utils::StrongMemoize
HOOK_NAME = :pipeline_hooks
def initialize(pipeline)
@pipeline = pipeline
end
def execute
project.execute_hooks(hook_data, HOOK_NAME) if project.has_active_hooks?(HOOK_NAME)
project.execute_integrations(hook_data, HOOK_NAME) if project.has_active_integrations?(HOOK_NAME)
end
private
attr_reader :pipeline
def project
@project ||= pipeline.project
end
def hook_data
strong_memoize(:hook_data) do
Gitlab::DataBuilder::Pipeline.build(pipeline)
end
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::Pipelines::HookService, feature_category: :continuous_integration do
describe '#execute' do
let_it_be(:namespace) { create(:namespace) }
let_it_be(:project) { create(:project, :repository, namespace: namespace) }
let_it_be(:pipeline, reload: true) { create(:ci_empty_pipeline, :created, project: project) }
let(:hook_enabled) { true }
let!(:hook) { create(:project_hook, project: project, pipeline_events: hook_enabled) }
let(:hook_data) { double }
subject(:service) { described_class.new(pipeline) }
describe 'HOOK_NAME' do
specify { expect(described_class::HOOK_NAME).to eq(:pipeline_hooks) }
end
context 'with pipeline hooks enabled' do
before do
allow(Gitlab::DataBuilder::Pipeline).to receive(:build).with(pipeline).once.and_return(hook_data)
end
it 'calls pipeline.project.execute_hooks and pipeline.project.execute_integrations' do
create(:pipelines_email_integration, project: project)
expect(pipeline.project).to receive(:execute_hooks).with(hook_data, described_class::HOOK_NAME)
expect(pipeline.project).to receive(:execute_integrations).with(hook_data, described_class::HOOK_NAME)
service.execute
end
end
context 'with pipeline hooks and integrations disabled' do
let(:hook_enabled) { false }
it 'does not call pipeline.project.execute_hooks and pipeline.project.execute_integrations' do
expect(pipeline.project).not_to receive(:execute_hooks)
expect(pipeline.project).not_to receive(:execute_integrations)
service.execute
end
end
end
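# For reference, a rough shape of the payload built by
# Gitlab::DataBuilder::Pipeline (simplified sketch; the exact keys are
# defined by the data builder, not asserted by this spec):
#
#   { object_kind: 'pipeline', object_attributes: { id: ..., status: ... }, builds: [...] }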
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
module Pipelines
class UpdateMetadataService
def initialize(pipeline, params)
@pipeline = pipeline
@params = params
end
def execute
metadata = pipeline.pipeline_metadata
metadata = pipeline.build_pipeline_metadata(project: pipeline.project) if metadata.nil?
params[:name] = params[:name].strip if params.key?(:name)
if metadata.update(params)
ServiceResponse.success(message: 'Pipeline metadata was updated', payload: pipeline)
else
ServiceResponse.error(message: 'Failed to update pipeline', payload: metadata.errors.full_messages,
reason: :bad_request)
end
end
private
attr_reader :pipeline, :params
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::Pipelines::UpdateMetadataService, feature_category: :continuous_integration do
subject(:execute) { described_class.new(pipeline, { name: name }).execute }
let(:name) { 'Some random pipeline name' }
context 'when pipeline has no name' do
let(:pipeline) { create(:ci_pipeline) }
it 'updates the name' do
expect { execute }.to change { pipeline.reload.name }.to(name)
end
end
context 'when pipeline has a name' do
let(:pipeline) { create(:ci_pipeline, name: 'Some other name') }
it 'updates the name' do
expect { execute }.to change { pipeline.reload.name }.to(name)
end
end
context 'when new name is too long' do
let(:pipeline) { create(:ci_pipeline) }
let(:name) { 'a' * 256 }
it 'does not update the name' do
expect { execute }.not_to change { pipeline.reload.name }
end
end
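# Added example (not in the original spec): the service calls `strip` on
# `params[:name]`, so surrounding whitespace should be removed before saving.
context 'when new name has surrounding whitespace' do
let(:pipeline) { create(:ci_pipeline) }
let(:name) { '  Some random pipeline name  ' }
it 'strips the name before updating' do
expect { execute }.to change { pipeline.reload.name }.to('Some random pipeline name')
end
end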
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
module PipelineArtifacts
class CreateCodeQualityMrDiffReportService
include Gitlab::Utils::StrongMemoize
def initialize(pipeline)
@pipeline = pipeline
end
def execute
return unless pipeline.can_generate_codequality_reports?
return if pipeline.has_codequality_mr_diff_report?
return unless new_errors_introduced?
Ci::PipelineArtifact.create_or_replace_for_pipeline!(**artifact_attributes)
end
private
attr_reader :pipeline
def artifact_attributes
file = build_carrierwave_file!
{
pipeline: pipeline,
file_type: :code_quality_mr_diff,
size: file["tempfile"].size,
file: file,
locked: pipeline.locked
}
end
def merge_requests
strong_memoize(:merge_requests) do
pipeline.merge_requests_as_head_pipeline
end
end
def head_report
strong_memoize(:head_report) do
pipeline.codequality_reports
end
end
def base_report(merge_request)
strong_memoize(:base_report) do
merge_request&.base_pipeline&.codequality_reports
end
end
def mr_diff_report_by_merge_requests
strong_memoize(:mr_diff_report_by_merge_requests) do
merge_requests.each_with_object({}) do |merge_request, hash|
key = "merge_request_#{merge_request.id}"
new_errors = Gitlab::Ci::Reports::CodequalityReportsComparer.new(base_report(merge_request), head_report).new_errors
next if new_errors.empty?
hash[key] = Gitlab::Ci::Reports::CodequalityMrDiff.new(new_errors)
end
end
end
def new_errors_introduced?
mr_diff_report_by_merge_requests.present?
end
def build_carrierwave_file!
CarrierWaveStringFile.new_file(
file_content: build_quality_mr_diff_report(mr_diff_report_by_merge_requests),
filename: Ci::PipelineArtifact::DEFAULT_FILE_NAMES.fetch(:code_quality_mr_diff),
content_type: 'application/json'
)
end
def build_quality_mr_diff_report(mr_diff_report)
Gitlab::Json.dump(mr_diff_report.each_with_object({}) do |diff_report, hash|
hash[diff_report.first] = Ci::CodequalityMrDiffReportSerializer.new.represent(diff_report.second) # rubocop: disable CodeReuse/Serializer
end)
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe ::Ci::PipelineArtifacts::CreateCodeQualityMrDiffReportService, feature_category: :build_artifacts do
describe '#execute' do
let(:merge_request) { create(:merge_request) }
let(:project) { merge_request.project }
let(:head_pipeline) { create(:ci_pipeline, :success, :with_codequality_reports, project: project, merge_requests_as_head_pipeline: [merge_request]) }
let(:base_pipeline) { create(:ci_pipeline, :success, project: project, ref: merge_request.target_branch, sha: merge_request.diff_base_sha) }
subject { described_class.new(head_pipeline).execute }
context 'when there are codequality reports' do
context 'when pipeline passes' do
context 'when degradations are present' do
context 'when degradations already present in target branch pipeline' do
before do
create(:ci_build, :success, :codequality_reports, name: 'codequality', pipeline: base_pipeline, project: project)
end
it "does not persist a pipeline artifact" do
expect { subject }.not_to change { Ci::PipelineArtifact.count }
end
end
context 'when degradation is not present in target branch pipeline' do
before do
create(:ci_build, :success, :codequality_reports_without_degradation, name: 'codequality', pipeline: base_pipeline, project: project)
end
it 'persists a pipeline artifact' do
expect { subject }.to change { Ci::PipelineArtifact.count }.by(1)
end
it 'persists the default file name' do
subject
pipeline_artifact = Ci::PipelineArtifact.first
expect(pipeline_artifact.file.filename).to eq('code_quality_mr_diff.json')
end
it 'sets expire_at to 1 week' do
freeze_time do
subject
pipeline_artifact = Ci::PipelineArtifact.first
expect(pipeline_artifact.expire_at).to eq(1.week.from_now)
end
end
it "artifact has pipeline's locked status" do
subject
artifact = Ci::PipelineArtifact.first
expect(artifact.locked).to eq(head_pipeline.locked)
end
it 'does not persist the same artifact twice' do
2.times { described_class.new(head_pipeline).execute }
expect { subject }.not_to change { Ci::PipelineArtifact.count }
end
end
end
end
end
context 'when there are no codequality reports for head pipeline' do
let(:head_pipeline) { create(:ci_pipeline, :success, project: project, merge_requests_as_head_pipeline: [merge_request]) }
it "does not persist a pipeline artifact" do
expect { subject }.not_to change { Ci::PipelineArtifact.count }
end
end
context 'when there are no codequality reports for base pipeline' do
let(:head_pipeline) { create(:ci_pipeline, :success, project: project, merge_requests_as_head_pipeline: [merge_request]) }
it "does not persist a pipeline artifact" do
expect { subject }.not_to change { Ci::PipelineArtifact.count }
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
module PipelineArtifacts
class CoverageReportService
include Gitlab::Utils::StrongMemoize
def initialize(pipeline)
@pipeline = pipeline
end
def execute
return if report.empty?
Ci::PipelineArtifact.create_or_replace_for_pipeline!(**pipeline_artifact_params).tap do |pipeline_artifact|
Gitlab::AppLogger.info(log_params(pipeline_artifact))
end
end
private
attr_reader :pipeline
def report
strong_memoize(:report) do
Gitlab::Ci::Reports::CoverageReportGenerator.new(pipeline).report
end
end
def pipeline_artifact_params
{
pipeline: pipeline,
file_type: :code_coverage,
file: carrierwave_file,
size: carrierwave_file['tempfile'].size,
locked: pipeline.locked
}
end
def carrierwave_file
strong_memoize(:carrier_wave_file) do
CarrierWaveStringFile.new_file(
file_content: Gitlab::Json.dump(report),
filename: Ci::PipelineArtifact::DEFAULT_FILE_NAMES.fetch(:code_coverage),
content_type: 'application/json'
)
end
end
def log_params(pipeline_artifact)
{
project_id: pipeline.project_id,
pipeline_id: pipeline.id,
pipeline_artifact_id: pipeline_artifact.id,
message: "Created code coverage for pipeline."
}
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::PipelineArtifacts::CoverageReportService, feature_category: :build_artifacts do
describe '#execute' do
let_it_be(:project) { create(:project, :repository) }
subject { described_class.new(pipeline).execute }
shared_examples 'creating or updating a pipeline coverage report' do
context 'when pipeline is finished' do
it 'creates or updates a pipeline artifact' do
subject
expect(pipeline.reload.pipeline_artifacts.count).to eq(1)
end
it 'persists the default file name' do
subject
file = Ci::PipelineArtifact.first.file
expect(file.filename).to eq('code_coverage.json')
end
it 'sets expire_at to 1 week from now' do
freeze_time do
subject
pipeline_artifact = Ci::PipelineArtifact.first
expect(pipeline_artifact.expire_at).to eq(1.week.from_now)
end
end
it 'logs relevant information' do
allow(Gitlab::AppLogger).to receive(:info).and_call_original
expect(Gitlab::AppLogger).to receive(:info).with({
project_id: project.id,
pipeline_id: pipeline.id,
pipeline_artifact_id: kind_of(Numeric),
message: kind_of(String)
})
subject
end
end
end
context 'when pipeline has coverage report' do
let!(:pipeline) { create(:ci_pipeline, :with_coverage_reports, project: project) }
it_behaves_like 'creating or updating a pipeline coverage report'
it "artifact has pipeline's locked status" do
subject
artifact = Ci::PipelineArtifact.first
expect(artifact.locked).to eq(pipeline.locked)
end
end
context 'when pipeline has coverage report from child pipeline' do
let!(:pipeline) { create(:ci_pipeline, :success, project: project) }
let!(:child_pipeline) { create(:ci_pipeline, :with_coverage_reports, project: project, child_of: pipeline) }
it_behaves_like 'creating or updating a pipeline coverage report'
end
context 'when pipeline has existing pipeline artifact for coverage report' do
let!(:pipeline) { create(:ci_pipeline, :with_coverage_reports, project: project) }
let!(:child_pipeline) { create(:ci_pipeline, :with_coverage_reports, project: project, child_of: pipeline) }
let!(:pipeline_artifact) do
create(:ci_pipeline_artifact, :with_coverage_report, pipeline: pipeline, expire_at: 1.day.from_now)
end
it_behaves_like 'creating or updating a pipeline coverage report'
end
context 'when pipeline is running and coverage report does not exist' do
let(:pipeline) { create(:ci_pipeline, :running) }
it 'does not persist data' do
expect { subject }.not_to change { Ci::PipelineArtifact.count }.from(0)
end
end
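# Added example (not in the original spec): the artifact is stored with the
# :code_coverage file type set in the service's pipeline_artifact_params.
context 'when inspecting the created artifact' do
let!(:pipeline) { create(:ci_pipeline, :with_coverage_reports, project: project) }
it 'stores the artifact with the code_coverage file type' do
subject
expect(Ci::PipelineArtifact.first.file_type).to eq('code_coverage')
end
end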
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Ci
module PipelineArtifacts
class DestroyAllExpiredService
include ::Gitlab::ExclusiveLeaseHelpers
include ::Gitlab::LoopHelpers
include ::Gitlab::Utils::StrongMemoize
BATCH_SIZE = 100
LOOP_LIMIT = 1000
LOOP_TIMEOUT = 5.minutes
LOCK_TIMEOUT = 10.minutes
EXCLUSIVE_LOCK_KEY = 'expired_pipeline_artifacts:destroy:lock'
def initialize
@removed_artifacts_count = 0
@start_at = Time.current
end
def execute
in_lock(EXCLUSIVE_LOCK_KEY, ttl: LOCK_TIMEOUT, retries: 1) do
destroy_unlocked_pipeline_artifacts
legacy_destroy_pipeline_artifacts
end
@removed_artifacts_count
end
private
def destroy_unlocked_pipeline_artifacts
loop_until(timeout: LOOP_TIMEOUT, limit: LOOP_LIMIT) do
artifacts = Ci::PipelineArtifact.expired_before(@start_at).artifact_unlocked.limit(BATCH_SIZE)
break if artifacts.empty?
destroy_batch(artifacts)
end
end
def legacy_destroy_pipeline_artifacts
loop_until(timeout: LOOP_TIMEOUT, limit: LOOP_LIMIT) do
destroy_artifacts_batch
end
end
def destroy_artifacts_batch
artifacts = ::Ci::PipelineArtifact.unlocked.expired.limit(BATCH_SIZE).to_a
return false if artifacts.empty?
destroy_batch(artifacts)
end
def destroy_batch(artifacts)
artifacts.each(&:destroy!)
increment_stats(artifacts.size)
true
end
def increment_stats(size)
destroyed_artifacts_counter.increment({}, size)
@removed_artifacts_count += size
end
def destroyed_artifacts_counter
strong_memoize(:destroyed_artifacts_counter) do
name = :destroyed_pipeline_artifacts_count_total
comment = 'Counter of destroyed expired pipeline artifacts'
::Gitlab::Metrics.counter(name, comment)
end
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::PipelineArtifacts::DestroyAllExpiredService, :clean_gitlab_redis_shared_state,
feature_category: :build_artifacts do
let(:service) { described_class.new }
describe '#execute' do
subject { service.execute }
context 'when timeout happens' do
before do
stub_const('Ci::PipelineArtifacts::DestroyAllExpiredService::LOOP_TIMEOUT', 0.1.seconds)
allow(service).to receive(:destroy_artifacts_batch) { true }
end
it 'returns 0 and does not continue destroying' do
is_expected.to eq(0)
end
end
context 'when there are no artifacts' do
it 'does not raise error' do
expect { subject }.not_to raise_error
end
end
context 'when the loop limit is reached' do
before do
stub_const('::Ci::PipelineArtifacts::DestroyAllExpiredService::LOOP_LIMIT', 1)
stub_const('::Ci::PipelineArtifacts::DestroyAllExpiredService::BATCH_SIZE', 1)
create_list(:ci_pipeline_artifact, 2, :unlocked, expire_at: 1.week.ago)
end
it 'destroys one artifact' do
expect { subject }.to change { Ci::PipelineArtifact.count }.by(-1)
end
it 'reports the number of destroyed artifacts' do
is_expected.to eq(1)
end
end
context 'when there are more artifacts than the batch size' do
before do
stub_const('Ci::PipelineArtifacts::DestroyAllExpiredService::BATCH_SIZE', 1)
create_list(:ci_pipeline_artifact, 2, :unlocked, expire_at: 1.week.ago)
end
it 'destroys all expired artifacts' do
expect { subject }.to change { Ci::PipelineArtifact.count }.by(-2)
end
it 'reports the number of destroyed artifacts' do
is_expected.to eq(2)
end
end
context 'when artifacts are not expired' do
before do
create(:ci_pipeline_artifact, :unlocked, expire_at: 2.days.from_now)
end
it 'does not destroy pipeline artifacts' do
expect { subject }.not_to change { Ci::PipelineArtifact.count }
end
it 'reports the number of destroyed artifacts' do
is_expected.to eq(0)
end
end
context 'when pipeline is locked' do
before do
create(:ci_pipeline_artifact, expire_at: 2.weeks.ago)
end
it 'does not destroy pipeline artifacts' do
expect { subject }.not_to change { Ci::PipelineArtifact.count }
end
it 'reports the number of destroyed artifacts' do
is_expected.to eq(0)
end
end
context 'with unlocked pipeline artifacts' do
let_it_be(:not_expired_artifact) { create(:ci_pipeline_artifact, :artifact_unlocked, expire_at: 2.days.from_now) }
before do
create_list(:ci_pipeline_artifact, 2, :artifact_unlocked, expire_at: 1.week.ago)
allow(service).to receive(:legacy_destroy_pipeline_artifacts)
end
it 'destroys all expired artifacts' do
expect { subject }.to change { Ci::PipelineArtifact.count }.by(-2)
expect(not_expired_artifact.reload).to be_present
end
context 'when the loop limit is reached' do
before do
stub_const('::Ci::PipelineArtifacts::DestroyAllExpiredService::LOOP_LIMIT', 1)
stub_const('::Ci::PipelineArtifacts::DestroyAllExpiredService::BATCH_SIZE', 1)
end
it 'destroys one artifact' do
expect { subject }.to change { Ci::PipelineArtifact.count }.by(-1)
expect(not_expired_artifact.reload).to be_present
end
it 'reports the number of destroyed artifacts' do
is_expected.to eq(1)
end
end
end
end
describe '#destroy_artifacts_batch' do
it 'returns a falsy value without artifacts' do
expect(service.send(:destroy_artifacts_batch)).to be_falsy
end
end
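# Processing-order sketch, taken from the service itself (for orientation):
# under the EXCLUSIVE_LOCK_KEY lease the service first destroys expired
# artifacts whose own `locked` column is unlocked
# (destroy_unlocked_pipeline_artifacts), then runs the legacy path scoped to
# unlocked pipelines (legacy_destroy_pipeline_artifacts), in batches of
# BATCH_SIZE, bounded by LOOP_LIMIT iterations and LOOP_TIMEOUT.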
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
# This service is responsible for creating a pipeline for a given
# ExternalPullRequest coming from other providers such as GitHub.
module Ci
module ExternalPullRequests
class CreatePipelineService < BaseService
def execute(pull_request)
return pull_request_not_open_error unless pull_request.open?
return pull_request_branch_error unless pull_request.actual_branch_head?
Ci::ExternalPullRequests::CreatePipelineWorker.perform_async(
project.id, current_user.id, pull_request.id
)
end
private
def pull_request_not_open_error
ServiceResponse.error(message: 'The pull request is not opened', payload: nil)
end
def pull_request_branch_error
ServiceResponse.error(message: 'The source sha is not the head of the source branch', payload: nil)
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::ExternalPullRequests::CreatePipelineService, feature_category: :continuous_integration do
describe '#execute' do
let_it_be(:project) { create(:project, :auto_devops, :repository) }
let_it_be(:user) { create(:user) }
let_it_be_with_reload(:pull_request) { create(:external_pull_request, project: project) }
before do
project.add_maintainer(user)
end
subject(:execute) { described_class.new(project, user).execute(pull_request) }
context 'when pull request is open' do
before do
pull_request.update!(status: :open)
end
context 'when source sha is the head of the source branch' do
let(:source_branch) { project.repository.branches.last }
before do
pull_request.update!(source_branch: source_branch.name, source_sha: source_branch.target)
end
it 'enqueues Ci::ExternalPullRequests::CreatePipelineWorker' do
expect { execute }
.to change { ::Ci::ExternalPullRequests::CreatePipelineWorker.jobs.count }
.by(1)
args = ::Ci::ExternalPullRequests::CreatePipelineWorker.jobs.last['args']
expect(args[0]).to eq(project.id)
expect(args[1]).to eq(user.id)
expect(args[2]).to eq(pull_request.id)
end
end
context 'when source sha is not the head of the source branch (force push upon rebase)' do
let(:source_branch) { project.repository.branches.first }
let(:commit) { project.repository.commits(source_branch.name, limit: 2).last }
before do
pull_request.update!(source_branch: source_branch.name, source_sha: commit.sha)
end
it 'does nothing', :aggregate_failures do
expect { execute }
.not_to change { ::Ci::ExternalPullRequests::CreatePipelineWorker.jobs.count }
expect(execute).to be_error
expect(execute.message).to eq('The source sha is not the head of the source branch')
expect(execute.payload).to be_nil
end
end
end
context 'when pull request is not opened' do
before do
pull_request.update!(status: :closed)
end
it 'does nothing', :aggregate_failures do
expect { execute }
.not_to change { ::Ci::ExternalPullRequests::CreatePipelineWorker.jobs.count }
expect(execute).to be_error
expect(execute.message).to eq('The pull request is not opened')
expect(execute.payload).to be_nil
end
end
end
end
|