hexsha
stringlengths 40
40
| size
int64 2
1.01M
| content
stringlengths 2
1.01M
| avg_line_length
float64 1.5
100
| max_line_length
int64 2
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
b9f78b350ef9a08ef5a8e7cbdd1f41b5ae1e0039 | 139 | class AddAprilFoolsFlagToUsers < ActiveRecord::Migration[6.0]
def change
add_column :users, :enabled_april_fools, :boolean
end
end
| 23.166667 | 61 | 0.776978 |
79edba8ab16e2edba0ec24aeeaabd487ed500480 | 1,848 | #!/bin/env ruby
require 'pathname'

# nightly-reaper: prunes old nightly build archives from an arch directory.
# Policy: keep everything newer than @YOUNG_THRESH hrevs behind the latest;
# among older ("mature") builds keep only odd hrev numbers.

# Minimum number of builds that must exist before any reaping happens.
@MIN = 100
# Builds within this many hrevs of the newest are always kept ("young").
@YOUNG_THRESH = 200

if ARGV.count != 1
  puts "Usage: nightly-reaper.rb <ARCH_PATH>"
  exit 1
end

@target = Pathname.new(ARGV.first)

# Stage 0: collect {rev:, name:} entries for every *.zip build in @target.
stage0 = []
Dir.foreach(@target) do |item|
  full_path = @target.join(item)
  # FIX: File.directory?(item) tested the bare name relative to the CWD, not
  # relative to @target; join first so the check is against the real path.
  next if File.directory?(full_path)
  next unless item.end_with?(".zip")
  fields = item.split('-')
  # FIX: the original only *warned* on invalid names and then fell through to
  # fields[2].start_with? / .tr, crashing on nil; skip the entry instead.
  if fields[2].nil? || !fields[2].start_with?("hrev")
    puts "Warning: Skipping #{item} due to invalid name!"
    next
  end
  # FIX: tr("hrev", "") deletes every h/r/e/v character anywhere in the
  # string; strip only the leading "hrev" marker.
  hrev = fields[2].sub(/\Ahrev/, "")
  stage0.push({rev: hrev.to_i, name: full_path.to_s})
end
# Newest build first.
stage0.sort! { |a, b| b[:rev] <=> a[:rev] }

if stage0.count < @MIN
  puts "Under #{@MIN} builds! Bailing..."
  exit 0
end

result = []
latest_rev = stage0.first[:rev]
puts latest_rev
# Keep the young builds; among mature builds keep only odd hrev numbers.
stage0.each do |item|
  if item[:rev] > (latest_rev - @YOUNG_THRESH)
    puts "Young: Keeping #{item[:rev]} gt #{latest_rev - @YOUNG_THRESH}"
    result.push(item)
  elsif item[:rev].odd?
    puts "Mature : Keeping odd #{item[:rev]} lt #{latest_rev - @YOUNG_THRESH}"
    result.push(item)
  end
end

to_remove = stage0 - result
puts "Removing: #{to_remove.count}"
puts "Keeping: #{result.count}"

to_remove.each do |filename|
  begin
    puts "Removing #{filename[:name]}..."
    File.delete(filename[:name])
    # Also clean up the sha256 and sig sidecar files no longer needed.
    ["sha256", "sig"].each do |ext|
      sidecar = "#{filename[:name]}.#{ext}"
      if File.file?(sidecar)
        puts "Removing #{sidecar}..."
        File.delete(sidecar)
      end
    end
  rescue StandardError
    # Best-effort: report and keep reaping the remaining builds.
    puts "ERROR REMOVING #{filename[:name]}!"
  end
end
| 22.536585 | 77 | 0.661797 |
1c9174f8783b68a20069ec1d8c9a365b7f95180f | 2,869 | # frozen_string_literal: true
# Specs for Capybara's #has_button? matcher against the /form test page,
# covering disabled-button filtering, button type, and ARIA role handling.
Capybara::SpecHelper.spec '#has_button?' do
  before do
    @session.visit('/form')
  end

  it 'should be true if the given button is on the page' do
    expect(@session).to have_button('med')
    expect(@session).to have_button('crap321')
    expect(@session).to have_button(:crap321)
    expect(@session).to have_button('button with label element')
    expect(@session).to have_button('button within label element')
  end

  it 'should be true for disabled buttons if disabled: true' do
    expect(@session).to have_button('Disabled button', disabled: true)
  end

  it 'should be false if the given button is not on the page' do
    expect(@session).not_to have_button('monkey')
  end

  # Disabled buttons are excluded unless explicitly requested.
  it 'should be false for disabled buttons by default' do
    expect(@session).not_to have_button('Disabled button')
  end

  it 'should be false for disabled buttons if disabled: false' do
    expect(@session).not_to have_button('Disabled button', disabled: false)
  end

  # disabled: :all matches both enabled and disabled buttons.
  it 'should be true for disabled buttons if disabled: :all' do
    expect(@session).to have_button('Disabled button', disabled: :all)
  end

  it 'should be true for enabled buttons if disabled: :all' do
    expect(@session).to have_button('med', disabled: :all)
  end

  it 'can verify button type' do
    expect(@session).to have_button('awe123', type: 'submit')
    expect(@session).not_to have_button('awe123', type: 'reset')
  end

  # enable_aria_role controls whether [role=button] elements count as buttons.
  it 'should be true for role=button when enable_aria_role: true' do
    expect(@session).to have_button('ARIA button', enable_aria_role: true)
  end

  it 'should be false for role=button when enable_aria_role: false' do
    expect(@session).not_to have_button('ARIA button', enable_aria_role: false)
  end
end
# Negative counterpart of #has_button?: verifies #has_no_button? mirrors the
# same disabled/ARIA-role semantics.
Capybara::SpecHelper.spec '#has_no_button?' do
  before do
    @session.visit('/form')
  end

  it 'should be true if the given button is on the page' do
    expect(@session).not_to have_no_button('med')
    expect(@session).not_to have_no_button('crap321')
  end

  it 'should be true for disabled buttons if disabled: true' do
    expect(@session).not_to have_no_button('Disabled button', disabled: true)
  end

  it 'should be false if the given button is not on the page' do
    expect(@session).to have_no_button('monkey')
  end

  # Disabled buttons are "not there" by default, so has_no_button? passes.
  it 'should be false for disabled buttons by default' do
    expect(@session).to have_no_button('Disabled button')
  end

  it 'should be false for disabled buttons if disabled: false' do
    expect(@session).to have_no_button('Disabled button', disabled: false)
  end

  it 'should be true for role=button when enable_aria_role: false' do
    expect(@session).to have_no_button('ARIA button', enable_aria_role: false)
  end

  it 'should be false for role=button when enable_aria_role: true' do
    expect(@session).not_to have_no_button('ARIA button', enable_aria_role: true)
  end
end
| 32.602273 | 81 | 0.722203 |
796e32d281e5e84b2e039c94de36f600a3c3ae79 | 5,281 | require "test_helper"
# Integration tests for cross-cutting ApplicationController behavior:
# content-negotiation error codes, API-key authentication (basic auth and
# params), CSRF enforcement, and API rate limiting.
class ApplicationControllerTest < ActionDispatch::IntegrationTest
  context "The application controller" do
    should "return 406 Not Acceptable for a bad file extension" do
      get posts_path, params: { format: :jpg }
      assert_response 406

      get posts_path, params: { format: :blah }
      assert_response 406
    end

    context "on a RecordNotFound error" do
      should "return 404 Not Found even with a bad file extension" do
        get post_path("bad.json")
        assert_response 404

        get post_path("bad.jpg")
        assert_response 404

        get post_path("bad.blah")
        assert_response 404
      end
    end

    context "on a PaginationError" do
      should "return 410 Gone even with a bad file extension" do
        get posts_path, params: { page: 999999999 }, as: :json
        assert_response 410

        get posts_path, params: { page: 999999999 }, as: :jpg
        assert_response 410

        get posts_path, params: { page: 999999999 }, as: :blah
        assert_response 410
      end
    end

    context "on api authentication" do
      setup do
        @user = create(:user, password: "password")
        @api_key = ApiKey.generate!(@user)

        # Forgery protection is off by default in tests; enable it so these
        # cases exercise the real CSRF bypass for API-authenticated requests.
        ActionController::Base.allow_forgery_protection = true
      end

      teardown do
        ActionController::Base.allow_forgery_protection = false
      end

      context "using http basic auth" do
        should "succeed for api key matches" do
          basic_auth_string = "Basic #{::Base64.encode64("#{@user.name}:#{@api_key.key}")}"
          get edit_user_path(@user), headers: { HTTP_AUTHORIZATION: basic_auth_string }
          assert_response :success
        end

        should "fail for api key mismatches" do
          basic_auth_string = "Basic #{::Base64.encode64("#{@user.name}:badpassword")}"
          get edit_user_path(@user), headers: { HTTP_AUTHORIZATION: basic_auth_string }
          assert_response 401
        end

        # API-key-authenticated writes are exempt from CSRF protection.
        should "succeed for non-GET requests without a CSRF token" do
          assert_changes -> { @user.reload.enable_safe_mode }, from: false, to: true do
            basic_auth_string = "Basic #{::Base64.encode64("#{@user.name}:#{@api_key.key}")}"
            put user_path(@user), headers: { HTTP_AUTHORIZATION: basic_auth_string }, params: { user: { enable_safe_mode: "true" } }, as: :json
            assert_response :success
          end
        end
      end

      context "using the api_key parameter" do
        should "succeed for api key matches" do
          get edit_user_path(@user), params: { login: @user.name, api_key: @api_key.key }
          assert_response :success
        end

        # login and api_key must both be present and must match.
        should "fail for api key mismatches" do
          get edit_user_path(@user), params: { login: @user.name }
          assert_response 401

          get edit_user_path(@user), params: { api_key: @api_key.key }
          assert_response 401

          get edit_user_path(@user), params: { login: @user.name, api_key: "bad" }
          assert_response 401
        end

        should "succeed for non-GET requests without a CSRF token" do
          assert_changes -> { @user.reload.enable_safe_mode }, from: false, to: true do
            put user_path(@user), params: { login: @user.name, api_key: @api_key.key, user: { enable_safe_mode: "true" } }, as: :json
            assert_response :success
          end
        end
      end

      context "without any authentication" do
        should "redirect to the login page" do
          get edit_user_path(@user)
          assert_redirected_to new_session_path(url: edit_user_path(@user))
        end
      end

      context "with cookie-based authentication" do
        # Cookie sessions, unlike API keys, must present a CSRF token.
        should "not allow non-GET requests without a CSRF token" do
          # get the csrf token from the login page so we can login
          get new_session_path
          assert_response :success
          token = css_select("form input[name=authenticity_token]").first["value"]

          # login
          post session_path, params: { authenticity_token: token, name: @user.name, password: "password" }
          assert_redirected_to posts_path

          # try to submit a form with cookies but without the csrf token
          put user_path(@user), headers: { HTTP_COOKIE: headers["Set-Cookie"] }, params: { user: { enable_safe_mode: "true" } }
          assert_response 403
          assert_equal("ActionController::InvalidAuthenticityToken", css_select("p").first.content)
          assert_equal(false, @user.reload.enable_safe_mode)
        end
      end
    end

    context "on session cookie authentication" do
      should "succeed" do
        user = create(:user, password: "password")

        post session_path, params: { name: user.name, password: "password" }
        get edit_user_path(user)
        assert_response :success
      end
    end

    context "when the api limit is exceeded" do
      should "fail with a 429 error" do
        user = create(:user)
        post = create(:post, rating: "s", uploader: user)

        # Force the throttle regardless of actual request volume, then check
        # the write was rejected and the record left unchanged.
        UserThrottle.any_instance.stubs(:throttled?).returns(true)
        put_auth post_path(post), user, params: { post: { rating: "e" } }

        assert_response 429
        assert_equal("s", post.reload.rating)
      end
    end
  end
end
| 35.206667 | 143 | 0.631699 |
035bb7bbc067bf68e81fa26a6215c37e639190ab | 2,068 | class FavoriteGroupsController < ApplicationController
before_filter :member_only, :except => [:index, :show]
respond_to :html, :xml, :json, :js
def index
@favorite_groups = FavoriteGroup.search(params[:search]).order("updated_at desc").paginate(params[:page], :limit => params[:limit], :search_count => params[:search])
respond_with(@favorite_groups) do |format|
format.xml do
render :xml => @favorite_groups.to_xml(:root => "favorite-groups")
end
end
end
def show
@favorite_group = FavoriteGroup.find(params[:id])
@post_set = PostSets::FavoriteGroup.new(@favorite_group, params[:page])
respond_with(@favorite_group)
end
def new
@favorite_group = FavoriteGroup.new
respond_with(@favorite_group)
end
def create
@favorite_group = FavoriteGroup.create(params[:favorite_group])
respond_with(@favorite_group) do |format|
format.html do
if @favorite_group.errors.any?
render :action => "new"
else
redirect_to favorite_groups_path
end
end
end
end
def edit
@favorite_group = FavoriteGroup.find(params[:id])
check_privilege(@favorite_group)
respond_with(@favorite_group)
end
def update
@favorite_group = FavoriteGroup.find(params[:id])
check_privilege(@favorite_group)
@favorite_group.update_attributes(params[:favorite_group])
unless @favorite_group.errors.any?
flash[:notice] = "Favorite group updated"
end
respond_with(@favorite_group)
end
def destroy
@favorite_group = FavoriteGroup.find(params[:id])
check_privilege(@favorite_group)
@favorite_group.destroy
flash[:notice] = "Favorite group deleted"
redirect_to favorite_groups_path
end
def add_post
@favorite_group = FavoriteGroup.find(params[:id])
check_privilege(@favorite_group)
@post = Post.find(params[:post_id])
@favorite_group.add!(@post)
end
private
def check_privilege(favgroup)
raise User::PrivilegeError unless favgroup.editable_by?(CurrentUser.user)
end
end
| 27.945946 | 169 | 0.703578 |
7a5f8e0e02bcc4ccbda84a3837c31d1cec1a2a15 | 50 | module CuisineInspoDishes
VERSION = "0.1.0"
end
| 12.5 | 25 | 0.74 |
6241b53c3a9fc982f6324d2f3fdea71dc52041be | 326 | class Piano
def keys
'32'
end
def pedals
# return nil
end
end
# Demo: keys returns the String '32', so .length prints 2.
piano = Piano.new
puts piano.keys.length
# Safe navigation: pedals returns nil, which has no #length, so this would
# raise NoMethodError without &.; with &. it evaluates to nil.
puts piano.pedals&.length
piano2 = nil
# A nil receiver also short-circuits the whole chain to nil without raising.
puts piano2&.keys&.length
abbb120ef883b306a037ed162da4e1305c630a78 | 4,057 | # frozen_string_literal: true
require 'aws_backend'
# InSpec plural resource for AWS IAM users. Pages through list_users and
# exposes one FilterTable row per user with policy, MFA, access-key and
# password metadata.
class AwsIamUsers < AwsResourceBase
  name 'aws_iam_users'
  desc 'Verifies settings for a collection of AWS IAM Users'
  example "
    describe aws_iam_user(user_name: 'psmith') do
      it { should exist }
    end
  "

  attr_reader :access_keys, :attached_policy_names, :attached_policy_arns, :has_console_password, :has_mfa_enabled, :inline_policy_names, :username, :user_arn, :user_id, :table

  alias has_mfa_enabled? has_mfa_enabled
  alias has_console_password? has_console_password

  FilterTable.create
             .register_column(:usernames, field: :username)
             .register_column(:user_arns, field: :user_arn)
             .register_column(:user_ids, field: :user_id)
             .register_column(:access_keys, field: :access_keys)
             .register_column(:has_attached_policies, field: :has_attached_policies)
             .register_column(:attached_policy_names, field: :attached_policy_names)
             .register_column(:attached_policy_arns, field: :attached_policy_arns)
             .register_column(:has_console_password, field: :has_console_password)
             .register_column(:has_inline_policies, field: :has_inline_policies)
             .register_column(:inline_policy_names, field: :inline_policy_names)
             .register_column(:has_mfa_enabled, field: :has_mfa_enabled)
             .register_column(:password_ever_used?, field: :password_ever_used?)
             .register_column(:password_last_used_days_ago, field: :password_last_used_days_ago)
             .install_filter_methods_on_resource(self, :table)

  def initialize(opts = {})
    super(opts)
    validate_parameters
    @table = fetch_data
  end

  # Pages through list_users (via resp.marker) and assembles one row hash per
  # user. Returns — and caches in @table — the array consumed by FilterTable.
  def fetch_data
    user_rows = []
    resp = {}
    pagination_options = {}
    loop do
      catch_aws_errors do
        iam_client = @aws.iam_client
        resp = iam_client.list_users(pagination_options)
        users = resp.users
        return [] if !users || users.empty?

        users.each do |u|
          username = { user_name: u.arn.split('/').last }
          attached_policies = iam_client.list_attached_user_policies(username).attached_policies
          inline_policies = iam_client.list_user_policies(username).policy_names
          password_last_used = u.password_last_used
          # -1 is the sentinel for "console password never used".
          password_last_used_days_ago =
            if password_last_used
              ((Time.now - password_last_used) / (24 * 60 * 60)).to_i
            else
              -1
            end
          user_rows += [{ username: username[:user_name],
                          user_arn: u.arn,
                          user_id: u.user_id,
                          access_keys: user_access_keys(username),
                          has_mfa_enabled: !iam_client.list_mfa_devices(username).mfa_devices.empty?,
                          has_attached_policies: !attached_policies.empty?,
                          attached_policy_names: attached_policies.map { |p| p[:policy_name] },
                          attached_policy_arns: attached_policies.map { |p| p[:policy_arn] },
                          has_inline_policies: !inline_policies.empty?,
                          # FIX: reuse the list fetched above instead of making a
                          # second list_user_policies API call per user.
                          inline_policy_names: inline_policies,
                          password_ever_used?: !password_last_used.nil?,
                          password_last_used_days_ago: password_last_used_days_ago,
                          has_console_password: has_password?(username) }]
        end
      end
      break if resp.marker.nil?
      pagination_options = { marker: resp.marker }
    end
    @table = user_rows
  end

  private

  # True when the user has a console login profile; the API raises
  # NoSuchEntity when no profile exists.
  def has_password?(username)
    @aws.iam_client.get_login_profile(username)
    true
  rescue Aws::IAM::Errors::NoSuchEntity
    false
  end

  # Returns the user's access-key metadata list ([] when the user has none).
  # FIX: the original body ended with `[] if keys.empty?`, which evaluates to
  # nil whenever the user HAS keys — so real keys were never returned.
  def user_access_keys(username)
    @aws.iam_client.list_access_keys(username).access_key_metadata || []
  end
end
| 39.009615 | 176 | 0.647769 |
ffbbc3a91c1722c57e61b0c0a313bbbb59eb00e0 | 3,395 | Rails.application.configure do
# Settings specified here will take precedence over those in config/application.rb.
# Code is not reloaded between requests.
config.cache_classes = true
# Eager load code on boot. This eager loads most of Rails and
# your application in memory, allowing both threaded web servers
# and those relying on copy on write to perform better.
# Rake tasks automatically ignore this option for performance.
config.eager_load = true
# Full error reports are disabled and caching is turned on.
config.consider_all_requests_local = false
config.action_controller.perform_caching = true
# Enable Rack::Cache to put a simple HTTP cache in front of your application
# Add `rack-cache` to your Gemfile before enabling this.
# For large-scale production use, consider using a caching reverse proxy like
# NGINX, varnish or squid.
# config.action_dispatch.rack_cache = true
# Disable serving static files from the `/public` folder by default since
# Apache or NGINX already handles this.
config.serve_static_files = ENV['RAILS_SERVE_STATIC_FILES'].present?
# Compress JavaScripts and CSS.
config.assets.js_compressor = :uglifier
# config.assets.css_compressor = :sass
# Do not fallback to assets pipeline if a precompiled asset is missed.
config.assets.compile = false
# Asset digests allow you to set far-future HTTP expiration dates on all assets,
# yet still be able to expire them through the digest params.
config.assets.digest = true
# `config.assets.precompile` and `config.assets.version` have moved to config/initializers/assets.rb
# Specifies the header that your server uses for sending files.
# config.action_dispatch.x_sendfile_header = 'X-Sendfile' # for Apache
# config.action_dispatch.x_sendfile_header = 'X-Accel-Redirect' # for NGINX
# Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies.
# config.force_ssl = true
# Use the lowest log level to ensure availability of diagnostic information
# when problems arise.
config.log_level = :info
# Prepend all log lines with the following tags.
# config.log_tags = [ :subdomain, :uuid ]
# Use a different logger for distributed setups.
# config.logger = ActiveSupport::TaggedLogging.new(SyslogLogger.new)
# Use a different cache store in production.
# config.cache_store = :mem_cache_store
# Enable serving of images, stylesheets, and JavaScripts from an asset server.
# config.action_controller.asset_host = 'http://assets.example.com'
# Ignore bad email addresses and do not raise email delivery errors.
# Set this to true and configure the email server for immediate delivery to raise delivery errors.
# config.action_mailer.raise_delivery_errors = false
# Enable locale fallbacks for I18n (makes lookups for any locale fall back to
# the I18n.default_locale when a translation cannot be found).
config.i18n.fallbacks = true
# Send deprecation notices to registered listeners.
config.active_support.deprecation = :notify
# Use default logging formatter so that PID and timestamp are not suppressed.
config.log_formatter = ::Logger::Formatter.new
# Do not dump schema after migrations.
config.active_record.dump_schema_after_migration = false
config.assets.compile = true
config.assets.precompile = ['*.js', '*.css', '*.css.erb']
end
| 41.402439 | 102 | 0.759941 |
ac07e6871736c9a728aa1061d7ce1c7198ddcce6 | 60 | require_relative 'spec_helper'
# Placeholder spec: no examples written for DiffLint yet.
describe "DiffLint" do
end
| 10 | 30 | 0.8 |
7ab500923c06ffb9d254a968a84f44644dd3be8b | 512 | require 'rrreader/version'
require 'rrreader/parser'
require 'colorize'
# Facade for parsing an RSS/XML feed and pretty-printing it to stdout.
module Rrreader
  # Parses +xml_str+ and returns the channel object built by the parser.
  def self.XML(xml_str)
    Rrreader::Parser.new.xml(xml_str)
  end

  # Parses +xml_str+, then prints the channel header followed by every item
  # (title + magnet link), colorized. Returns nil.
  def self.PRINT(xml_str)
    feed = XML(xml_str)
    puts
    puts "#{feed.title}\n#{feed.description}".colorize(:light_green)
    puts ('-' * 50).colorize(:green)
    feed.items.each do |entry|
      puts "#{entry.title}\n#{entry.magnet}".colorize(:green)
      puts ('*' * 10).colorize(:green)
    end
    nil
  end
end
| 19.692308 | 74 | 0.658203 |
f8d1684ea06ccacd57fc858b421eb5dbf65c24fa | 913 | class GroupsController < ApplicationController
# GET /groups.json
def index
render json: Group.all
end
# GET /groups/1.json
def show
group = Group.find params[:id]
render json: group
end
# POST /groups.json
def create
client_id = params[:group][:client_id]
group = Group.new params[:group]
group.client_id = client_id
if group.save
render json: group, status: :created
else
render json: { errors: group.errors }, status: :unprocessable_entity
end
end
# PUT /groups/1.json
def update
group = Group.find params[:id]
if group.update_attributes params[:group]
render json: group, status: :ok
else
render json: { errors: group.errors }, status: :unprocessable_entity
end
end
# DELETE /groups/1.json
def destroy
group = Group.find params[:id]
group.destroy
render json: nil, status: :ok
end
end
| 20.75 | 74 | 0.657174 |
ff6261aef893b70075846bce636445b71045e2f7 | 7,128 | require 'rails_helper'
# System specs for the admin meter-review workflow: listing schools with
# pending DCC meter reviews, performing a review (with/without consent and
# bills), and viewing a completed review.
RSpec.describe 'meter_reviews', type: :system do
  # Three schools: one pending review, one already granted, one non-DCC.
  let!(:school) { create(:school) }
  let!(:reviewed_school) { create(:school) }
  let!(:other_school) { create(:school) }
  let!(:dcc_meter) { create(:electricity_meter, school: school, dcc_meter: true, consent_granted: false) }
  let!(:dcc_meter_granted) { create(:electricity_meter, school: reviewed_school, dcc_meter: true, consent_granted: true) }
  let!(:electricity_meter) { create(:electricity_meter, school: other_school) }
  let!(:admin) { create(:admin) }

  context 'with pending reviews' do
    before(:each) do
      login_as(admin)
      visit root_path
      click_on 'Admin'
    end

    it 'lists only meters pending reviews' do
      click_on 'Meter Reviews'
      expect(page).to have_title "Meter Reviews"
      expect(page).to have_content school.name
      expect(page).to_not have_content reviewed_school.name
      expect(page).to_not have_content other_school
    end

    it 'has link to school consent documents' do
      click_on 'Meter Reviews'
      expect(page).to have_link("View", href: school_consent_documents_path(school))
    end

    it 'has link to school consent grants' do
      click_on 'Meter Reviews'
      expect(page).to have_link("View", href: school_consent_grants_path(school))
    end

    context 'with current consent' do
      let!(:consent_statement) { create(:consent_statement, current: true) }
      let!(:consent_grant) { create(:consent_grant, consent_statement: consent_statement, school: school) }
      before(:each) do
        click_on 'Meter Reviews'
      end

      # Font Awesome check/times icons signal consent state in the table.
      it 'displays a tick' do
        expect(page).to have_css('td.consent i.fa-check-circle')
      end

      it 'does not offer option to request consent' do
        expect(page).to_not have_link('Request consent')
      end

      it 'offers option to complete review' do
        expect(page).to have_link('Perform review')
      end
    end

    context 'with no consent' do
      before(:each) do
        click_on 'Meter Reviews'
      end

      it 'displays a cross' do
        expect(page).to have_css('td.consent i.fa-times-circle')
      end

      it 'offers to request consent' do
        expect(page).to have_link('Request consent')
      end

      it 'does not offer to complete review' do
        expect(page).to_not have_link('Perform review')
      end
    end

    context 'with bills' do
      let!(:consent_document) { create(:consent_document, school: school) }
      before(:each) do
        click_on 'Meter Reviews'
      end

      it 'displays a tick' do
        expect(page).to have_css('td.bills i.fa-check-circle')
      end
    end

    context 'without bills' do
      before(:each) do
        click_on 'Meter Reviews'
      end

      it 'displays a cross' do
        expect(page).to have_css('td.bills i.fa-times-circle')
      end
    end

    context 'when viewing meters for school' do
      it 'displays a link to perform a review' do
        visit school_meters_path(school)
        expect(page).to have_link("Review meters", href: new_admin_school_meter_review_path(school))
      end
    end
  end

  context 'when performing a review' do
    before(:each) do
      # Stub the external n3rgy status check so no network call is made.
      allow_any_instance_of(MeterManagement).to receive(:check_n3rgy_status).and_return(true)
      login_as(admin)
      visit root_path
      click_on 'Admin'
    end

    context 'and consent is not current' do
      it 'should not allow completion' do
        click_on 'Meter Reviews'
        expect(page).to_not have_link("Complete review")
      end
    end

    context 'when consent is current' do
      let!(:consent_statement) { create(:consent_statement, current: true) }
      let!(:consent_grant) { create(:consent_grant, consent_statement: consent_statement, school: school) }
      before(:each) do
        click_on 'Meter Reviews'
      end

      it 'should list the meters' do
        click_on 'Perform review'
        expect(page.has_unchecked_field?(dcc_meter.mpan_mprn.to_s)).to be true
        expect(page.has_link?("View meters")).to be true
      end

      it 'should link to the consent grant' do
        click_on 'Perform review'
        expect(page.has_link?(href: school_consent_grants_path(school))).to be true
      end

      it 'should require meters to be added' do
        click_on 'Perform review'
        click_on 'Complete review'
        expect(page.has_text?("You must select at least one meter")).to be true
        expect(MeterReview.count).to be 0
      end

      # Happy path: a completed review records user + meters and enqueues the
      # background job that grants trusted consents in the DCC.
      it 'completes the review' do
        expect(DccGrantTrustedConsentsJob).to receive(:perform_later).with([dcc_meter])
        click_on 'Perform review'
        check dcc_meter.mpan_mprn.to_s
        click_on 'Complete review'
        expect(page).to have_content("Review was successfully recorded")
        expect(MeterReview.count).to be 1
        expect(MeterReview.first.user).to eql(admin)
        expect(MeterReview.first.meters).to match([dcc_meter])
        expect(MeterReview.first.consent_documents).to be_empty
      end

      context 'and documents are available' do
        let!(:consent_document) { create(:consent_document, school: school, description: "Proof!", title: "Our Energy Bill") }
        before(:each) do
          click_on 'Perform review'
        end

        it 'should provide list of documents' do
          expect(page.has_unchecked_field?(consent_document.title)).to be true
          expect(page.has_link?("View documents")).to be true
        end

        it 'should allow a new bill to be requested' do
          expect(page.has_link?("Request bill")).to be true
        end

        it 'should allow documents to be attached' do
          check dcc_meter.mpan_mprn.to_s
          check consent_document.title.to_s
          click_on 'Complete review'
          expect(MeterReview.first.consent_documents).to match([consent_document])
        end
      end
    end
  end

  context 'when showing a review' do
    let!(:meter_review) { create(:meter_review, school: school, user: admin) }
    let!(:consent_document) { create(:consent_document, school: school, description: "Proof!", title: "Our Energy Bill") }
    before(:each) do
      meter_review.meters << dcc_meter
      meter_review.consent_documents << consent_document
      login_as(admin)
      visit admin_school_meter_review_path(school, meter_review)
    end

    it 'should display user' do
      expect(page.has_text?( meter_review.user.name )).to be true
    end

    it 'should list the meters' do
      meter = meter_review.meters.first
      expect(page.has_link?( meter.mpan_mprn.to_s )).to be true
    end

    it 'should link to consent documents' do
      expect(page.has_link?( consent_document.title )).to be true
    end

    context 'when viewing meters' do
      it 'provides a link to meter reviews' do
        visit school_meters_path(school)
        expect(page).to have_link("Meter reviews", href: admin_school_meter_reviews_path(school) )
      end
    end
  end
end
| 31.400881 | 126 | 0.650814 |
6a3d5233a69e5d3e3cbff1e31a10f14973377725 | 7,336 | #!/usr/bin/env ruby
#----------------------------------------------------------------@@@ Copyright
# Copyright: Mike 'Fuzzy' Partin 2012
#----------------------------------------------------------------@@@ License
# Licensed under the BSD License (Any version you like)
#----------------------------------------------------------------@@@ Changelog
# 02/05/2012 Basic argument handling and inititial code structure coming
# together nicely.
# 02/05/2012 Have put together a rough draft of the Dd and Buffer classes that
# Should give us some configurable buffered I/O and let us do some
# adaption arguments for those variables later on.
# 02/05/2012 Everything but pipe= works at this point.
#----------------------------------------------------------------@@@ Roadmap
#----------------------------------------------------------------@@@ Requires
#----------------------------------------------------------------## Stdlib
require 'net/ftp' # Our HTTP/HTTPS/FTP support additions
require 'pty' # pipe= support
#----------------------------------------------------------------## 3rd Party
#----------------------------------------------------------------## Internal
#----------------------------------------------------------------@@@ Constants
VERSION = '1.0'
# Default runtime options; key=value command-line arguments override these
# (see the argument loop at the bottom of the file).
OPTS = {
  :if => ARGF, # input source: STDIN / cmdline files, a file path, or an ftp:// URL
  :of => STDOUT, # output sink: STDOUT or a file path
  :bs => 1048576, # block size in bytes (1M)
  :count => nil, # max number of blocks to copy (nil = unlimited)
  :pipe => nil, # shell command that consumes the output (overrides :of)
  :gauge => 1 # stderr progress gauge: 1 = on, 0 = off
}
#----------------------------------------------------------------@@@ Modules
# String-formatting helpers for durations and byte counts.
module Format
  module_function

  # Formats a duration in seconds as "<secs>s".
  def format_time(secs=nil)
    "#{secs}s"
  end

  # Formats a byte count using binary units: "%dB" below 1 KiB, then
  # two-decimal KB/MB/GB/TB ("%.02f<unit>").
  def format_size(bytes=nil)
    if bytes / 1024 >= 1
      kbytes = Float(bytes) / 1024.00
      if kbytes / 1024 >= 1
        mbytes = Float(kbytes) / 1024.00
        if mbytes / 1024 >= 1
          gbytes = Float(mbytes) / 1024.00
          if gbytes / 1024 >= 1
            # FIX: the original wrote "'%.02fTB' % Float(gbytes)/1024.00",
            # which divides the *formatted String* by 1024 (NoMethodError),
            # and then unconditionally overwrote retv with the GB formatting.
            retv = '%.02fTB' % (gbytes / 1024.00)
          else
            retv = '%.02fGB' % gbytes
          end
        else
          retv = '%.02fMB' % mbytes
        end
      else
        retv = '%.02fKB' % kbytes
      end
    else
      retv = '%dB' % bytes
    end
    retv
  end
end
#----------------------------------------------------------------@@@ Classes
# Buffered copy engine ("advanced dd"): reads from a local file or an ftp://
# URL and writes to a file, STDOUT, or a pipe command, drawing a progress
# gauge on stderr as it goes.
class DD
  include Format

  # args may contain any subset of the keys in OPTS (:if, :of, :bs, :count,
  # :pipe, :gauge); unknown keys are silently ignored.
  def initialize(args={})
    # FIX: was `@OPTS = OPTS` — an alias that let transfer() mutate the global
    # constant. A shallow dup keeps this instance's option set independent.
    @OPTS = OPTS.dup
    # Only accept supported option keys; ignore everything else entirely.
    OPTS.keys.each {|i| @OPTS[i] = args[i] if args.keys.include? i}
  end

  # Renders progress on stderr: "cur of tot (pct%)" when tot > 0, or just the
  # bytes written when the total size is unknown (tot == 0). No-op when the
  # gauge is disabled or either argument is nil.
  def gauge(cur=nil, tot=nil)
    if @OPTS[:gauge].to_i == 1 and ![cur, tot].include? nil
      STDERR.write "%8s of %8s (%3d%%) \r" % [
        format_size(cur),
        format_size(tot),
        (Float(cur) / Float(tot)) * 100
      ] if tot > 0
      STDERR.write "%8s written \r" % format_size(cur) if tot == 0
      STDERR.flush
    end
  end

  # Runs the copy. ftp:// inputs stream via Net::FTP; anything else matching a
  # path-like pattern is opened as a local file. Honors :bs (block size),
  # :count (max blocks) and :pipe (shell command consuming the data).
  def transfer
    begin
      cnt = 0
      tot = 0
      STDERR.sync = true
      # FIX: only File.open when :of is a path string; the original called
      # File.open(STDOUT, 'w+') for the default output, raising TypeError.
      if @OPTS[:of]
        @OPTS[:out] = @OPTS[:of].is_a?(String) ? File.open(@OPTS[:of], 'w+') : @OPTS[:of]
      end
      if @OPTS[:if] =~ /^ftp:\/\/.*$/
        if_split = @OPTS[:if].split('/')
        info = {
          :path => "/#{if_split[3..(if_split.size - 2)].join('/')}",
          :file => @OPTS[:if].split('/')[(@OPTS[:if].split('/').size - 1)],
          :host => @OPTS[:if].split('/')[2],
          :user => 'ftp',
          :pswd => '[email protected]'
        }
        # user:pass@host credentials embedded in the URL override the
        # anonymous defaults above.
        if info[:host] =~ /^.*:.*@.*$/
          info[:user], info[:pswd] = info[:host].split('@')[0].split(':')
          tmp = info[:host].split('@')[1]
          info[:host] = tmp
        end
        ftp = Net::FTP.open(info[:host], user=info[:user], passwd=info[:pswd])
        ftp.passive=true
        ftp.chdir(info[:path])
        size = ftp.size(info[:file])
        if @OPTS[:pipe]
          begin
            # getbinaryfile's local target is /dev/null: the data is consumed
            # in the block and forwarded to the pipe, not saved locally.
            null = '/dev/null'
            IO.popen(@OPTS[:pipe], 'w') do|tmp|
              @OPTS[:out] = tmp
              begin
                ftp.getbinaryfile(info[:file], null, @OPTS[:bs].to_i) do|blk|
                  break if @OPTS[:count] and cnt == @OPTS[:count].to_i
                  @OPTS[:out].write(blk)
                  @OPTS[:out].flush
                  cnt += 1
                  tot += blk.size
                  gauge(tot, size)
                end
              end
            end
          end
        else
          ftp.getbinaryfile(info[:file], '/dev/null', @OPTS[:bs].to_i) do|blk|
            break if @OPTS[:count] and cnt == @OPTS[:count].to_i
            @OPTS[:out].write(blk)
            cnt += 1
            tot += blk.size
            gauge(tot, size)
          end
        end
        puts
      elsif @OPTS[:if] =~ /^(\.|\/|\~|[a-z]+).*$/
        @OPTS[:in] = File.open(@OPTS[:if], 'r')
        size = @OPTS[:in].size
        if @OPTS[:pipe]
          begin
            null = '/dev/null'
            IO.popen(@OPTS[:pipe], 'w') do|tmp|
              @OPTS[:out] = tmp
              begin
                while 1
                  break if @OPTS[:count] and cnt == @OPTS[:count].to_i
                  # FIX: added .to_i for consistency with the FTP path — :bs
                  # is a String when supplied on the command line, and
                  # IO#read(String) raises TypeError.
                  blk = @OPTS[:in].read(@OPTS[:bs].to_i)
                  break if !blk
                  @OPTS[:out].write(blk)
                  cnt += 1
                  tot += blk.size
                  gauge(tot, size)
                end
              end
            end
          end
        else
          while 1
            break if @OPTS[:count] and cnt == @OPTS[:count].to_i
            blk = @OPTS[:in].read(@OPTS[:bs].to_i)
            break if !blk
            @OPTS[:out].write(blk)
            cnt += 1
            tot += blk.size
            gauge(tot, size)
          end
        end
        puts
      end
    rescue NoMethodError => msg
      # NOTE(review): only NoMethodError is rescued; network/IO errors
      # propagate to the caller (main wraps this in its own begin/rescue).
      puts msg
      puts 'debug your shit yo'
      exit
    end
  end
end
#----------------------------------------------------------------@@@ Methods
# Prints the command-line usage/help text to stdout.
def usage
  puts "\nAdd (Advanced dd) v#{VERSION}"
  puts "\nUsage:"
  puts "add [opts]"
  puts "\nSupported options:"
  puts 'if=<INPUT> (currently supports ftp/file/stdin: DEFAULT stdin)'
  puts 'of=<OUTPUT> (supports file/stdout: DEFAULT stdout)'
  puts 'bs=<BLOCK_SIZE> (default 1M)'
  puts 'count=<ITERATIONS> (no defaults)'
  puts 'pipe=<COMMAND> (no defaults)'
  # FIX: was 'guage=(0|1)'. The argument parser only accepts keys present in
  # OPTS, so the misspelled option advertised here would be silently ignored.
  puts 'gauge=(0|1)'
  puts "\nExample:"
  puts 'add if=ftp://10.0.0.1/src.tar.bz2 bs=1M pipe="tar -C /usr/src -jxf -"'
  puts
end
#----------------------------------------------------------------@@@ Main Logic
# --- Main logic: fold key=value arguments into OPTS, then run the transfer.
ARGV.each do|arg|
  if arg =~ /^.*help.*$/
    usage
    exit
  elsif arg =~ /^.*=.*$/
    key, val = arg.split('=')
    # Only keys already present in OPTS are accepted; unknown keys are
    # silently ignored.
    OPTS[key.to_sym] = val if OPTS.keys.include? key.to_sym
  end
end
# pipe= and of= are mutually exclusive; pipe always overrides of.
OPTS[:of] = nil if OPTS[:pipe] != nil
begin
  o = DD.new(OPTS)
  o.transfer
rescue ArgumentError => msg
  # Bad options: show the error plus usage, then bail.
  puts msg
  usage
  exit
rescue Interrupt
  # Ctrl-C: exit quietly.
  exit
end
| 30.823529 | 79 | 0.444929 |
# Squares an Integer; any non-Integer argument (string, float, nil, ...)
# deliberately yields 0 instead of raising.
def square(x)
  x.is_a?(Integer) ? x * x : 0
end
# Prints a Spanish greeting ("Hello from a method") to stdout.
def saludar
  greeting = "Hola desde un metodo"
  puts greeting
end
# Demo: exercise square with an Integer and a non-Integer, then greet.
[3, "sas"].each { |value| puts square(value) }
saludar
4a4ea9875215455ea2830d7d0f75941eef7c85f9 | 251 | require 'rails_helper'
# Controller spec: the admin dashboard action should respond with HTTP 2xx.
RSpec.describe DashboardController, type: :controller do
describe "GET #admin_dashboard" do
it "returns http success" do
get :admin_dashboard
# Only the status code is asserted; rendered template is not checked.
expect(response).to have_http_status(:success)
end
end
end
| 19.307692 | 56 | 0.729084 |
e8cea05bdcee900fe9a8e45712bd3e5230b1d3d7 | 3,442 | # frozen_string_literal: true
# Make the gem's lib/ directory loadable, adding it at most once.
lib_dir = ::File.expand_path("../lib", __FILE__)
$LOAD_PATH.unshift(lib_dir) unless $LOAD_PATH.include?(lib_dir)
require "kitchen/terraform/version.rb"
require "rubygems"
# True when the running Ruby is newer than the 2.5 series (i.e. >= 2.6);
# used below to select a compatible version of the reek dev dependency.
# Gem::Version compares version segments properly, replacing the previous
# hand-rolled major/minor parsing (split, to_i loop, if/else).
version_gt_25 = Gem::Version.new(RUBY_VERSION) >= Gem::Version.new("2.6")
::Gem::Specification.new do |specification|
specification.authors = ["Aaron Lane", "Nick Willever", "Kevin Dickerson", "Nell Shamrell-Harrington",
"Michael Glenney", "Walter Dolce", "Clay Thomas", "Erik R. Rygg", "Kyle Sexton",
"Ewa Czechowska", "Matt Long", "John Engelman", "Steven A. Burns", "David Begin",
"curleighbraces", "Austin Heiman", "Gary Foster", "Ed Bartholomew"]
specification.description = "kitchen-terraform is a set of Test Kitchen plugins for testing Terraform configuration"
specification.files = ::Dir.glob "{lib/**/*.rb,LICENSE,README.md}"
specification.name = "kitchen-terraform"
specification.summary = "Test Kitchen plugins for testing Terraform configuration"
# The gem version is assigned by Kitchen::Terraform::Version (required above).
::Kitchen::Terraform::Version.assign_specification_version specification: specification
specification.email = "[email protected]"
specification.homepage = "https://newcontext-oss.github.io/kitchen-terraform/"
specification.license = "Apache-2.0"
# Development-only dependencies (guard tooling, docs site, test tooling).
specification.add_development_dependency "bundler", "~> 2.0"
specification.add_development_dependency "guard-bundler", "~> 2.1"
specification.add_development_dependency "guard-rspec", "~> 4.7"
specification.add_development_dependency "guard-yard", "~> 2.2"
specification.add_development_dependency "guard", "~> 2.14"
specification.add_development_dependency "middleman-autoprefixer", "~> 2.7"
specification.add_development_dependency "middleman-favicon-maker", "~> 4.1"
specification.add_development_dependency "middleman-livereload", "~> 3.4"
specification.add_development_dependency "middleman-syntax", "~> 3.0"
specification.add_development_dependency "middleman", "~> 4.2"
specification.add_development_dependency "rspec", "~> 3.4"
specification.add_development_dependency "rufo", "~> 0.7"
specification.add_development_dependency "simplecov", "~> 0.16.1"
specification.add_development_dependency "travis", "~> 1.8"
specification.add_development_dependency "yard", "~> 0.9"
# reek is pinned per interpreter: 6.x requires a Ruby newer than 2.5
# (version_gt_25 is computed above from RUBY_VERSION).
if version_gt_25
specification.add_development_dependency "reek", "~> 6.0.2"
else
puts 'version <= 2.5'
specification.add_development_dependency "reek", "~> 5.5"
end
# Runtime dependencies used by the plugins themselves.
specification.add_runtime_dependency "delegate", "~> 0.1.0"
specification.add_runtime_dependency "dry-validation", "~> 0.13"
specification.add_runtime_dependency "mixlib-shellout", "~> 3.0"
specification.add_runtime_dependency "inspec", ">= 3", "< 5"
specification.add_runtime_dependency "json", "~> 2.2"
specification.add_runtime_dependency "test-kitchen", "~> 2.1"
specification.add_runtime_dependency "tty-which", "~> 0.4.0"
# Gem signing: public cert chain always; private key only when built by `gem`.
specification.cert_chain = ["certs/gem-public_cert.pem"]
specification.required_ruby_version = [">= 2.4", "< 2.8"]
specification.requirements = ["Terraform >= v0.11.4, < v0.14.0"]
specification.signing_key = "certs/gem-private_key.pem" if $PROGRAM_NAME =~ /gem\z/
end
| 49.884058 | 118 | 0.728356 |
f8d55fc97c30829d1d12003127dadbe4b954aea0 | 947 | require 'spec_helper'
# Generates static HTML fixtures for frontend (JavaScript) tests by
# rendering real controller responses and storing them on disk.
describe 'Groups (JavaScript fixtures)', type: :controller do
include JavaScriptFixturesHelpers
let(:admin) { create(:admin) }
let(:group) { create(:group, name: 'frontend-fixtures-group' )}
render_views
# Remove stale fixtures once before the suite runs.
before(:all) do
clean_frontend_fixtures('groups/')
end
# The admin must maintain the group and be signed in for these pages.
before do
group.add_maintainer(admin)
sign_in(admin)
end
describe GroupsController, '(JavaScript fixtures)', type: :controller do
it 'groups/edit.html.raw' do |example|
get :edit, params: { id: group }
expect(response).to be_success
# The example description doubles as the fixture file name.
store_frontend_fixture(response, example.description)
end
end
describe Groups::Settings::CiCdController, '(JavaScript fixtures)', type: :controller do
it 'groups/ci_cd_settings.html.raw' do |example|
get :show, params: { group_id: group }
expect(response).to be_success
store_frontend_fixture(response, example.description)
end
end
end
| 24.921053 | 90 | 0.703273 |
6a885e1ace012f29ab404d24b4693fbf83afe070 | 455 | class CreateAssets < ActiveRecord::Migration
# Creates the assets table used for uploads: polymorphic owner
# (uploadable_id/uploadable_type), STI subclass column, and ordering.
def change
create_table :assets do |t|
t.string :attachment_file_name
# Polymorphic association back to whatever owns the upload.
t.integer :uploadable_id
t.string :uploadable_type, limit: 25
# Single-table-inheritance subclass name of the asset.
t.string :type, limit: 25
# Manual ordering within an owner's asset list.
t.integer :position
t.text :description
t.timestamps
end
add_index :assets, :position
add_index :assets, [:uploadable_id, :uploadable_type, :type]
end
end | 25.277778 | 64 | 0.626374 |
6218528699f969481c66747277dcaf7162457bfd | 663 | require 'rubygems'
# NOTE(review): the constant is named HAML_GEMSPEC although this gemspec
# describes make_resourceful -- presumably a copy-paste leftover; renaming
# it would change the file's public constant, so confirm before touching.
HAML_GEMSPEC = Gem::Specification.new do |spec|
spec.name = 'make_resourceful'
spec.summary = "An elegant, structured way to build ActionPack Controllers"
# Version is read from the VERSION file living next to this gemspec.
spec.version = File.read(File.dirname(__FILE__) + '/VERSION').strip
spec.authors = ['Hampton Catlin']
spec.email = '[email protected]'
spec.description = <<-END
Take back control of your Controllers. Make them awesome. Make them sleek. Make them resourceful.
END
spec.executables = []
spec.files = Dir['lib/**/*', 'Rakefile', "Readme.rdoc", "VERSION"]
spec.homepage = 'http://github.com/asee/make_resourceful'
spec.test_files = Dir['spec/**/*_spec.rb']
end
| 36.833333 | 103 | 0.702866 |
# Bare controller: the index action has no logic and simply renders the
# default template for this action.
class PermitController < ApplicationController
  def index
  end
end
| 17 | 46 | 0.823529 |
ac2836d1cf444450304291c4e90cba9fcf9c0bf9 | 3,930 | require File.expand_path('../../spec_helper', __FILE__)
# Specs for ExternalReport URL builders: #url_for_offering and
# #url_for_class, plus the auto-generated option helpers.
describe ExternalReport do
let(:logging) { false }
let(:clazz) { FactoryBot.create(:portal_clazz, logging: logging) }
let(:offering) { FactoryBot.create(:portal_offering, {runnable: FactoryBot.create(:external_activity), clazz: clazz}) }
# The report URL already carries a query string; builders must append to it.
let(:external_report) { FactoryBot.create(:external_report, url: 'https://example.com?cool=true') }
let(:portal_teacher) { FactoryBot.create(:portal_teacher)}
let(:extra_params) { {} }
describe "#url_for_offering" do
subject { external_report.url_for_offering(offering, portal_teacher.user, 'https', 'perfect.host.com', extra_params) }
it "should handle report urls with parameters" do
# Exactly one '?' means params were merged, not appended with a second '?'.
expect(subject.scan('?').size).to eq(1)
expect(subject).to include('cool=true')
end
describe "when report type is `offering`" do
it "should include the correct parameters" do
expect(subject).to include('reportType=offering', 'offering=', 'classOfferings=', 'class=', 'token=', 'username=')
end
it "should have correctly escaped url params" do
uri = URI.parse(subject)
query_hash = Rack::Utils.parse_query(uri.query)
# API-endpoint params must decode back to absolute https URLs.
expect(query_hash['offering']).to start_with('https://')
expect(query_hash['classOfferings']).to start_with('https://')
expect(query_hash['class']).to start_with('https://')
end
describe "when extra params are provided" do
let(:external_activity) { FactoryBot.create(:external_activity) }
let(:investigation) { FactoryBot.create(:investigation) }
let(:activity) { FactoryBot.create(:activity) }
let(:offering) { FactoryBot.create(:portal_offering, {runnable: external_activity}) }
let(:learner) { FactoryBot.create(:full_portal_learner, {offering: offering }) }
let(:extra_params) { {activity_id: activity.id, student_id: learner.student.id} }
# Wire the activity into the external activity's template so the
# activity_id can be resolved to an index.
before(:each) do
investigation.activities << activity
external_activity.template = investigation
external_activity.save!
end
it "should include the correct parameters" do
expect(subject).to include('activityIndex=0', "studentId=#{learner.user.id}")
end
end
end
end
describe "#url_for_class" do
subject { external_report.url_for_class(offering.clazz, portal_teacher.user, 'https', 'perfect.host.com') }
it "should handle report urls with parameters" do
expect(subject.scan('?').size).to eq(1)
expect(subject).to include('cool=true')
end
it "should include the correct parameters" do
expect(subject).to include('reportType=class', 'classOfferings=',
'class=', 'token=', 'username=')
end
it "should have correctly escaped url params" do
uri = URI.parse(subject)
query_hash = Rack::Utils.parse_query(uri.query)
expect(query_hash['class']).to start_with('https://')
expect(query_hash['classOfferings']).to start_with('https://')
end
# The logging flag comes from the class (see the clazz let above).
describe "with logging not enabled" do
let(:logging) { false }
it "should not include the logging parameter" do
expect(subject).not_to include('logging=')
end
end
describe "with logging enabled" do
let(:logging) { true }
it "should include the logging parameter" do
expect(subject).to include('logging=true')
end
end
end
# TODO: auto-generated
describe '#options_for_client' do
it 'options_for_client' do
external_report = described_class.new
result = external_report.options_for_client
expect(result).not_to be_nil
end
end
# TODO: auto-generated
describe '#options_for_report_type' do
it 'options_for_report_type' do
external_report = described_class.new
result = external_report.options_for_report_type
expect(result).not_to be_nil
end
end
end
| 36.055046 | 128 | 0.668702 |
ffe6f271fd4fe3b957fb04d788bdc0fcd83f6500 | 269 | require "formatador"
module Sakuvm
# Thin presenter around the Formatador gem for printing a titled table.
class Table
# Convenience entry point: builds an instance and renders immediately.
def self.display(title, table_data)
new.display(title, table_data)
end
# Prints the title line first, then the tabular data.
def display(title, table_data)
Formatador.display_line(title)
Formatador.display_table(table_data)
end
end
end
| 17.933333 | 42 | 0.70632 |
# frozen_string_literal: true

module Bunq
  ##
  # Client for the bunq session-server endpoint.
  #
  # https://doc.bunq.com/api/1/call/session-server
  class SessionServers
    def initialize(client)
      @resource = Bunq::Resource.new(client, '/v1/session-server')
      @api_key = client.configuration.api_key
    end

    ##
    # Opens a session using the configured API key and returns the
    # 'Response' portion of the API payload.
    #
    # https://doc.bunq.com/api/1/call/session-server/method/post
    def create
      ensure_api_key!
      @resource.post({secret: @api_key})['Response']
    end

    private

    # Raises a RuntimeError when no API key was configured on the client.
    def ensure_api_key!
      return if @api_key

      raise 'Cannot create session, please add the api_key to your configuration'
    end
  end
end
| 25.095238 | 96 | 0.683112 |
bbbf32519af6c4aa70932acaa219b86fe197e52a | 948 | class Cms::Apis::UserFilesController < ApplicationController
include Cms::BaseFilter
include Cms::CrudFilter
include SS::FileFilter
include SS::AjaxFileFilter
model SS::UserFile
private
# Attributes merged into items so they belong to the current user.
def fix_params
{ cur_user: @cur_user }
end
# Memoizes the current user's files as the working scope.
def set_items
@items ||= @model.user(@cur_user)
end
# Loads a single file from the user's scope and stamps ownership params.
def set_item
set_items
@item ||= begin
item = @items.find(params[:id])
item.attributes = fix_params
item
end
rescue Mongoid::Errors::DocumentNotFound => e
# Destroying an already-missing file is treated as success; otherwise
# the not-found error propagates.
return render_destroy(true) if params[:action] == 'destroy'
raise e
end
public
# Paginated listing of the user's files, ordered by filename.
def index
set_items
@items = @items.
order_by(filename: 1).
page(params[:page]).per(20)
end
def select
select_with_clone
end
# Renders the index template restricted to the files chosen by the client.
def selected_files
@select_ids = params[:select_ids].to_a
set_items
@items = @items.
in(id: @select_ids).
order_by(filename: 1)
render template: "index"
end
end
| 17.886792 | 63 | 0.652954 |
7a3808c3f90f820d3eea8b29152307a66b5d3d99 | 1,025 | cask 'datagrip' do
version '2019.3.2,193.6015.44'
sha256 '48854e2f5b29b14231b521dee0d28e4c04e0b7789242a8c2d8de86c07997d8db'
url "https://download.jetbrains.com/datagrip/datagrip-#{version.before_comma}.dmg"
appcast 'https://data.services.jetbrains.com/products/releases?code=DG&latest=true&type=release'
name 'DataGrip'
homepage 'https://www.jetbrains.com/datagrip/'
auto_updates true
app 'DataGrip.app'
# After uninstall, delete any `datagrip` launcher script found on PATH --
# but only when the file really is the JetBrains launcher, identified by
# its SocketLock marker comment.
uninstall_postflight do
ENV['PATH'].split(File::PATH_SEPARATOR).map { |path| File.join(path, 'datagrip') }.each { |path| File.delete(path) if File.exist?(path) && File.readlines(path).grep(%r{# see com.intellij.idea.SocketLock for the server side of this interface}).any? }
end
# Per-version support/cache/log/preference folders removed by `brew zap`.
zap trash: [
"~/Library/Application Support/DataGrip#{version.major_minor}",
"~/Library/Caches/DataGrip#{version.major_minor}",
"~/Library/Logs/DataGrip#{version.major_minor}",
"~/Library/Preferences/DataGrip#{version.major_minor}",
]
end
| 41 | 253 | 0.696585 |
# Mixin for ActiveModel-backed form objects: wires in validations,
# conversion and naming, and provides a mass-assigning #submit.
module BaseForm
  extend ActiveSupport::Concern

  included do
    include ActiveModel::Validations
    include ActiveModel::Validations::Callbacks
    include ActiveModel::Conversion
    extend ActiveModel::Naming
  end

  # Form objects are never persisted records.
  def persisted?
    false
  end

  # Assigns each attribute through its writer, then validates.
  # On success the optional valid_form_submitted hook is invoked and true
  # is returned; validation failure returns false.
  def submit(attributes = {})
    attributes.each { |name, value| send("#{name}=", value) }
    return false unless valid?

    valid_form_submitted if respond_to?(:valid_form_submitted)
    true
  end
end
| 16.6 | 69 | 0.674699 |
26a943d99c4736da31cc85463791cabc4df76763 | 2,038 |
#
# This file is part of the "Teapot" project, and is released under the MIT license.
#
teapot_version "3.0"
# Project Metadata
# Declares the Async project's human-readable metadata for Teapot tooling.
define_project "async" do |project|
project.title = "Async"
project.summary = 'Implements the reactor pattern using fibers.'
project.license = "MIT License"
project.add_author 'Samuel Williams', email: '[email protected]'
project.version = "1.0.0"
end
# Build Targets
# Builds the static Async library and exports its link flags and headers.
define_target 'async-library' do |target|
target.depends "Language/C++14"
target.depends "Library/Memory"
target.depends "Library/Time", public: true
target.depends "Library/Concurrent", public: true
target.provides "Library/Async" do
source_root = target.package.path + 'source'
# Compile every Async source file into one static library.
library_path = build static_library: "Async", source_files: source_root.glob('Async/**/*.cpp')
append linkflags library_path
append header_search_paths source_root
end
end
# Test target: compiles and runs the unit tests found under test/Async.
define_target "async-tests" do |target|
target.depends 'Library/UnitTest'
target.depends "Language/C++14", private: true
target.depends "Library/Async"
target.depends "Library/Parallel"
# Extra command-line arguments are forwarded to the test runner.
target.provides "Test/Async" do |*arguments|
test_root = target.package.path + 'test'
run source_files: test_root.glob('Async/**/*.cpp'), arguments: arguments
end
end
# Configurations
# Developer configuration: fetches packages from the kurocha GitHub org.
define_configuration "development" do |configuration|
configuration[:source] = "https://github.com/kurocha/"
configuration.import "async"
# Provides all the build related infrastructure:
configuration.require "platforms"
configuration.require "build-files"
# Provides unit testing infrastructure and generators:
configuration.require "unit-test"
# Provides some useful C++ generators:
configuration.require "generate-travis"
configuration.require "generate-project"
configuration.require "generate-cpp-class"
end
# Public configuration consumed by downstream packages depending on async.
define_configuration "async" do |configuration|
configuration.public!
configuration.require "concurrent"
configuration.require "parallel"
configuration.require "time"
configuration.require "memory"
end
| 25.160494 | 96 | 0.759568 |
# Adds a boolean admin flag to users, defaulting to false.
class AddAdminToUsers < ActiveRecord::Migration[5.1]
  def change
    # Fixed: the original passed `default: :false` (a Symbol) -- almost
    # certainly a typo for the boolean literal false, which is what the
    # add_column default option expects for a :boolean column.
    add_column :users, :admin, :boolean, default: false
  end
end
| 22.166667 | 56 | 0.729323 |
08b5d95000dd82dab0dd3cee2eac0e2410d1d9fd | 1,392 | # frozen_string_literal: true
require_relative "keys"
module TTY
class Reader
# Responsible for meta-data information about key pressed
#
# @api private
class Key < Struct.new(:name, :ctrl, :meta, :shift)
def initialize(*)
# Defaults: no name, no modifier keys pressed.
super(nil, false, false, false)
end
end
# Represents key event emitted during keyboard press
#
# @api public
class KeyEvent < Struct.new(:key, :value, :line)
# Create key event from read input codes
#
# @param [Hash[Symbol]] keys
# the keys and codes mapping
# @param [Array[Integer]] codes
#
# @return [KeyEvent]
#
# @api public
def self.from(keys, char, line = "")
key = Key.new
# Unmapped input falls back to the :ignore name.
key.name = (name = keys[char]) ? name : :ignore
# Procs respond to === so they can act as case/when matchers.
case char
when proc { |c| c =~ /^[a-z]{1}$/ }
# Single lowercase letter.
key.name = :alpha
when proc { |c| c =~ /^[A-Z]{1}$/ }
# Single uppercase letter implies the Shift modifier.
key.name = :alpha
key.shift = true
when proc { |c| c =~ /^\d+$/ }
key.name = :num
when proc { |cs| !Keys.ctrl_keys[cs].nil? }
# Input matches a known Ctrl-key sequence.
key.ctrl = true
end
new(key, char, line)
end
# Check if key event can be triggered
#
# @return [Boolean]
#
# @api public
def trigger?
!key.nil? && !key.name.nil?
end
end # KeyEvent
end # Reader
end # TTY
| 23.59322 | 61 | 0.51796 |
61beafc2f745bf9f6aa9e544e04dda27cf6cded9 | 11,196 | require './spec/factories'
module Seeding
class Seeder
class << self
# Populates the development database with fake data: membership fees,
# member profiles, users, items, mountain routes, news, supplementary
# courses and projects. Each collection is destroyed before reseeding,
# so running this wipes existing rows in those collections.
def seed!
# Membership fees in assorted payment states across kw_id ranges 1..100.
Db::Membership::Fee.destroy_all
Factories::Membership::Fee.mass_create!(range: (1..20), state: 'prepaid', cash: false)
Factories::Membership::Fee.mass_create!(range: (21..40), state: 'unpaid', cash: true)
Factories::Membership::Fee.mass_create!(range: (41..50), state: 'unpaid', cash: false)
Factories::Membership::Fee.mass_create!(range: (51..80), state: 'prepaid', cash: true)
Factories::Membership::Fee.mass_create!(range: (81..100), years: [2016, 2017], state: 'prepaid', cash: false)
# 80 applicant profiles that are not yet accepted (kw_id: nil).
Db::Profile.destroy_all
(1..80).step(1) do |i|
profile = Factories::Profile.create!(
first_name: Faker::Name.first_name,
last_name: Faker::Name.last_name,
email: Faker::Internet.unique.email,
phone: Faker::PhoneNumber.cell_phone,
position: 2.times.map { Db::Profile::POSITION.sample }.uniq,
acomplished_courses: 2.times.map { Db::Profile::ACOMPLISHED_COURSES.sample }.uniq,
sections: 2.times.map { Db::Profile::SECTIONS.sample }.uniq,
recommended_by: 3.times.map { Db::Profile::RECOMMENDED_BY.sample }.uniq,
cost: [100, 50, 150].sample,
added: [true, false].sample,
accepted: false,
main_discussion_group: [true, false].sample,
application_date: Faker::Date.between(from: 2.years.ago, to: Date.today),
birth_date: Faker::Date.birthday(min_age: 18, max_age: 65),
kw_id: nil,
city: Faker::Address.city,
birth_place: Faker::Address.city,
pesel: Faker::Code.ean,
postal_code: Faker::Address.postcode,
main_address: Faker::Address.street_address,
optional_address: Faker::Address.secondary_address
)
profile.create_payment(dotpay_id: SecureRandom.hex(13))
end
# 20 accepted profiles whose kw_id (81..100) lines up with the prepaid
# fees created above.
(81..100).step(1) do |i|
profile = Factories::Profile.create!(
first_name: Faker::Name.first_name,
last_name: Faker::Name.last_name,
email: Faker::Internet.unique.email,
phone: Faker::PhoneNumber.cell_phone,
position: 2.times.map { Db::Profile::POSITION.sample }.uniq,
acomplished_courses: 2.times.map { Db::Profile::ACOMPLISHED_COURSES.sample }.uniq,
sections: 2.times.map { Db::Profile::SECTIONS.sample }.uniq,
recommended_by: 3.times.map { Db::Profile::RECOMMENDED_BY.sample }.uniq,
cost: [100, 50, 150].sample,
added: [true, false].sample,
accepted: true,
main_discussion_group: [true, false].sample,
application_date: Faker::Date.between(from: 2.years.ago, to: Date.today),
birth_date: Faker::Date.birthday(min_age: 18, max_age: 65),
kw_id: i,
city: Faker::Address.city,
birth_place: Faker::Address.city,
pesel: Faker::Code.ean,
postal_code: Faker::Address.postcode,
main_address: Faker::Address.street_address,
optional_address: Faker::Address.secondary_address
)
profile.create_payment(dotpay_id: SecureRandom.hex(13), cash: false, state: 'prepaid')
end
# A login user for each accepted profile, each with a paid fee for the
# current year.
Db::User.destroy_all
Db::Profile.where(kw_id: (81..100)).each do |profile|
user = Db::User.new(kw_id: profile.kw_id, first_name: profile.first_name, last_name: profile.last_name, email: profile.email, phone: profile.phone)
user.password = "test#{profile.id}"
user.save
fee = user.membership_fees.create year: Date.today.year, cost: 100, kw_id: user.kw_id, creator_id: user.id
fee.create_payment cash: true, state: 'prepaid', cash_user_id: Db::User.first.id
end
# Two named, accepted profiles with fixed kw_ids for manual testing.
Factories::Profile.create!(
first_name: 'Dariusz',
last_name: 'Finster',
email: '[email protected]',
phone: Faker::PhoneNumber.cell_phone,
position: 2.times.map { Db::Profile::POSITION.sample }.uniq,
acomplished_courses: 2.times.map { Db::Profile::ACOMPLISHED_COURSES.sample }.uniq,
sections: 2.times.map { Db::Profile::SECTIONS.sample }.uniq,
recommended_by: 3.times.map { Db::Profile::RECOMMENDED_BY.sample }.uniq,
cost: [100, 50, 150].sample,
added: [true, false].sample,
accepted: true,
main_discussion_group: [true, false].sample,
application_date: Faker::Date.between(from: 2.years.ago, to: Date.today),
birth_date: Faker::Date.birthday(min_age: 18, max_age: 65),
kw_id: 2345,
city: Faker::Address.city,
birth_place: Faker::Address.city,
pesel: Faker::Code.ean,
postal_code: Faker::Address.postcode,
main_address: Faker::Address.street_address,
optional_address: Faker::Address.secondary_address
)
Factories::Profile.create!(
first_name: 'Piotr',
last_name: 'Podolski',
email: '[email protected]',
phone: Faker::PhoneNumber.cell_phone,
position: 2.times.map { Db::Profile::POSITION.sample }.uniq,
acomplished_courses: 2.times.map { Db::Profile::ACOMPLISHED_COURSES.sample }.uniq,
sections: 2.times.map { Db::Profile::SECTIONS.sample }.uniq,
recommended_by: 3.times.map { Db::Profile::RECOMMENDED_BY.sample }.uniq,
cost: [100, 50, 150].sample,
added: [true, false].sample,
accepted: true,
main_discussion_group: [true, false].sample,
application_date: Faker::Date.between(from: 2.years.ago, to: Date.today),
birth_date: Faker::Date.birthday(min_age: 18, max_age: 65),
kw_id: 3123,
city: Faker::Address.city,
birth_place: Faker::Address.city,
pesel: Faker::Code.ean,
postal_code: Faker::Address.postcode,
main_address: Faker::Address.street_address,
optional_address: Faker::Address.secondary_address
)
# Fixed admin/office accounts (password "test") for manual login.
user1 = Db::User.new(
first_name: 'Małgorzata',
last_name: 'Kozak',
email: '[email protected]',
kw_id: 1720,
phone: Faker::PhoneNumber.cell_phone,
admin: true
)
user1.password = "test"
user1.save
user2 = Db::User.new(
first_name: 'Bartłomiej',
last_name: 'Klimas',
email: '[email protected]',
kw_id: 1720,
phone: Faker::PhoneNumber.cell_phone,
admin: true
)
user2.password = "test"
user2.save
user3 = Db::User.new(
first_name: 'Dariusz',
last_name: 'Finster',
email: '[email protected]',
kw_id: 2345,
phone: Faker::PhoneNumber.cell_phone,
roles: ['admin', 'office']
)
user3.password = "test"
user3.save
fee = user3.membership_fees.create year: Date.today.year, cost: 100, kw_id: user3.kw_id, creator_id: user3.id
fee.create_payment cash: true, state: 'prepaid', cash_user_id: Db::User.first.id
user4 = Db::User.new(
first_name: 'Piotr',
last_name: 'Podolski',
email: '[email protected]',
kw_id: 3123,
phone: Faker::PhoneNumber.cell_phone,
roles: ['admin', 'office']
)
user4.password = "test"
user4.save
fee = user4.membership_fees.create year: Date.today.year, cost: 100, kw_id: user4.kw_id, creator_id: user4.id
fee.create_payment cash: true, state: 'prepaid', cash_user_id: Db::User.first.id
# Ten rentable items split between the snw/kw sections.
Db::Item.destroy_all
(1..10).step(1) do |n|
Factories::Item.create!(id: n, display_name: Faker::Commerce.product_name, owner: ['snw', 'kw'].sample)
end
# 200 random climbing/mountain routes attributed to random users.
Db::Activities::MountainRoute.destroy_all
200.times do
Db::Activities::MountainRoute.create(
user: Db::User.all.sample,
name: Faker::Mountain.name,
description: Faker::Lorem.paragraph,
peak: Faker::Mountain.name,
area: Faker::Mountain.range,
mountains: Faker::Mountain.range,
colleagues: Db::User.all.sample(2),
difficulty: ['+IV', '6', 'M6', 'V'].sample,
partners: [Faker::Artist.name, Faker::Artist.name].to_sentence,
rating: [1, 2, 3].sample,
climbing_date: Faker::Date.backward(days: 23),
route_type: [0, 1].sample,
length: Faker::Number.within(range: 100..2500),
hidden: Faker::Boolean.boolean(true_ratio: 0.2),
training: Faker::Boolean.boolean(true_ratio: 0.3)
)
end
# 45 news/information records across the available types and groups.
Management::News::InformationRecord.destroy_all
45.times do
Management::News::InformationRecord.create(
name: Faker::Lorem.sentence,
description: Faker::Lorem.paragraph,
url: [nil, Faker::Internet.url].sample,
news_type: [0, 1, 2].sample,
group_type: [0, 1, 2, 3].sample,
starred: Faker::Boolean.boolean(true_ratio: 0.2),
web: Faker::Boolean.boolean(true_ratio: 0.2)
)
end
# 100 supplementary courses with dates spread around today.
Training::Supplementary::CourseRecord.destroy_all
100.times do
start_time = Faker::Time.between_dates(from: Date.today - 10, to: Date.today + 50, period: :afternoon)
Training::Supplementary::CourseRecord.create(
name: Faker::Lorem.sentence,
place: Faker::Space.constellation,
start_date: start_time,
end_date: [start_time, start_time + 3.days].sample,
organizator_id: Db::User.all.map(&:id).sample,
price_kw: 10,
application_date: start_time - 5.days,
accepted: true,
remarks: Faker::Lorem.paragraph,
category: [0, 1, 2].sample,
price: true,
limit: 10,
one_day: false,
active: true,
open: false,
last_fee_paid: false,
cash: false,
kind: [0, 1, 2, 3, 4, 5].sample,
state: [0, 1, 2, 3].sample,
payment_type: [0, 1].sample,
baner_type: [0, 1].sample,
expired_hours: 24,
end_application_date: start_time - 1.day
)
end
# 50 projects in random states with random coordinators/members.
Management::ProjectRecord.destroy_all
50.times do
Management::ProjectRecord.create(
name: Faker::Lorem.sentence,
description: Faker::Lorem.paragraph,
coordinator_id: Db::User.all.map(&:id).sample,
needed_knowledge: Faker::Lorem.paragraph,
benefits: Faker::Lorem.paragraph,
estimated_time: Faker::Lorem.paragraph,
know_how: Faker::Lorem.paragraph,
users: Db::User.all.sample(2),
state: [:draft, :unassigned, :in_progress, :suspended, :archived].sample,
group_type: [:kw, :snw, :sww].sample
)
end
end
end
end
end
| 45.145161 | 157 | 0.585477 |
79bc249561ba0c4649b2519d4e665b88f1d75fb1 | 1,542 | class ActivemqCpp < Formula
desc "C++ API for message brokers such as Apache ActiveMQ"
homepage "https://activemq.apache.org/components/cms/"
url "https://www.apache.org/dyn/closer.lua?path=activemq/activemq-cpp/3.9.5/activemq-cpp-library-3.9.5-src.tar.bz2"
mirror "https://archive.apache.org/dist/activemq/activemq-cpp/3.9.5/activemq-cpp-library-3.9.5-src.tar.bz2"
sha256 "6bd794818ae5b5567dbdaeb30f0508cc7d03808a4b04e0d24695b2501ba70c15"
license "Apache-2.0"
revision 1
# Prebuilt binary bottles per macOS/Linux target.
bottle do
sha256 cellar: :any, arm64_big_sur: "972d1a36b67866aa3181868044bce04ec8b70cc65e5ebf3e638d5b666c6585f5"
sha256 cellar: :any, big_sur: "7fed9bcc79042fde9d0c97ac83a6fb738523772c4247c828ddc4d6b1154db8fb"
sha256 cellar: :any, catalina: "c06d4253f9496b49b63c224637a97525b13ecb834884a3548adbdafe4dde0a73"
sha256 cellar: :any, mojave: "024bf1c2c3ef8e612180b9f82c98f854235e8e371e01210c142304a762a30b3c"
sha256 cellar: :any, high_sierra: "21855925e7e9ecfe125c959c84a6bce710ca409a2a33f4f8d396f45cc52a4ab9"
sha256 cellar: :any, sierra: "c994de229e86fb7e80c846d6f2b44acba306014f334ba65550c15102214dbcb8"
sha256 cellar: :any, x86_64_linux: "67179742b6afabbeb263b6b971fd7b432cd10d8567a38751586d910d8b4d9b7f" # linuxbrew-core
end
depends_on "pkg-config" => :build
depends_on "apr"
depends_on "[email protected]"
# Standard autotools build into the keg prefix.
def install
system "./configure", "--prefix=#{prefix}"
system "make"
system "make", "install"
end
# Smoke test: the bundled config helper should report its version.
test do
system "#{bin}/activemqcpp-config", "--version"
end
end
| 45.352941 | 123 | 0.767834 |
1a55adbf5f1de0aa772cbfe714582883bae32128 | 2,910 | class Erlang < Formula
desc "Programming language for highly scalable real-time systems"
homepage "https://www.erlang.org/"
# Download tarball from GitHub; it is served faster than the official tarball.
url "https://github.com/erlang/otp/archive/OTP-22.3.tar.gz"
sha256 "886e6dbe1e4823c7e8d9c9c1ba8315075a1a9f7717f5a1eaf3b98345ca6c798e"
head "https://github.com/erlang/otp.git"
# Prebuilt binary bottles per macOS target.
bottle do
cellar :any
sha256 "aeb18f87b3059b6256aba36957d3fb5c475f628c07aee49263003eccba4772f6" => :catalina
sha256 "86d816fcf632c71da3575edb78ef51e2160d39044a42203a011b0d193fe1a290" => :mojave
sha256 "5d10a00f5c377afd12975ad5c3e552fd1b94054bfa4168b137196488e4ab838a" => :high_sierra
end
depends_on "autoconf" => :build
depends_on "automake" => :build
depends_on "libtool" => :build
depends_on "[email protected]"
depends_on "wxmac" # for GUI apps like observer
uses_from_macos "m4" => :build
# Pre-rendered man pages (installed under lib/erlang so `erl -man` works).
resource "man" do
url "https://www.erlang.org/download/otp_doc_man_22.2.tar.gz"
mirror "https://fossies.org/linux/misc/otp_doc_man_22.2.tar.gz"
sha256 "aad7e3795a44091aa33a460e3fdc94efe8757639caeba0b5ba7d79bd91c972b3"
end
# HTML documentation installed into the formula's doc directory.
resource "html" do
url "https://www.erlang.org/download/otp_doc_html_22.2.tar.gz"
mirror "https://fossies.org/linux/misc/otp_doc_html_22.2.tar.gz"
sha256 "09d41810d79fafde293feb48ebb249940eca6f9f5733abb235e37d06b8f482e3"
end
def install
# Work around Xcode 11 clang bug
# https://bitbucket.org/multicoreware/x265/issues/514/wrong-code-generated-on-macos-1015
ENV.append_to_cflags "-fno-stack-check" if DevelopmentTools.clang_build_version >= 1010
# Unset these so that building wx, kernel, compiler and
# other modules doesn't fail with an unintelligable error.
%w[LIBS FLAGS AFLAGS ZFLAGS].each { |k| ENV.delete("ERL_#{k}") }
# Do this if building from a checkout to generate configure
system "./otp_build", "autoconf" if File.exist? "otp_build"
args = %W[
--disable-debug
--disable-silent-rules
--prefix=#{prefix}
--enable-dynamic-ssl-lib
--enable-hipe
--enable-sctp
--enable-shared-zlib
--enable-smp-support
--enable-threads
--enable-wx
--with-ssl=#{Formula["[email protected]"].opt_prefix}
--without-javac
--enable-darwin-64bit
]
# Kernel poll and dtrace support depend on the host macOS/toolchain.
args << "--enable-kernel-poll" if MacOS.version > :el_capitan
args << "--with-dynamic-trace=dtrace" if MacOS::CLT.installed?
system "./configure", *args
system "make"
system "make", "install"
(lib/"erlang").install resource("man").files("man")
doc.install resource("html")
end
def caveats
<<~EOS
Man pages can be found in:
#{opt_lib}/erlang/man
Access them with `erl -man`, or add this directory to MANPATH.
EOS
end
# Smoke test: boot the VM, start the crypto app, and shut down cleanly.
test do
system "#{bin}/erl", "-noshell", "-eval", "crypto:start().", "-s", "init", "stop"
end
end
| 33.068182 | 93 | 0.698282 |
33f94928e36a72f549e0b1b994a8d70c2d4829cd | 470 | # # encoding: utf-8
# Inspec test for recipe artifactory::apache_proxy
# The Inspec reference, with examples and extensive documentation, can be
# found at http://inspec.io/docs/reference/resources/
# Unix-only check: the root account must exist (marked :skip, so the
# example is registered but not executed).
unless os.windows?
# This is an example test, replace with your own test.
describe user('root'), :skip do
it { should exist }
end
end
# This is an example test, replace it with your own test.
describe port(80), :skip do
it { should_not be_listening }
end
| 24.736842 | 73 | 0.72766 |
3329971a782880c742fc9f3ec7f185923477f05b | 1,767 | # -*- ruby -*-
# encoding: utf-8
require File.expand_path("lib/google/cloud/memcache/v1beta2/version", __dir__)
Gem::Specification.new do |gem|
gem.name = "google-cloud-memcache-v1beta2"
# Version comes from the version file required above.
gem.version = Google::Cloud::Memcache::V1beta2::VERSION
gem.authors = ["Google LLC"]
gem.email = "[email protected]"
gem.description = "Google Cloud Memorystore for Memcached API is used for creating and managing Memcached instances in GCP. Note that google-cloud-memcache-v1beta2 is a version-specific client library. For most uses, we recommend installing the main client library google-cloud-memcache instead. See the readme for more details."
gem.summary = "API Client library for the Google Cloud Memorystore for Memcached API"
gem.homepage = "https://github.com/googleapis/google-cloud-ruby"
gem.license = "Apache-2.0"
gem.platform = Gem::Platform::RUBY
# Ship the generated lib/ and proto_docs/ trees plus top-level docs.
gem.files = `git ls-files -- lib/*`.split("\n") +
`git ls-files -- proto_docs/*`.split("\n") +
["README.md", "LICENSE.md", "AUTHENTICATION.md", ".yardopts"]
gem.require_paths = ["lib"]
gem.required_ruby_version = ">= 2.4"
gem.add_dependency "gapic-common", "~> 0.3"
gem.add_dependency "google-cloud-errors", "~> 1.0"
gem.add_development_dependency "google-style", "~> 1.24.0"
gem.add_development_dependency "minitest", "~> 5.14"
gem.add_development_dependency "minitest-focus", "~> 1.1"
gem.add_development_dependency "minitest-rg", "~> 5.2"
gem.add_development_dependency "rake", ">= 12.0"
gem.add_development_dependency "redcarpet", "~> 3.0"
gem.add_development_dependency "simplecov", "~> 0.18"
gem.add_development_dependency "yard", "~> 0.9"
end
| 46.5 | 333 | 0.677985 |
f89361c49167929b792b39b70fc48706067597b8 | 3,718 | # encoding: UTF-8
module TZInfo
module Definitions
module America
module Argentina
module Cordoba
include TimezoneDefinition
timezone 'America/Argentina/Cordoba' do |tz|
tz.offset :o0, -15408, 0, :LMT
tz.offset :o1, -15408, 0, :CMT
tz.offset :o2, -14400, 0, :'-04'
tz.offset :o3, -14400, 3600, :'-03'
tz.offset :o4, -10800, 0, :'-03'
tz.offset :o5, -10800, 3600, :'-02'
tz.transition 1894, 10, :o1, 1447879607, 600
tz.transition 1920, 5, :o2, 1453467407, 600
tz.transition 1930, 12, :o3, 7278935, 3
tz.transition 1931, 4, :o2, 19411461, 8
tz.transition 1931, 10, :o3, 7279889, 3
tz.transition 1932, 3, :o2, 19414141, 8
tz.transition 1932, 11, :o3, 7281038, 3
tz.transition 1933, 3, :o2, 19417061, 8
tz.transition 1933, 11, :o3, 7282133, 3
tz.transition 1934, 3, :o2, 19419981, 8
tz.transition 1934, 11, :o3, 7283228, 3
tz.transition 1935, 3, :o2, 19422901, 8
tz.transition 1935, 11, :o3, 7284323, 3
tz.transition 1936, 3, :o2, 19425829, 8
tz.transition 1936, 11, :o3, 7285421, 3
tz.transition 1937, 3, :o2, 19428749, 8
tz.transition 1937, 11, :o3, 7286516, 3
tz.transition 1938, 3, :o2, 19431669, 8
tz.transition 1938, 11, :o3, 7287611, 3
tz.transition 1939, 3, :o2, 19434589, 8
tz.transition 1939, 11, :o3, 7288706, 3
tz.transition 1940, 3, :o2, 19437517, 8
tz.transition 1940, 7, :o3, 7289435, 3
tz.transition 1941, 6, :o2, 19441285, 8
tz.transition 1941, 10, :o3, 7290848, 3
tz.transition 1943, 8, :o2, 19447501, 8
tz.transition 1943, 10, :o3, 7293038, 3
tz.transition 1946, 3, :o2, 19455045, 8
tz.transition 1946, 10, :o3, 7296284, 3
tz.transition 1963, 10, :o2, 19506429, 8
tz.transition 1963, 12, :o3, 7315136, 3
tz.transition 1964, 3, :o2, 19507645, 8
tz.transition 1964, 10, :o3, 7316051, 3
tz.transition 1965, 3, :o2, 19510565, 8
tz.transition 1965, 10, :o3, 7317146, 3
tz.transition 1966, 3, :o2, 19513485, 8
tz.transition 1966, 10, :o3, 7318241, 3
tz.transition 1967, 4, :o2, 19516661, 8
tz.transition 1967, 10, :o3, 7319294, 3
tz.transition 1968, 4, :o2, 19519629, 8
tz.transition 1968, 10, :o3, 7320407, 3
tz.transition 1969, 4, :o2, 19522541, 8
tz.transition 1969, 10, :o4, 7321499, 3
tz.transition 1974, 1, :o5, 128142000
tz.transition 1974, 5, :o4, 136605600
tz.transition 1988, 12, :o5, 596948400
tz.transition 1989, 3, :o4, 605066400
tz.transition 1989, 10, :o5, 624423600
tz.transition 1990, 3, :o4, 636516000
tz.transition 1990, 10, :o5, 656478000
tz.transition 1991, 3, :o2, 667965600
tz.transition 1991, 10, :o5, 687931200
tz.transition 1992, 3, :o4, 699415200
tz.transition 1992, 10, :o5, 719377200
tz.transition 1993, 3, :o4, 731469600
tz.transition 1999, 10, :o3, 938919600
tz.transition 2000, 3, :o4, 952052400
tz.transition 2007, 12, :o5, 1198983600
tz.transition 2008, 3, :o4, 1205632800
tz.transition 2008, 10, :o5, 1224385200
tz.transition 2009, 3, :o4, 1237082400
end
end
end
end
end
end
| 43.741176 | 56 | 0.539268 |
acc421106e25838bc4b3b894056a6b411321d0ce | 171 | # frozen_string_literal: true
RSpec.describe RSpecJSONAPISerializer do
it "has a version number" do
expect(RSpecJSONAPISerializer::VERSION).not_to be nil
end
end
| 21.375 | 57 | 0.789474 |
e2b8cfa1f2ad92f816dd3c1b025de2b565bbc1fb | 265 | #!/usr/bin/ruby
require 'json'
runtime = JSON.parse(%x{xcrun simctl list devices --json})['runtimes']
.select{|x| (x['identifier'].include? 'com.apple.CoreSimulator.SimRuntime.iOS') &&
(x['availability'] == "(available)")}[0]["identifier"]
puts runtime
| 26.5 | 86 | 0.660377 |
e2c65bba804dbab8aa86ec02d82faeb2ef70ecda | 45,474 | //
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
[discriminatedType 'KnxNetIpMessage'
[implicit uint 8 'headerLength' '6']
[const uint 8 'protocolVersion' '0x10']
[discriminator uint 16 'msgType']
[implicit uint 16 'totalLength' 'lengthInBytes']
[typeSwitch 'msgType'
['0x0201' SearchRequest
[simple HPAIDiscoveryEndpoint 'hpaiIDiscoveryEndpoint']
]
['0x0202' SearchResponse
[simple HPAIControlEndpoint 'hpaiControlEndpoint']
[simple DIBDeviceInfo 'dibDeviceInfo']
[simple DIBSuppSvcFamilies 'dibSuppSvcFamilies']
]
['0x0203' DescriptionRequest
[simple HPAIControlEndpoint 'hpaiControlEndpoint']
]
['0x0204' DescriptionResponse
[simple DIBDeviceInfo 'dibDeviceInfo']
[simple DIBSuppSvcFamilies 'dibSuppSvcFamilies']
]
['0x0205' ConnectionRequest
[simple HPAIDiscoveryEndpoint 'hpaiDiscoveryEndpoint']
[simple HPAIDataEndpoint 'hpaiDataEndpoint']
[simple ConnectionRequestInformation 'connectionRequestInformation']
]
['0x0206' ConnectionResponse
[simple uint 8 'communicationChannelId']
[simple Status 'status']
[optional HPAIDataEndpoint 'hpaiDataEndpoint' 'status == Status.NO_ERROR']
[optional ConnectionResponseDataBlock 'connectionResponseDataBlock' 'status == Status.NO_ERROR']
]
['0x0207' ConnectionStateRequest
[simple uint 8 'communicationChannelId']
[reserved uint 8 '0x00']
[simple HPAIControlEndpoint 'hpaiControlEndpoint']
]
['0x0208' ConnectionStateResponse
[simple uint 8 'communicationChannelId']
[simple Status 'status']
]
['0x0209' DisconnectRequest
[simple uint 8 'communicationChannelId']
[reserved uint 8 '0x00']
[simple HPAIControlEndpoint 'hpaiControlEndpoint']
]
['0x020A' DisconnectResponse
[simple uint 8 'communicationChannelId']
[simple Status 'status']
]
['0x020B' UnknownMessage [uint 16 'totalLength']
[array int 8 'unknownData' count 'totalLength - 6']
]
['0x0310' DeviceConfigurationRequest [uint 16 'totalLength']
[simple DeviceConfigurationRequestDataBlock 'deviceConfigurationRequestDataBlock']
[simple CEMI 'cemi' ['totalLength - (6 + deviceConfigurationRequestDataBlock.lengthInBytes)']]
]
['0x0311' DeviceConfigurationAck
[simple DeviceConfigurationAckDataBlock 'deviceConfigurationAckDataBlock']
]
['0x0420' TunnelingRequest [uint 16 'totalLength']
[simple TunnelingRequestDataBlock 'tunnelingRequestDataBlock']
[simple CEMI 'cemi' ['totalLength - (6 + tunnelingRequestDataBlock.lengthInBytes)']]
]
['0x0421' TunnelingResponse
[simple TunnelingResponseDataBlock 'tunnelingResponseDataBlock']
]
['0x0530' RoutingIndication
]
]
]
[type 'HPAIDiscoveryEndpoint'
[implicit uint 8 'structureLength' 'lengthInBytes']
[simple HostProtocolCode 'hostProtocolCode']
[simple IPAddress 'ipAddress']
[simple uint 16 'ipPort']
]
[type 'HPAIControlEndpoint'
[implicit uint 8 'structureLength' 'lengthInBytes']
[simple HostProtocolCode 'hostProtocolCode']
[simple IPAddress 'ipAddress']
[simple uint 16 'ipPort']
]
[type 'DIBDeviceInfo'
[implicit uint 8 'structureLength' 'lengthInBytes']
[simple uint 8 'descriptionType']
[simple KnxMedium 'knxMedium']
[simple DeviceStatus 'deviceStatus']
[simple KnxAddress 'knxAddress']
[simple ProjectInstallationIdentifier 'projectInstallationIdentifier']
[array int 8 'knxNetIpDeviceSerialNumber' count '6']
[simple IPAddress 'knxNetIpDeviceMulticastAddress']
[simple MACAddress 'knxNetIpDeviceMacAddress']
[array int 8 'deviceFriendlyName' count '30']
]
[type 'DIBSuppSvcFamilies'
[implicit uint 8 'structureLength' 'lengthInBytes']
[simple uint 8 'descriptionType']
[array ServiceId 'serviceIds' length 'structureLength - 2']
]
[type 'HPAIDataEndpoint'
[implicit uint 8 'structureLength' 'lengthInBytes']
[simple HostProtocolCode 'hostProtocolCode']
[simple IPAddress 'ipAddress']
[simple uint 16 'ipPort']
]
[discriminatedType 'ConnectionRequestInformation'
[implicit uint 8 'structureLength' 'lengthInBytes']
[discriminator uint 8 'connectionType']
[typeSwitch 'connectionType'
['0x03' ConnectionRequestInformationDeviceManagement
]
['0x04' ConnectionRequestInformationTunnelConnection
[simple KnxLayer 'knxLayer']
[reserved uint 8 '0x00']
]
]
]
[discriminatedType 'ConnectionResponseDataBlock'
[implicit uint 8 'structureLength' 'lengthInBytes']
[discriminator uint 8 'connectionType']
[typeSwitch 'connectionType'
['0x03' ConnectionResponseDataBlockDeviceManagement
]
['0x04' ConnectionResponseDataBlockTunnelConnection
[simple KnxAddress 'knxAddress']
]
]
]
[type 'DeviceConfigurationRequestDataBlock'
[implicit uint 8 'structureLength' 'lengthInBytes']
[simple uint 8 'communicationChannelId']
[simple uint 8 'sequenceCounter']
[reserved uint 8 '0x00']
]
[type 'DeviceConfigurationAckDataBlock'
[implicit uint 8 'structureLength' 'lengthInBytes']
[simple uint 8 'communicationChannelId']
[simple uint 8 'sequenceCounter']
[simple Status 'status']
]
[type 'TunnelingRequestDataBlock'
[implicit uint 8 'structureLength' 'lengthInBytes']
[simple uint 8 'communicationChannelId']
[simple uint 8 'sequenceCounter']
[reserved uint 8 '0x00']
]
[type 'TunnelingResponseDataBlock'
[implicit uint 8 'structureLength' 'lengthInBytes']
[simple uint 8 'communicationChannelId']
[simple uint 8 'sequenceCounter']
[simple Status 'status']
]
[type 'IPAddress'
[array int 8 'addr' count '4']
]
[type 'MACAddress'
[array int 8 'addr' count '6']
]
[type 'KnxAddress'
[simple uint 4 'mainGroup']
[simple uint 4 'middleGroup']
[simple uint 8 'subGroup']
]
[type 'DeviceStatus'
[reserved uint 7 '0x00']
[simple bit 'programMode']
]
[type 'ProjectInstallationIdentifier'
[simple uint 8 'projectNumber']
[simple uint 8 'installationNumber']
]
[discriminatedType 'ServiceId'
[discriminator uint 8 'serviceType']
[typeSwitch 'serviceType'
['0x02' KnxNetIpCore
[simple uint 8 'version']
]
['0x03' KnxNetIpDeviceManagement
[simple uint 8 'version']
]
['0x04' KnxNetIpTunneling
[simple uint 8 'version']
]
['0x05' KnxNetIpRouting
[simple uint 8 'version']
]
// TODO: Check if this shouldn't be KnxNetIp instead of KnxNet
['0x06' KnxNetRemoteLogging
[simple uint 8 'version']
]
// TODO: Check if this shouldn't be KnxNetIp instead of KnxNet
['0x07' KnxNetRemoteConfigurationAndDiagnosis
[simple uint 8 'version']
]
// TODO: Check if this shouldn't be KnxNetIp instead of KnxNet
['0x08' KnxNetObjectServer
[simple uint 8 'version']
]
]
]
// The CEMI part is described in the document
// "03_06_03 EMI_IMI v01.03.03 AS" Page 6ff
// NOTE: When inspecting traffic in WireShark it seems they got the
// standard/extended frame thing wrong. When comparing to the spec most
// normal traffic is actually extended frames.
[discriminatedType 'CEMI' [uint 8 'size']
[discriminator uint 8 'messageCode']
[typeSwitch 'messageCode'
['0x2B' LBusmonInd
[simple uint 8 'additionalInformationLength']
[array CEMIAdditionalInformation 'additionalInformation' length 'additionalInformationLength']
[simple LDataFrame 'dataFrame']
[optional uint 8 'crc' 'dataFrame.notAckFrame']
]
// Page 72ff
['0x11' LDataReq
[simple uint 8 'additionalInformationLength']
[array CEMIAdditionalInformation 'additionalInformation' length 'additionalInformationLength']
[simple LDataFrame 'dataFrame']
]
['0x29' LDataInd
[simple uint 8 'additionalInformationLength']
[array CEMIAdditionalInformation 'additionalInformation' length 'additionalInformationLength']
[simple LDataFrame 'dataFrame']
]
['0x2E' LDataCon
[simple uint 8 'additionalInformationLength']
[array CEMIAdditionalInformation 'additionalInformation' length 'additionalInformationLength']
[simple LDataFrame 'dataFrame']
]
['0x10' LRawReq
]
['0x2D' LRawInd
]
['0x2F' LRawCon
]
['0x13' LPollDataReq
]
['0x25' LPollDataCon
]
['0x41' TDataConnectedReq
]
['0x89' TDataConnectedInd
]
['0x4A' TDataIndividualReq
]
['0x94' TDataIndividualInd
]
['0xFC' MPropReadReq
[simple uint 16 'interfaceObjectType']
[simple uint 8 'objectInstance']
[simple uint 8 'propertyId']
[simple uint 4 'numberOfElements']
[simple uint 12 'startIndex']
]
['0xFB' MPropReadCon
[simple uint 16 'interfaceObjectType']
[simple uint 8 'objectInstance']
[simple uint 8 'propertyId']
[simple uint 4 'numberOfElements']
[simple uint 12 'startIndex']
[simple uint 16 'unknown']
]
['0xF6' MPropWriteReq
]
['0xF5' MPropWriteCon
]
['0xF7' MPropInfoInd
]
['0xF8' MFuncPropCommandReq
]
['0xF9' MFuncPropStateReadReq
]
['0xFA' MFuncPropCon
]
['0xF1' MResetReq
]
['0xF0' MResetInd
]
]
]
[discriminatedType 'CEMIAdditionalInformation'
[discriminator uint 8 'additionalInformationType']
[typeSwitch 'additionalInformationType'
['0x03' CEMIAdditionalInformationBusmonitorInfo
[const uint 8 'len' '1']
[simple bit 'frameErrorFlag']
[simple bit 'bitErrorFlag']
[simple bit 'parityErrorFlag']
[simple bit 'unknownFlag']
[simple bit 'lostFlag']
[simple uint 3 'sequenceNumber']
]
['0x04' CEMIAdditionalInformationRelativeTimestamp
[const uint 8 'len' '2']
[simple RelativeTimestamp 'relativeTimestamp']
]
]
]
// The CEMI part is described in the document "03_06_03 EMI_IMI v01.03.03 AS" Page 73
// "03_02_02 Communication Medium TP1 v01.02.02 AS" Page 27
[discriminatedType 'LDataFrame'
[simple bit 'frameType']
[discriminator bit 'polling']
[simple bit 'notRepeated']
[discriminator bit 'notAckFrame']
[enum CEMIPriority 'priority']
[simple bit 'acknowledgeRequested']
[simple bit 'errorFlag']
// "03_02_02 Communication Medium TP1 v01.02.02 AS" Page 27
[typeSwitch 'notAckFrame','polling'
// Page 29ff
// TODO: For some reason it doesn't seem to matter what the frame format is set to, it always seems to be an extended frame
// ['true','false','false' LDataExtended
['true','false' LDataExtended
[simple bit 'groupAddress']
[simple uint 3 'hopCount']
[simple uint 4 'extendedFrameFormat']
[simple KnxAddress 'sourceAddress']
[array int 8 'destinationAddress' count '2']
[implicit uint 8 'dataLength' 'apdu.lengthInBytes - 1']
[simple Apdu 'apdu' ['dataLength']]
]
// Page 28ff
//['true','false','true' LDataStandard
// [simple KnxAddress 'sourceAddress']
// [array int 8 'destinationAddress' count '2']
// [simple bit 'groupAddress']
// [simple uint 3 'hopCount']
// [simple uint 4 'dataLength']
// [simple Apdu 'apdu' ['dataLength']]
//]
// Page 31ff
//['true','true','true' LPollData
['true','true' LPollData
[simple KnxAddress 'sourceAddress']
[array int 8 'targetAddress' count '2']
[reserved uint 4 '0x00']
[simple uint 6 'numberExpectedPollData']
]
['false' LDataFrameACK
// TODO: Implement this
]
]
]
[discriminatedType 'Apdu' [uint 8 'dataLength']
// 10_01 Logical Tag Extended v01.02.01 AS.pdf Page 74ff
[discriminator uint 1 'control']
[simple bit 'numbered']
[simple uint 4 'counter']
[typeSwitch 'control'
['1' ApduControlContainer
[simple ApduControl 'controlApdu']
]
['0' ApduDataContainer [uint 8 'dataLength']
[simple ApduData 'dataApdu' ['dataLength']]
]
]
]
[discriminatedType 'ApduControl'
[discriminator uint 2 'controlType']
[typeSwitch 'controlType'
['0x0' ApduControlConnect
]
['0x1' ApduControlDisconnect
]
['0x2' ApduControlAck
]
['0x3' ApduControlNack
]
]
]
[discriminatedType 'ApduData' [uint 8 'dataLength']
[discriminator uint 4 'apciType']
// 03_03_07 Application Layer v01.06.02 AS Page 9ff
[typeSwitch 'apciType'
['0x0' ApduDataGroupValueRead
[reserved uint 6 '0x00']
]
['0x1' ApduDataGroupValueResponse [uint 8 'dataLength']
[simple int 6 'dataFirstByte']
[array int 8 'data' count '(dataLength < 1) ? 0 : dataLength - 1']
]
['0x2' ApduDataGroupValueWrite [uint 8 'dataLength']
[simple int 6 'dataFirstByte']
[array int 8 'data' count '(dataLength < 1) ? 0 : dataLength - 1']
]
['0x3' ApduDataIndividualAddressWrite
]
['0x4' ApduDataIndividualAddressRead
]
['0x5' ApduDataIndividualAddressResponse
]
['0x6' ApduDataAdcRead
]
// In case of this type the following 6 bits contain more detailed information
['0x7' ApduDataAdcResponse
]
['0x8' ApduDataMemoryRead
[simple uint 6 'numBytes']
[simple uint 16 'address']
]
['0x9' ApduDataMemoryResponse
[implicit uint 6 'numBytes' 'COUNT(data)']
[simple uint 16 'address']
[array uint 8 'data' count 'numBytes']
]
['0xA' ApduDataMemoryWrite
]
// In case of this type the following 6 bits contain more detailed information
['0xB' ApduDataUserMessage
]
['0xC' ApduDataDeviceDescriptorRead
[simple uint 6 'descriptorType']
]
['0xD' ApduDataDeviceDescriptorResponse [uint 8 'dataLength']
[simple uint 6 'descriptorType']
[array int 8 'data' count '(dataLength < 1) ? 0 : dataLength - 1']
]
['0xE' ApduDataRestart
]
['0xF' ApduDataOther [uint 8 'dataLength']
[simple ApduDataExt 'extendedApdu' ['dataLength']]
]
]
]
// 03_03_07 Application Layer v01.06.02 AS Page 9ff
[discriminatedType 'ApduDataExt' [uint 8 'length']
[discriminator uint 6 'extApciType']
[typeSwitch 'extApciType'
['0x00' ApduDataExtOpenRoutingTableRequest
]
['0x01' ApduDataExtReadRoutingTableRequest
]
['0x02' ApduDataExtReadRoutingTableResponse
]
['0x03' ApduDataExtWriteRoutingTableRequest
]
['0x08' ApduDataExtReadRouterMemoryRequest
]
['0x09' ApduDataExtReadRouterMemoryResponse
]
['0x0A' ApduDataExtWriteRouterMemoryRequest
]
['0x0D' ApduDataExtReadRouterStatusRequest
]
['0x0E' ApduDataExtReadRouterStatusResponse
]
['0x0F' ApduDataExtWriteRouterStatusRequest
]
['0x10' ApduDataExtMemoryBitWrite
]
['0x11' ApduDataExtAuthorizeRequest
[simple uint 8 'level']
[array uint 8 'data' count '4']
]
['0x12' ApduDataExtAuthorizeResponse
[simple uint 8 'level']
]
['0x13' ApduDataExtKeyWrite
]
['0x14' ApduDataExtKeyResponse
]
['0x15' ApduDataExtPropertyValueRead
[simple uint 8 'objectIndex']
[simple uint 8 'propertyId']
[simple uint 4 'count']
[simple uint 12 'index']
]
['0x16' ApduDataExtPropertyValueResponse [uint 8 'length']
[simple uint 8 'objectIndex']
[simple uint 8 'propertyId']
[simple uint 4 'count']
[simple uint 12 'index']
[array uint 8 'data' count 'length - 5']
]
['0x17' ApduDataExtPropertyValueWrite [uint 8 'length']
[simple uint 8 'objectIndex']
[simple uint 8 'propertyId']
[simple uint 4 'count']
[simple uint 12 'index']
[array uint 8 'data' count 'length - 5']
]
['0x18' ApduDataExtPropertyDescriptionRead
[simple uint 8 'objectIndex']
[simple uint 8 'propertyId']
[simple uint 8 'index']
]
['0x19' ApduDataExtPropertyDescriptionResponse
[simple uint 8 'objectIndex']
[simple uint 8 'propertyId']
[simple uint 8 'index']
[simple bit 'writeEnabled']
[reserved uint 1 '0x0']
[simple KnxPropertyDataType 'propertyDataType']
[reserved uint 4 '0x0']
[simple uint 12 'maxNrOfElements']
[simple AccessLevel 'readLevel']
[simple AccessLevel 'writeLevel']
]
['0x1A' ApduDataExtNetworkParameterRead
]
['0x1B' ApduDataExtNetworkParameterResponse
]
['0x1C' ApduDataExtIndividualAddressSerialNumberRead
]
['0x1D' ApduDataExtIndividualAddressSerialNumberResponse
]
['0x1E' ApduDataExtIndividualAddressSerialNumberWrite
]
['0x20' ApduDataExtDomainAddressWrite
]
['0x21' ApduDataExtDomainAddressRead
]
['0x22' ApduDataExtDomainAddressResponse
]
['0x23' ApduDataExtDomainAddressSelectiveRead
]
['0x24' ApduDataExtNetworkParameterWrite
]
['0x25' ApduDataExtLinkRead
]
['0x26' ApduDataExtLinkResponse
]
['0x27' ApduDataExtLinkWrite
]
['0x28' ApduDataExtGroupPropertyValueRead
]
['0x29' ApduDataExtGroupPropertyValueResponse
]
['0x2A' ApduDataExtGroupPropertyValueWrite
]
['0x2B' ApduDataExtGroupPropertyValueInfoReport
]
['0x2C' ApduDataExtDomainAddressSerialNumberRead
]
['0x2D' ApduDataExtDomainAddressSerialNumberResponse
]
['0x2E' ApduDataExtDomainAddressSerialNumberWrite
]
['0x30' ApduDataExtFileStreamInfoReport
]
]
]
[type 'RelativeTimestamp'
[simple uint 16 'timestamp']
]
[discriminatedType 'KnxGroupAddress' [uint 2 'numLevels']
[typeSwitch 'numLevels'
['1' KnxGroupAddressFreeLevel
[simple uint 16 'subGroup']
]
['2' KnxGroupAddress2Level
[simple uint 5 'mainGroup']
[simple uint 11 'subGroup']
]
['3' KnxGroupAddress3Level
[simple uint 5 'mainGroup']
[simple uint 3 'middleGroup']
[simple uint 8 'subGroup']
]
]
]
[enum uint 2 'CEMIPriority'
['0x0' SYSTEM]
['0x1' NORMAL]
['0x2' URGENT]
['0x3' LOW]
]
[enum uint 8 'Status'
['0x00' NO_ERROR]
['0x01' PROTOCOL_TYPE_NOT_SUPPORTED]
['0x02' UNSUPPORTED_PROTOCOL_VERSION]
['0x04' OUT_OF_ORDER_SEQUENCE_NUMBER]
['0x21' INVALID_CONNECTION_ID]
['0x22' CONNECTION_TYPE_NOT_SUPPORTED]
['0x23' CONNECTION_OPTION_NOT_SUPPORTED]
['0x24' NO_MORE_CONNECTIONS]
['0x25' NO_MORE_UNIQUE_CONNECTIONS]
['0x26' DATA_CONNECTION]
['0x27' KNX_CONNECTION]
['0x29' TUNNELLING_LAYER_NOT_SUPPORTED]
]
[enum uint 8 'HostProtocolCode'
['0x01' IPV4_UDP]
['0x02' IPV4_TCP]
]
// The mode in which the connection should be established:
// TUNNEL_LINK_LAYER The gateway assigns a unique KNX address to the client.
// The client can then actively participate in communicating
// with other KNX devices.
// TUNNEL_RAW The gateway will just pass along the packets and not
// automatically generate Ack frames for the packets it
// receives for a given client.
// TUNNEL_BUSMONITOR The client becomes a passive participant and all frames
// on the KNX bus get forwarded to the client. Only one
// Busmonitor connection is allowed at any given time.
[enum uint 8 'KnxLayer'
['0x02' TUNNEL_LINK_LAYER]
['0x04' TUNNEL_RAW]
['0x80' TUNNEL_BUSMONITOR]
]
[enum uint 8 'KnxMedium'
['0x01' MEDIUM_RESERVED_1]
['0x02' MEDIUM_TP1]
['0x04' MEDIUM_PL110]
['0x08' MEDIUM_RESERVED_2]
['0x10' MEDIUM_RF]
['0x20' MEDIUM_KNX_IP]
]
[enum uint 8 'SupportedPhysicalMedia' [string 'description', bit 'knxSupport']
['0x00' OTHER ['used_for_undefined_physical_medium', 'true']]
['0x01' OIL_METER ['measures_volume_of_oil', 'true']]
['0x02' ELECTRICITY_METER ['measures_electric_energy', 'true']]
['0x03' GAS_METER ['measures_volume_of_gaseous_energy', 'true']]
['0x04' HEAT_METER ['heat_energy_measured_in_outlet_pipe', 'true']]
['0x05' STEAM_METER ['measures_weight_of_hot_steam', 'true']]
['0x06' WARM_WATER_METER ['measured_heated_water_volume', 'true']]
['0x07' WATER_METER ['measured_water_volume', 'true']]
['0x08' HEAT_COST_ALLOCATOR ['measured_relative_cumulated_heat_consumption', 'true']]
['0x09' COMPRESSED_AIR ['measures_weight_of_compressed_air', 'false']]
['0x0A' COOLING_LOAD_METER_INLET ['cooling_energy_measured_in_inlet_pipe', 'true']]
['0x0B' COOLING_LOAD_METER_OUTLET ['cooling_energy_measured_in_outlet_pipe', 'true']]
['0x0C' HEAT_INLET ['heat_energy_measured_in_inlet_pipe', 'true']]
['0x0D' HEAT_AND_COOL ['measures_both_heat_and_cool', 'true']]
['0x0E' BUS_OR_SYSTEM ['no_meter', 'false']]
['0x0F' UNKNOWN_DEVICE_TYPE ['used_for_undefined_physical_medium', 'false']]
['0x20' BREAKER ['status_of_electric_energy_supply', 'true']]
['0x21' VALVE ['status_of_supply_of_Gas_or_water', 'true']]
['0x28' WASTE_WATER_METER ['measured_volume_of_disposed_water', 'true']]
['0x29' GARBAGE ['measured_weight_of_disposed_rubbish', 'true']]
['0x37' RADIO_CONVERTER ['enables_the_radio_transmission_of_a_meter_without_a_radio_interface', 'false']]
]
// The definition of the constants for medium type in the device descriptor differs from that of the other parts
// 03_05_01 Resources v01.09.03 AS.pdf Page 22
[enum uint 4 'DeviceDescriptorMediumType'
['0x0' TP1 ]
['0x1' PL110 ]
['0x2' RF ]
['0x3' TP0 ]
['0x4' PL132 ]
['0x5' KNX_IP ]
]
// 03_05_01 Resources v01.09.03 AS.pdf Page 22
// REMARK: The last digit is intentionally set to 0 so this enum code can only be used as a mask.
[enum uint 16 'FirmwareType'
['0x0010' SYSTEM_1 ]
['0x0020' SYSTEM_2 ]
['0x0300' SYSTEM_300 ]
['0x0700' SYSTEM_7 ]
['0x07B0' SYSTEM_B ]
['0x0810' IR_DECODER ]
['0x0910' COUPLER ]
['0x0AF0' NONE ]
['0x10B0' SYSTEM_1_PL110 ]
['0x17B0' SYSTEM_B_PL110 ]
['0x1900' MEDIA_COUPLER_PL_TP ]
['0x2000' RF_BI_DIRECTIONAL_DEVICES ]
['0x2100' RF_UNI_DIRECTIONAL_DEVICES]
['0x3000' SYSTEM_1_TP0 ]
['0x4000' SYSTEM_1_PL132 ]
['0x5700' SYSTEM_7_KNX_NET_IP ]
]
// Helper enum that binds the combinations of medium type and firmware
// type to the pre-defined constants the spec defines
// 03_05_01 Resources v01.09.03 AS.pdf Page 22
[enum uint 16 'DeviceDescriptor' [DeviceDescriptorMediumType 'mediumType', FirmwareType 'firmwareType' ]
['0x0010' TP1_BCU_1_SYSTEM_1_0 ['DeviceDescriptorMediumType.TP1', 'FirmwareType.SYSTEM_1' ]]
['0x0011' TP1_BCU_1_SYSTEM_1_1 ['DeviceDescriptorMediumType.TP1', 'FirmwareType.SYSTEM_1' ]]
['0x0012' TP1_BCU_1_SYSTEM_1_2 ['DeviceDescriptorMediumType.TP1', 'FirmwareType.SYSTEM_1' ]]
['0x0013' TP1_BCU_1_SYSTEM_1_3 ['DeviceDescriptorMediumType.TP1', 'FirmwareType.SYSTEM_1' ]]
['0x0020' TP1_BCU_2_SYSTEM_2_0 ['DeviceDescriptorMediumType.TP1', 'FirmwareType.SYSTEM_2' ]]
['0x0021' TP1_BCU_2_SYSTEM_2_1 ['DeviceDescriptorMediumType.TP1', 'FirmwareType.SYSTEM_2' ]]
['0x0025' TP1_BCU_2_SYSTEM_2_5 ['DeviceDescriptorMediumType.TP1', 'FirmwareType.SYSTEM_2' ]]
['0x0300' TP1_SYSTEM_300 ['DeviceDescriptorMediumType.TP1', 'FirmwareType.SYSTEM_300' ]]
['0x0700' TP1_BIM_M112_0 ['DeviceDescriptorMediumType.TP1', 'FirmwareType.SYSTEM_7' ]]
['0x0701' TP1_BIM_M112_1 ['DeviceDescriptorMediumType.TP1', 'FirmwareType.SYSTEM_7' ]]
['0x0705' TP1_BIM_M112_5 ['DeviceDescriptorMediumType.TP1', 'FirmwareType.SYSTEM_7' ]]
['0x07B0' TP1_SYSTEM_B ['DeviceDescriptorMediumType.TP1', 'FirmwareType.SYSTEM_B' ]]
['0x0810' TP1_IR_DECODER_0 ['DeviceDescriptorMediumType.TP1', 'FirmwareType.IR_DECODER' ]]
['0x0811' TP1_IR_DECODER_1 ['DeviceDescriptorMediumType.TP1', 'FirmwareType.IR_DECODER' ]]
['0x0910' TP1_COUPLER_0 ['DeviceDescriptorMediumType.TP1', 'FirmwareType.COUPLER' ]]
['0x0911' TP1_COUPLER_1 ['DeviceDescriptorMediumType.TP1', 'FirmwareType.COUPLER' ]]
['0x0912' TP1_COUPLER_2 ['DeviceDescriptorMediumType.TP1', 'FirmwareType.COUPLER' ]]
['0x091A' TP1_KNXNETIP_ROUTER ['DeviceDescriptorMediumType.TP1', 'FirmwareType.COUPLER' ]]
['0x0AFD' TP1_NONE_D ['DeviceDescriptorMediumType.TP1', 'FirmwareType.NONE' ]]
['0x0AFE' TP1_NONE_E ['DeviceDescriptorMediumType.TP1', 'FirmwareType.NONE' ]]
['0x1012' PL110_BCU_1_2 ['DeviceDescriptorMediumType.PL110', 'FirmwareType.SYSTEM_1' ]]
['0x1013' PL110_BCU_1_3 ['DeviceDescriptorMediumType.PL110', 'FirmwareType.SYSTEM_1' ]]
['0x17B0' PL110_SYSTEM_B ['DeviceDescriptorMediumType.PL110', 'FirmwareType.SYSTEM_B' ]]
['0x1900' PL110_MEDIA_COUPLER_PL_TP ['DeviceDescriptorMediumType.PL110', 'FirmwareType.MEDIA_COUPLER_PL_TP' ]]
['0x2010' RF_BI_DIRECTIONAL_DEVICES ['DeviceDescriptorMediumType.RF', 'FirmwareType.RF_BI_DIRECTIONAL_DEVICES' ]]
['0x2110' RF_UNI_DIRECTIONAL_DEVICES['DeviceDescriptorMediumType.RF', 'FirmwareType.RF_UNI_DIRECTIONAL_DEVICES']]
['0x3012' TP0_BCU_1 ['DeviceDescriptorMediumType.TP0', 'FirmwareType.SYSTEM_1' ]]
['0x4012' PL132_BCU_1 ['DeviceDescriptorMediumType.PL132', 'FirmwareType.SYSTEM_1' ]]
['0x5705' KNX_IP_SYSTEM7 ['DeviceDescriptorMediumType.KNX_IP', 'FirmwareType.SYSTEM_7' ]]
]
[enum uint 4 'AccessLevel' [string 'purpose', bit 'needsAuthentication']
['0x0' Level0 ['"system manufacturer"', 'true' ]]
['0x1' Level1 ['"product manufacturer"', 'true' ]]
['0x2' Level2 ['"configuration"', 'true' ]]
['0x3' Level3 ['"end-user"', 'false' ]]
['0xF' Level15 ['"read access"', 'false' ]]
]
// 03_05_01 Resources v01.09.03 AS.pdf Page 23ff
[type 'DeviceDescriptorType2'
// Same manufacturer id as used elsewhere (Assigned by KNX Association)
[simple uint 16 'manufacturerId' ]
// Manufacturer specific device type id
[simple uint 16 'deviceType' ]
// Manufacturer specific device type version
[simple uint 8 'version' ]
// Indicates the Network Management procedures based on A_Link_Read-service are supported
[simple bit 'readSupported' ]
// Indicates the Network Management procedures based on A_Link_Write-service are supported
[simple bit 'writeSupported' ]
[simple uint 6 'logicalTagBase' ]
[simple ChannelInformation 'channelInfo1' ]
[simple ChannelInformation 'channelInfo2' ]
[simple ChannelInformation 'channelInfo3' ]
[simple ChannelInformation 'channelInfo4' ]
]
// 03_05_01 Resources v01.09.03 AS.pdf Page 24
[type 'ChannelInformation'
[simple uint 3 'numChannels']
[simple uint 13 'channelCode']
]
// Switch constants taken from the generated KnxPropertyDataType type
// Representation looked up in:
// - 03_05_01 Resources v01.09.03 AS.pdf
// - 03_07_03 Standardized Identifier Tables v01.03.01 AS.pdf
// - 03_07_02 Datapoint Types v01.08.02 AS.pdf
[dataIo 'KnxProperty' [KnxPropertyDataType 'propertyType', uint 8 'dataLengthInBytes']
[typeSwitch 'propertyType','dataLengthInBytes'
['KnxPropertyDataType.PDT_CONTROL' BOOL
[reserved uint 7 '0x00']
[simple bit 'value']
]
['KnxPropertyDataType.PDT_CHAR' SINT
[simple int 8 'value']
]
['KnxPropertyDataType.PDT_UNSIGNED_CHAR' USINT
[simple uint 8 'value']
]
['KnxPropertyDataType.PDT_INT' INT
[simple int 16 'value']
]
// On some systems this property is bigger
['KnxPropertyDataType.PDT_UNSIGNED_INT','4' UDINT
[simple uint 32 'value']
]
['KnxPropertyDataType.PDT_UNSIGNED_INT' UINT
[simple uint 16 'value']
]
['KnxPropertyDataType.PDT_KNX_FLOAT' REAL
[simple float 4.11 'value']
]
['KnxPropertyDataType.PDT_DATE' Struct
[reserved uint 3 '0x00']
[simple uint 5 'dayOfMonth']
[reserved uint 4 '0x00']
[simple uint 4 'month']
[reserved uint 1 '0x00']
[simple uint 7 'year'] ]
['KnxPropertyDataType.PDT_TIME' Struct
[simple uint 3 'day']
[simple uint 5 'hour']
[reserved uint 2 '0x00']
[simple uint 6 'minutes']
[reserved uint 2 '0x00']
[simple uint 6 'seconds']
]
['KnxPropertyDataType.PDT_LONG' DINT
[simple int 32 'value']
]
['KnxPropertyDataType.PDT_UNSIGNED_LONG' UDINT
[simple uint 32 'value']
]
['KnxPropertyDataType.PDT_FLOAT' REAL
[simple float 8.23 'value']
]
['KnxPropertyDataType.PDT_DOUBLE' LREAL
[simple float 11.52 'value']
]
['KnxPropertyDataType.PDT_CHAR_BLOCK' List
[array uint 8 'value' count '10']
]
['KnxPropertyDataType.PDT_POLL_GROUP_SETTINGS' Struct
[array uint 8 'groupAddress' count '2']
[simple bit 'disable']
[reserved uint 3 '0x0']
[simple uint 4 'pollingSoftNr']
]
['KnxPropertyDataType.PDT_SHORT_CHAR_BLOCK' List
[array uint 8 'value' count '5']
]
['KnxPropertyDataType.PDT_DATE_TIME' Struct
[simple uint 8 'year']
[reserved uint 4 '0x00']
[simple uint 4 'month']
[reserved uint 3 '0x00']
[simple uint 5 'dayofmonth']
[simple uint 3 'dayofweek']
[simple uint 5 'hourofday']
[reserved uint 2 '0x00']
[simple uint 6 'minutes']
[reserved uint 2 '0x00']
[simple uint 6 'seconds']
[simple bit 'fault']
[simple bit 'workingDay']
[simple bit 'noWd']
[simple bit 'noYear']
[simple bit 'noDate']
[simple bit 'noDayOfWeek']
[simple bit 'noTime']
[simple bit 'standardSummerTime']
[simple bit 'qualityOfClock']
[reserved uint 7 '0x00']
]
['KnxPropertyDataType.PDT_GENERIC_01' List
[array uint 8 'value' count '1']
]
['KnxPropertyDataType.PDT_GENERIC_02' List
[array uint 8 'value' count '2']
]
['KnxPropertyDataType.PDT_GENERIC_03' List
[array uint 8 'value' count '3']
]
['KnxPropertyDataType.PDT_GENERIC_04' List
[array uint 8 'value' count '4']
]
['KnxPropertyDataType.PDT_GENERIC_05' List
[array uint 8 'value' count '5']
]
['KnxPropertyDataType.PDT_GENERIC_06' List
[array uint 8 'value' count '6']
]
['KnxPropertyDataType.PDT_GENERIC_07' List
[array uint 8 'value' count '7']
]
['KnxPropertyDataType.PDT_GENERIC_08' List
[array uint 8 'value' count '8']
]
['KnxPropertyDataType.PDT_GENERIC_09' List
[array uint 8 'value' count '9']
]
['KnxPropertyDataType.PDT_GENERIC_10' List
[array uint 8 'value' count '10']
]
['KnxPropertyDataType.PDT_GENERIC_11' List
[array uint 8 'value' count '11']
]
['KnxPropertyDataType.PDT_GENERIC_12' List
[array uint 8 'value' count '12']
]
['KnxPropertyDataType.PDT_GENERIC_13' List
[array uint 8 'value' count '13']
]
['KnxPropertyDataType.PDT_GENERIC_14' List
[array uint 8 'value' count '14']
]
['KnxPropertyDataType.PDT_GENERIC_15' List
[array uint 8 'value' count '15']
]
['KnxPropertyDataType.PDT_GENERIC_16' List
[array uint 8 'value' count '16']
]
['KnxPropertyDataType.PDT_GENERIC_17' List
[array uint 8 'value' count '17']
]
['KnxPropertyDataType.PDT_GENERIC_18' List
[array uint 8 'value' count '18']
]
['KnxPropertyDataType.PDT_GENERIC_19' List
[array uint 8 'value' count '19']
]
['KnxPropertyDataType.PDT_GENERIC_20' List
[array uint 8 'value' count '20']
]
// Defaults to PDT_VARIABLE_LENGTH
//['KnxPropertyDataType.PDT_UTF_8'
//]
['KnxPropertyDataType.PDT_VERSION' Struct
[simple uint 5 'magicNumber']
[simple uint 5 'versionNumber']
[simple uint 6 'revisionNumber']
]
['KnxPropertyDataType.PDT_ALARM_INFO' Struct
[simple uint 8 'logNumber']
[simple uint 8 'alarmPriority']
[simple uint 8 'applicationArea']
[simple uint 8 'errorClass']
[reserved uint 4 '0x00']
[simple bit 'errorcodeSup']
[simple bit 'alarmtextSup']
[simple bit 'timestampSup']
[simple bit 'ackSup']
[reserved uint 5 '0x00']
[simple bit 'locked']
[simple bit 'alarmunack']
[simple bit 'inalarm']
]
['KnxPropertyDataType.PDT_BINARY_INFORMATION' BOOL
[reserved uint 7 '0x00']
[simple bit 'value']
]
['KnxPropertyDataType.PDT_BITSET8' List
[array bit 'value' count '8']
]
['KnxPropertyDataType.PDT_BITSET16' List
[array bit 'value' count '16']
]
['KnxPropertyDataType.PDT_ENUM8' USINT
[simple uint 8 'value']
]
['KnxPropertyDataType.PDT_SCALING' USINT
[simple uint 8 'value']
]
// Defaults to PDT_VARIABLE_LENGTH
//['KnxPropertyDataType.PDT_NE_VL'
//]
// Defaults to PDT_VARIABLE_LENGTH
//['KnxPropertyDataType.PDT_NE_FL'
//]
// Defaults to PDT_VARIABLE_LENGTH
//['KnxPropertyDataType.PDT_FUNCTION'
//]
// Defaults to PDT_VARIABLE_LENGTH
//['KnxPropertyDataType.PDT_ESCAPE'
//]
// 'KnxPropertyDataType.PDT_VARIABLE_LENGTH' == Catch all
[ List [uint 8 'dataLengthInBytes']
[array uint 8 'value' count 'dataLengthInBytes']
]
]
]
// 03_05_01 Resources v01.09.03 AS page 171
[enum uint 8 'ComObjectValueType' [uint 8 'sizeInBytes']
    // NOTE: BIT1..BIT7 are sub-byte value types, but each still occupies
    // one full byte on the wire, hence a sizeInBytes of '1' for all of them.
    ['0x00' BIT1 ['1']]
    ['0x01' BIT2 ['1']]
    ['0x02' BIT3 ['1']]
    ['0x03' BIT4 ['1']]
    ['0x04' BIT5 ['1']]
    ['0x05' BIT6 ['1']]
    ['0x06' BIT7 ['1']]
    ['0x07' BYTE1 ['1']]
    ['0x08' BYTE2 ['2']]
    ['0x09' BYTE3 ['3']]
    ['0x0A' BYTE4 ['4']]
    ['0x0B' BYTE6 ['6']]
    ['0x0C' BYTE8 ['8']]
    ['0x0D' BYTE10 ['10']]
    ['0x0E' BYTE14 ['14']]
]
[discriminatedType 'ComObjectTable' [FirmwareType 'firmwareType']
    // The layout of the com object (group object) table differs between
    // device firmware generations, so the concrete parser is selected
    // by the 'firmwareType' discriminator.
    [typeSwitch 'firmwareType'
        // The location of the Group Object Table - Realization Type 1 is calculated by
        // adding 0x100 to the value of the resource 'Group Object Table Pointer', which
        // is a single byte located at memory address 0x112
        ['FirmwareType.SYSTEM_1' ComObjectTableRealisationType1
            [simple uint 8 'numEntries']
            [simple uint 8 'ramFlagsTablePointer']
            [array GroupObjectDescriptorRealisationType1 'comObjectDescriptors' count 'numEntries']
        ]
        // The location of the Group Object Table - Realization Type 2 is calculated by
        // adding 0x100 to the value of the resource 'Group Object Table Pointer', which
        // is a single byte located at memory address 0x112
        ['FirmwareType.SYSTEM_2' ComObjectTableRealisationType2
            [simple uint 8 'numEntries']
            [simple uint 8 'ramFlagsTablePointer']
            [array GroupObjectDescriptorRealisationType2 'comObjectDescriptors' count 'numEntries']
        ]
        // The Group Object Table in Realization Type 6 is accessed via Properties instead of
        // reading memory.
        ['FirmwareType.SYSTEM_300' ComObjectTableRealisationType6
            // TODO: This probably needs to be changed to an array as soon as I know how to actually work with these types
            [simple GroupObjectDescriptorRealisationType6 'comObjectDescriptors']
        ]
        //['FirmwareType.SYSTEM_7' ComObjectTableRealisationType7
        //]
    ]
]
// 03_05_01 Resources v01.09.03 AS page 168ff
[type 'GroupObjectDescriptorRealisationType1'
    // Offset to the data (also pay attention to the value of 'segmentSelectorEnable':
    // if set to 'true', 0x100 has to be added to this value)
    [simple uint 8 'dataPointer']
    [reserved uint 1 '0x1']
    // The com object emits GroupValueWrites if the internal value changes
    [simple bit 'transmitEnable']
    // Additional information to the 'dataPointer': if set to 'true', 0x100 needs to be added to the address
    [simple bit 'segmentSelectorEnable']
    // The Com Object reacts to GroupValueWrite requests
    [simple bit 'writeEnable']
    // The Com Object reacts to GroupValueRead requests
    [simple bit 'readEnable']
    // Communication is generally enabled (if this is set to false, 'transmitEnable',
    // 'writeEnable' and 'readEnable' are generally considered 'false')
    [simple bit 'communicationEnable']
    // Transmission priority
    [simple CEMIPriority 'priority']
    // Size/type of the com object's value (see ComObjectValueType)
    [simple ComObjectValueType 'valueType']
]
// 03_05_01 Resources v01.09.03 AS page 172ff
// It's generally identical to the type 1, but uses the reserved bit from type 1 as "updateEnable"
[type 'GroupObjectDescriptorRealisationType2'
    // Offset to the data (also pay attention to the value of 'segmentSelectorEnable':
    // if set to 'true', 0x100 has to be added to this value)
    [simple uint 8 'dataPointer']
    // Occupies the bit that is reserved in Realization Type 1
    [simple bit 'updateEnable']
    // The com object emits GroupValueWrites if the internal value changes
    [simple bit 'transmitEnable']
    // Additional information to the 'dataPointer': if set to 'true', 0x100 needs to be added to the address
    [simple bit 'segmentSelectorEnable']
    // The Com Object reacts to GroupValueWrite requests
    [simple bit 'writeEnable']
    // The Com Object reacts to GroupValueRead requests
    [simple bit 'readEnable']
    // Communication is generally enabled (if this is set to false, 'transmitEnable',
    // 'writeEnable' and 'readEnable' are generally considered 'false')
    [simple bit 'communicationEnable']
    // Transmission priority
    [simple CEMIPriority 'priority']
    // Size/type of the com object's value (see ComObjectValueType)
    [simple ComObjectValueType 'valueType']
]
// 03_05_01 Resources v01.09.03 AS page 173ff
[type 'GroupObjectDescriptorRealisationType6'
    // TODO: Implement (in Realization Type 6 the descriptors are accessed
    // via Properties instead of reading memory, see ComObjectTable)
]
| 40.63807 | 137 | 0.569446 |
1c2d06451d99a604403d36299d4ddf0a2ddd4ff5 | 3,313 | require_relative '../../../../spec_helper'
# Unit tests for the `govuk::app::envvar::redis` defined type, which exports
# (optionally prefixed) REDIS_HOST, REDIS_PORT and REDIS_URL environment
# variables for an application via `govuk::app::envvar` resources.
describe 'govuk::app::envvar::redis', :type => :define do
  let(:title) { 'giraffe' }

  context 'with empty parameters' do
    let(:params) { {} }

    it 'sets the Redis host to 127.0.0.1 by default' do
      is_expected.to contain_govuk__app__envvar("#{title}-redis_host")
        .with_app(title)
        .with_varname('REDIS_HOST')
        .with_value('127.0.0.1')
    end

    it 'sets the Redis port to 6379 by default' do
      is_expected.to contain_govuk__app__envvar("#{title}-redis_port")
        .with_app(title)
        .with_varname('REDIS_PORT')
        .with_value('6379')
    end

    it 'sets a Redis URL with the default values' do
      is_expected.to contain_govuk__app__envvar("#{title}-redis_url")
        .with_app(title)
        .with_varname('REDIS_URL')
        .with_value('redis://127.0.0.1:6379')
    end
  end

  context 'with some good parameters' do
    let(:host) { 'redis.backend' }
    let(:port) { '1234' }
    let(:params) { { host: host, port: port } }

    it 'sets the Redis host' do
      is_expected.to contain_govuk__app__envvar("#{title}-redis_host")
        .with_app(title)
        .with_varname('REDIS_HOST')
        .with_value(host)
    end

    it 'sets the Redis port' do
      is_expected.to contain_govuk__app__envvar("#{title}-redis_port")
        .with_app(title)
        .with_varname('REDIS_PORT')
        .with_value(port)
    end

    it 'sets a Redis URL' do
      is_expected.to contain_govuk__app__envvar("#{title}-redis_url")
        .with_app(title)
        .with_varname('REDIS_URL')
        .with_value("redis://#{host}:#{port}")
    end
  end

  context 'with a specific app' do
    let(:app) { 'enclosure' }
    let(:params) { { app: app } }

    it 'uses that app when setting the host variable' do
      is_expected.to contain_govuk__app__envvar("#{title}-redis_host")
        .with_app(app)
    end

    it 'uses that app when setting the port variable' do
      is_expected.to contain_govuk__app__envvar("#{title}-redis_port")
        .with_app(app)
    end

    it 'uses that app when setting the url variable' do
      is_expected.to contain_govuk__app__envvar("#{title}-redis_url")
        .with_app(app)
    end
  end

  context 'with a prefix' do
    let(:params) { { prefix: 'zoo' } }

    it 'adds a prefix to the host variable' do
      is_expected.to contain_govuk__app__envvar("#{title}-zoo_redis_host")
        .with_app(title)
        .with_varname('ZOO_REDIS_HOST')
    end

    it 'adds a prefix to the port variable' do
      is_expected.to contain_govuk__app__envvar("#{title}-zoo_redis_port")
        .with_app(title)
        .with_varname('ZOO_REDIS_PORT')
    end

    it 'adds a prefix to the url variable' do
      is_expected.to contain_govuk__app__envvar("#{title}-zoo_redis_url")
        .with_app(title)
        .with_varname('ZOO_REDIS_URL')
    end
  end
end
| 33.13 | 74 | 0.561123 |
acbc2a4ec60d72895efc292cf8a237feb1223c68 | 2,434 | # frozen_string_literal: true
lib = File.expand_path("../lib", __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)

require "sassc/version"

Gem::Specification.new do |spec|
  spec.name = "sassc"
  spec.version = SassC::VERSION
  spec.authors = ["Ryan Boland"]
  spec.email = ["[email protected]"]
  spec.summary = "Use libsass with Ruby!"
  spec.description = "Use libsass with Ruby!"
  spec.homepage = "https://github.com/sass/sassc-ruby"
  spec.license = "MIT"

  spec.files = `git ls-files -z`.split("\x0")
  spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) }
  spec.test_files = spec.files.grep(%r{^(test|spec|features)/})

  spec.required_ruby_version = ">= 2.0.0"

  spec.require_paths = ["lib"]

  spec.platform = Gem::Platform::RUBY
  spec.extensions = ["ext/extconf.rb"]

  spec.add_development_dependency "minitest", "~> 5.5.1"
  spec.add_development_dependency "minitest-around"
  spec.add_development_dependency "test_construct"
  spec.add_development_dependency "pry"
  spec.add_development_dependency "bundler"
  spec.add_development_dependency "rake"
  spec.add_development_dependency "rake-compiler"
  spec.add_development_dependency "rake-compiler-dock"

  spec.add_dependency "ffi", "~> 1.15"

  # The libsass C/C++ sources live in a git submodule; fetch it if it is
  # missing or was only partially checked out.
  gem_dir = File.expand_path(File.dirname(__FILE__)) + "/"

  libsass_dir = File.join(gem_dir, 'ext', 'libsass')
  if !File.directory?(libsass_dir) ||
     # '.', '..', and possibly '.git' from a failed checkout:
     Dir.entries(libsass_dir).size <= 3
    Dir.chdir(__dir__) { system('git submodule update --init') } or
      raise 'Could not fetch libsass'
  end

  # Write a VERSION file for non-binary gems (for `SassC::Native.version`).
  if File.exist?(File.join(libsass_dir, '.git'))
    libsass_version = Dir.chdir(libsass_dir) do
      %x[git describe --abbrev=4 --dirty --always --tags].chomp
    end
    File.write(File.join(libsass_dir, 'VERSION'), libsass_version)
  end

  # Vendor the libsass C/C++ sources (plus the generated VERSION file) into
  # the gem, skipping tests, docs and CI configuration.
  Dir.chdir(libsass_dir) do
    submodule_relative_path = File.join('ext', 'libsass')
    skip_re = %r{(^("?test|docs|script)/)|\.md$|\.yml$}
    only_re = %r{\.[ch](pp)?$}
    # `git ls-files` emits one path per line, so split on newlines.
    # The previous `split($\)` used the *output* record separator, which
    # defaults to nil and made `split` fall back to whitespace splitting —
    # broken for any path containing a space.
    `git ls-files`.split("\n").each do |filename|
      next if filename =~ skip_re || filename !~ only_re
      spec.files << File.join(submodule_relative_path, filename)
    end
    spec.files << File.join(submodule_relative_path, 'VERSION')
  end
end
| 34.771429 | 75 | 0.668036 |
91a63344a8a9e491a9a21f71fac6c0c3d7622fa0 | 241,742 | # frozen_string_literal: true
# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE
require 'seahorse/client/plugins/content_length.rb'
require 'aws-sdk-core/plugins/credentials_configuration.rb'
require 'aws-sdk-core/plugins/logging.rb'
require 'aws-sdk-core/plugins/param_converter.rb'
require 'aws-sdk-core/plugins/param_validator.rb'
require 'aws-sdk-core/plugins/user_agent.rb'
require 'aws-sdk-core/plugins/helpful_socket_errors.rb'
require 'aws-sdk-core/plugins/retry_errors.rb'
require 'aws-sdk-core/plugins/global_configuration.rb'
require 'aws-sdk-core/plugins/regional_endpoint.rb'
require 'aws-sdk-core/plugins/endpoint_discovery.rb'
require 'aws-sdk-core/plugins/endpoint_pattern.rb'
require 'aws-sdk-core/plugins/response_paging.rb'
require 'aws-sdk-core/plugins/stub_responses.rb'
require 'aws-sdk-core/plugins/idempotency_token.rb'
require 'aws-sdk-core/plugins/jsonvalue_converter.rb'
require 'aws-sdk-core/plugins/client_metrics_plugin.rb'
require 'aws-sdk-core/plugins/client_metrics_send_plugin.rb'
require 'aws-sdk-core/plugins/transfer_encoding.rb'
require 'aws-sdk-core/plugins/http_checksum.rb'
require 'aws-sdk-core/plugins/signature_v4.rb'
require 'aws-sdk-core/plugins/protocols/json_rpc.rb'
# Registers :rekognition as a service identifier with the SDK's global
# configuration plugin.
Aws::Plugins::GlobalConfiguration.add_identifier(:rekognition)
module Aws::Rekognition
# An API client for Rekognition. To construct a client, you need to configure a `:region` and `:credentials`.
#
# client = Aws::Rekognition::Client.new(
# region: region_name,
# credentials: credentials,
# # ...
# )
#
# For details on configuring region and credentials see
# the [developer guide](/sdk-for-ruby/v3/developer-guide/setup-config.html).
#
# See {#initialize} for a full list of supported configuration options.
class Client < Seahorse::Client::Base
include Aws::ClientStubs
@identifier = :rekognition
set_api(ClientApi::API)
add_plugin(Seahorse::Client::Plugins::ContentLength)
add_plugin(Aws::Plugins::CredentialsConfiguration)
add_plugin(Aws::Plugins::Logging)
add_plugin(Aws::Plugins::ParamConverter)
add_plugin(Aws::Plugins::ParamValidator)
add_plugin(Aws::Plugins::UserAgent)
add_plugin(Aws::Plugins::HelpfulSocketErrors)
add_plugin(Aws::Plugins::RetryErrors)
add_plugin(Aws::Plugins::GlobalConfiguration)
add_plugin(Aws::Plugins::RegionalEndpoint)
add_plugin(Aws::Plugins::EndpointDiscovery)
add_plugin(Aws::Plugins::EndpointPattern)
add_plugin(Aws::Plugins::ResponsePaging)
add_plugin(Aws::Plugins::StubResponses)
add_plugin(Aws::Plugins::IdempotencyToken)
add_plugin(Aws::Plugins::JsonvalueConverter)
add_plugin(Aws::Plugins::ClientMetricsPlugin)
add_plugin(Aws::Plugins::ClientMetricsSendPlugin)
add_plugin(Aws::Plugins::TransferEncoding)
add_plugin(Aws::Plugins::HttpChecksum)
add_plugin(Aws::Plugins::SignatureV4)
add_plugin(Aws::Plugins::Protocols::JsonRpc)
# @overload initialize(options)
# @param [Hash] options
# @option options [required, Aws::CredentialProvider] :credentials
# Your AWS credentials. This can be an instance of any one of the
# following classes:
#
# * `Aws::Credentials` - Used for configuring static, non-refreshing
# credentials.
#
# * `Aws::SharedCredentials` - Used for loading static credentials from a
# shared file, such as `~/.aws/config`.
#
# * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
#
# * `Aws::AssumeRoleWebIdentityCredentials` - Used when you need to
# assume a role after providing credentials via the web.
#
# * `Aws::SSOCredentials` - Used for loading credentials from AWS SSO using an
# access token generated from `aws login`.
#
# * `Aws::ProcessCredentials` - Used for loading credentials from a
# process that outputs to stdout.
#
# * `Aws::InstanceProfileCredentials` - Used for loading credentials
# from an EC2 IMDS on an EC2 instance.
#
# * `Aws::ECSCredentials` - Used for loading credentials from
# instances running in ECS.
#
# * `Aws::CognitoIdentityCredentials` - Used for loading credentials
# from the Cognito Identity service.
#
# When `:credentials` are not configured directly, the following
# locations will be searched for credentials:
#
# * `Aws.config[:credentials]`
# * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
# * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
# * `~/.aws/credentials`
# * `~/.aws/config`
# * EC2/ECS IMDS instance profile - When used by default, the timeouts
# are very aggressive. Construct and pass an instance of
    #     `Aws::InstanceProfileCredentials` or `Aws::ECSCredentials` to
# enable retries and extended timeouts.
#
# @option options [required, String] :region
# The AWS region to connect to. The configured `:region` is
# used to determine the service `:endpoint`. When not passed,
# a default `:region` is searched for in the following locations:
#
# * `Aws.config[:region]`
# * `ENV['AWS_REGION']`
# * `ENV['AMAZON_REGION']`
# * `ENV['AWS_DEFAULT_REGION']`
# * `~/.aws/credentials`
# * `~/.aws/config`
#
# @option options [String] :access_key_id
#
# @option options [Boolean] :active_endpoint_cache (false)
# When set to `true`, a thread polling for endpoints will be running in
# the background every 60 secs (default). Defaults to `false`.
#
# @option options [Boolean] :adaptive_retry_wait_to_fill (true)
# Used only in `adaptive` retry mode. When true, the request will sleep
# until there is sufficent client side capacity to retry the request.
# When false, the request will raise a `RetryCapacityNotAvailableError` and will
# not retry instead of sleeping.
#
# @option options [Boolean] :client_side_monitoring (false)
# When `true`, client-side metrics will be collected for all API requests from
# this client.
#
# @option options [String] :client_side_monitoring_client_id ("")
# Allows you to provide an identifier for this client which will be attached to
# all generated client side metrics. Defaults to an empty string.
#
# @option options [String] :client_side_monitoring_host ("127.0.0.1")
# Allows you to specify the DNS hostname or IPv4 or IPv6 address that the client
# side monitoring agent is running on, where client metrics will be published via UDP.
#
# @option options [Integer] :client_side_monitoring_port (31000)
# Required for publishing client metrics. The port that the client side monitoring
# agent is running on, where client metrics will be published via UDP.
#
# @option options [Aws::ClientSideMonitoring::Publisher] :client_side_monitoring_publisher (Aws::ClientSideMonitoring::Publisher)
# Allows you to provide a custom client-side monitoring publisher class. By default,
# will use the Client Side Monitoring Agent Publisher.
#
# @option options [Boolean] :convert_params (true)
# When `true`, an attempt is made to coerce request parameters into
# the required types.
#
# @option options [Boolean] :correct_clock_skew (true)
# Used only in `standard` and adaptive retry modes. Specifies whether to apply
# a clock skew correction and retry requests with skewed client clocks.
#
# @option options [Boolean] :disable_host_prefix_injection (false)
# Set to true to disable SDK automatically adding host prefix
# to default service endpoint when available.
#
# @option options [String] :endpoint
# The client endpoint is normally constructed from the `:region`
# option. You should only configure an `:endpoint` when connecting
# to test or custom endpoints. This should be a valid HTTP(S) URI.
#
# @option options [Integer] :endpoint_cache_max_entries (1000)
# Used for the maximum size limit of the LRU cache storing endpoints data
# for endpoint discovery enabled operations. Defaults to 1000.
#
# @option options [Integer] :endpoint_cache_max_threads (10)
# Used for the maximum threads in use for polling endpoints to be cached, defaults to 10.
#
# @option options [Integer] :endpoint_cache_poll_interval (60)
# When :endpoint_discovery and :active_endpoint_cache is enabled,
# Use this option to config the time interval in seconds for making
# requests fetching endpoints information. Defaults to 60 sec.
#
# @option options [Boolean] :endpoint_discovery (false)
# When set to `true`, endpoint discovery will be enabled for operations when available.
#
# @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
# The log formatter.
#
# @option options [Symbol] :log_level (:info)
# The log level to send messages to the `:logger` at.
#
# @option options [Logger] :logger
# The Logger instance to send log messages to. If this option
# is not set, logging will be disabled.
#
# @option options [Integer] :max_attempts (3)
# An integer representing the maximum number attempts that will be made for
# a single request, including the initial attempt. For example,
# setting this value to 5 will result in a request being retried up to
# 4 times. Used in `standard` and `adaptive` retry modes.
#
# @option options [String] :profile ("default")
# Used when loading credentials from the shared credentials file
# at HOME/.aws/credentials. When not specified, 'default' is used.
#
# @option options [Proc] :retry_backoff
# A proc or lambda used for backoff. Defaults to 2**retries * retry_base_delay.
# This option is only used in the `legacy` retry mode.
#
# @option options [Float] :retry_base_delay (0.3)
# The base delay in seconds used by the default backoff function. This option
# is only used in the `legacy` retry mode.
#
# @option options [Symbol] :retry_jitter (:none)
# A delay randomiser function used by the default backoff function.
# Some predefined functions can be referenced by name - :none, :equal, :full,
# otherwise a Proc that takes and returns a number. This option is only used
# in the `legacy` retry mode.
#
# @see https://www.awsarchitectureblog.com/2015/03/backoff.html
#
# @option options [Integer] :retry_limit (3)
# The maximum number of times to retry failed requests. Only
# ~ 500 level server errors and certain ~ 400 level client errors
# are retried. Generally, these are throttling errors, data
# checksum errors, networking errors, timeout errors, auth errors,
# endpoint discovery, and errors from expired credentials.
# This option is only used in the `legacy` retry mode.
#
# @option options [Integer] :retry_max_delay (0)
# The maximum number of seconds to delay between retries (0 for no limit)
# used by the default backoff function. This option is only used in the
# `legacy` retry mode.
#
# @option options [String] :retry_mode ("legacy")
# Specifies which retry algorithm to use. Values are:
#
# * `legacy` - The pre-existing retry behavior. This is default value if
# no retry mode is provided.
#
# * `standard` - A standardized set of retry rules across the AWS SDKs.
# This includes support for retry quotas, which limit the number of
# unsuccessful retries a client can make.
#
# * `adaptive` - An experimental retry mode that includes all the
# functionality of `standard` mode along with automatic client side
# throttling. This is a provisional mode that may change behavior
# in the future.
#
#
# @option options [String] :secret_access_key
#
# @option options [String] :session_token
#
# @option options [Boolean] :simple_json (false)
# Disables request parameter conversion, validation, and formatting.
# Also disable response data type conversions. This option is useful
# when you want to ensure the highest level of performance by
# avoiding overhead of walking request parameters and response data
# structures.
#
# When `:simple_json` is enabled, the request parameters hash must
# be formatted exactly as the DynamoDB API expects.
#
# @option options [Boolean] :stub_responses (false)
# Causes the client to return stubbed responses. By default
# fake responses are generated and returned. You can specify
# the response data to return or errors to raise by calling
# {ClientStubs#stub_responses}. See {ClientStubs} for more information.
#
# ** Please note ** When response stubbing is enabled, no HTTP
# requests are made, and retries are disabled.
#
# @option options [Boolean] :validate_params (true)
# When `true`, request parameters are validated before
# sending the request.
#
# @option options [URI::HTTP,String] :http_proxy A proxy to send
# requests through. Formatted like 'http://proxy.com:123'.
#
# @option options [Float] :http_open_timeout (15) The number of
# seconds to wait when opening a HTTP session before raising a
# `Timeout::Error`.
#
# @option options [Integer] :http_read_timeout (60) The default
# number of seconds to wait for response data. This value can
# safely be set per-request on the session.
#
# @option options [Float] :http_idle_timeout (5) The number of
# seconds a connection is allowed to sit idle before it is
# considered stale. Stale connections are closed and removed
# from the pool before making a request.
#
# @option options [Float] :http_continue_timeout (1) The number of
# seconds to wait for a 100-continue response before sending the
# request body. This option has no effect unless the request has
# "Expect" header set to "100-continue". Defaults to `nil` which
# disables this behaviour. This value can safely be set per
# request on the session.
#
# @option options [Boolean] :http_wire_trace (false) When `true`,
# HTTP debug output will be sent to the `:logger`.
#
# @option options [Boolean] :ssl_verify_peer (true) When `true`,
# SSL peer certificates are verified when establishing a
# connection.
#
# @option options [String] :ssl_ca_bundle Full path to the SSL
# certificate authority bundle file that should be used when
# verifying peer certificates. If you do not pass
    #   `:ssl_ca_bundle` or `:ssl_ca_directory` the system default
# will be used if available.
#
# @option options [String] :ssl_ca_directory Full path of the
# directory that contains the unbundled SSL certificate
# authority files for verifying peer certificates. If you do
    #   not pass `:ssl_ca_bundle` or `:ssl_ca_directory` the
# system default will be used if available.
#
def initialize(*args)
super
end
# @!group API Operations
# Compares a face in the *source* input image with each of the 100
# largest faces detected in the *target* input image.
#
# <note markdown="1"> If the source image contains multiple faces, the service detects the
# largest face and compares it with each face detected in the target
# image.
#
# </note>
#
# You pass the input and target images either as base64-encoded image
# bytes or as references to images in an Amazon S3 bucket. If you use
# the AWS CLI to call Amazon Rekognition operations, passing image bytes
# isn't supported. The image must be formatted as a PNG or JPEG file.
#
# In response, the operation returns an array of face matches ordered by
# similarity score in descending order. For each face match, the
# response provides a bounding box of the face, facial landmarks, pose
    # details (pitch, roll, and yaw), quality (brightness and sharpness),
# and confidence value (indicating the level of confidence that the
# bounding box contains a face). The response also provides a similarity
# score, which indicates how closely the faces match.
#
# <note markdown="1"> By default, only faces with a similarity score of greater than or
# equal to 80% are returned in the response. You can change this value
# by specifying the `SimilarityThreshold` parameter.
#
# </note>
#
# `CompareFaces` also returns an array of faces that don't match the
# source image. For each face, it returns a bounding box, confidence
# value, landmarks, pose details, and quality. The response also returns
# information about the face in the source image, including the bounding
# box of the face and confidence value.
#
# The `QualityFilter` input parameter allows you to filter out detected
# faces that don’t meet a required quality bar. The quality bar is based
# on a variety of common use cases. Use `QualityFilter` to set the
# quality bar by specifying `LOW`, `MEDIUM`, or `HIGH`. If you do not
# want to filter detected faces, specify `NONE`. The default value is
# `NONE`.
#
# If the image doesn't contain Exif metadata, `CompareFaces` returns
# orientation information for the source and target images. Use these
# values to display the images with the correct image orientation.
#
# If no faces are detected in the source or target images,
# `CompareFaces` returns an `InvalidParameterException` error.
#
# <note markdown="1"> This is a stateless API operation. That is, data returned by this
# operation doesn't persist.
#
# </note>
#
# For an example, see Comparing Faces in Images in the Amazon
# Rekognition Developer Guide.
#
# This operation requires permissions to perform the
# `rekognition:CompareFaces` action.
#
# @option params [required, Types::Image] :source_image
# The input image as base64-encoded bytes or an S3 object. If you use
# the AWS CLI to call Amazon Rekognition operations, passing
# base64-encoded image bytes is not supported.
#
# If you are using an AWS SDK to call Amazon Rekognition, you might not
# need to base64-encode image bytes passed using the `Bytes` field. For
# more information, see Images in the Amazon Rekognition developer
# guide.
#
# @option params [required, Types::Image] :target_image
# The target image as base64-encoded bytes or an S3 object. If you use
# the AWS CLI to call Amazon Rekognition operations, passing
# base64-encoded image bytes is not supported.
#
# If you are using an AWS SDK to call Amazon Rekognition, you might not
# need to base64-encode image bytes passed using the `Bytes` field. For
# more information, see Images in the Amazon Rekognition developer
# guide.
#
# @option params [Float] :similarity_threshold
# The minimum level of confidence in the face matches that a match must
# meet to be included in the `FaceMatches` array.
#
# @option params [String] :quality_filter
# A filter that specifies a quality bar for how much filtering is done
# to identify faces. Filtered faces aren't compared. If you specify
# `AUTO`, Amazon Rekognition chooses the quality bar. If you specify
# `LOW`, `MEDIUM`, or `HIGH`, filtering removes all faces that don’t
# meet the chosen quality bar. The quality bar is based on a variety of
# common use cases. Low-quality detections can occur for a number of
# reasons. Some examples are an object that's misidentified as a face,
# a face that's too blurry, or a face with a pose that's too extreme
# to use. If you specify `NONE`, no filtering is performed. The default
# value is `NONE`.
#
# To use quality filtering, the collection you are using must be
# associated with version 3 of the face model or higher.
#
# @return [Types::CompareFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CompareFacesResponse#source_image_face #source_image_face} => Types::ComparedSourceImageFace
# * {Types::CompareFacesResponse#face_matches #face_matches} => Array<Types::CompareFacesMatch>
# * {Types::CompareFacesResponse#unmatched_faces #unmatched_faces} => Array<Types::ComparedFace>
# * {Types::CompareFacesResponse#source_image_orientation_correction #source_image_orientation_correction} => String
# * {Types::CompareFacesResponse#target_image_orientation_correction #target_image_orientation_correction} => String
#
#
# @example Example: To compare two images
#
# # This operation compares the largest face detected in the source image with each face detected in the target image.
#
# resp = client.compare_faces({
# similarity_threshold: 90,
# source_image: {
# s3_object: {
# bucket: "mybucket",
# name: "mysourceimage",
# },
# },
# target_image: {
# s3_object: {
# bucket: "mybucket",
# name: "mytargetimage",
# },
# },
# })
#
# resp.to_h outputs the following:
# {
# face_matches: [
# {
# face: {
# bounding_box: {
# height: 0.33481481671333313,
# left: 0.31888890266418457,
# top: 0.4933333396911621,
# width: 0.25,
# },
# confidence: 99.9991226196289,
# },
# similarity: 100,
# },
# ],
# source_image_face: {
# bounding_box: {
# height: 0.33481481671333313,
# left: 0.31888890266418457,
# top: 0.4933333396911621,
# width: 0.25,
# },
# confidence: 99.9991226196289,
# },
# }
#
# @example Request syntax with placeholder values
#
# resp = client.compare_faces({
# source_image: { # required
# bytes: "data",
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# target_image: { # required
# bytes: "data",
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# similarity_threshold: 1.0,
# quality_filter: "NONE", # accepts NONE, AUTO, LOW, MEDIUM, HIGH
# })
#
# @example Response structure
#
# resp.source_image_face.bounding_box.width #=> Float
# resp.source_image_face.bounding_box.height #=> Float
# resp.source_image_face.bounding_box.left #=> Float
# resp.source_image_face.bounding_box.top #=> Float
# resp.source_image_face.confidence #=> Float
# resp.face_matches #=> Array
# resp.face_matches[0].similarity #=> Float
# resp.face_matches[0].face.bounding_box.width #=> Float
# resp.face_matches[0].face.bounding_box.height #=> Float
# resp.face_matches[0].face.bounding_box.left #=> Float
# resp.face_matches[0].face.bounding_box.top #=> Float
# resp.face_matches[0].face.confidence #=> Float
# resp.face_matches[0].face.landmarks #=> Array
# resp.face_matches[0].face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
# resp.face_matches[0].face.landmarks[0].x #=> Float
# resp.face_matches[0].face.landmarks[0].y #=> Float
# resp.face_matches[0].face.pose.roll #=> Float
# resp.face_matches[0].face.pose.yaw #=> Float
# resp.face_matches[0].face.pose.pitch #=> Float
# resp.face_matches[0].face.quality.brightness #=> Float
# resp.face_matches[0].face.quality.sharpness #=> Float
# resp.unmatched_faces #=> Array
# resp.unmatched_faces[0].bounding_box.width #=> Float
# resp.unmatched_faces[0].bounding_box.height #=> Float
# resp.unmatched_faces[0].bounding_box.left #=> Float
# resp.unmatched_faces[0].bounding_box.top #=> Float
# resp.unmatched_faces[0].confidence #=> Float
# resp.unmatched_faces[0].landmarks #=> Array
# resp.unmatched_faces[0].landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
# resp.unmatched_faces[0].landmarks[0].x #=> Float
# resp.unmatched_faces[0].landmarks[0].y #=> Float
# resp.unmatched_faces[0].pose.roll #=> Float
# resp.unmatched_faces[0].pose.yaw #=> Float
# resp.unmatched_faces[0].pose.pitch #=> Float
# resp.unmatched_faces[0].quality.brightness #=> Float
# resp.unmatched_faces[0].quality.sharpness #=> Float
# resp.source_image_orientation_correction #=> String, one of "ROTATE_0", "ROTATE_90", "ROTATE_180", "ROTATE_270"
# resp.target_image_orientation_correction #=> String, one of "ROTATE_0", "ROTATE_90", "ROTATE_180", "ROTATE_270"
#
# @overload compare_faces(params = {})
# @param [Hash] params ({})
def compare_faces(params = {}, options = {})
req = build_request(:compare_faces, params)
req.send_request(options)
end
# Creates a collection in an AWS Region. You can add faces to the
# collection using the IndexFaces operation.
#
# For example, you might create collections, one for each of your
# application users. A user can then index faces using the `IndexFaces`
# operation and persist results in a specific collection. Then, a user
# can search the collection for faces in the user-specific container.
#
# When you create a collection, it is associated with the latest version
# of the face model version.
#
# <note markdown="1"> Collection names are case-sensitive.
#
# </note>
#
# This operation requires permissions to perform the
# `rekognition:CreateCollection` action.
#
# @option params [required, String] :collection_id
# ID for the collection that you are creating.
#
# @return [Types::CreateCollectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CreateCollectionResponse#status_code #status_code} => Integer
# * {Types::CreateCollectionResponse#collection_arn #collection_arn} => String
# * {Types::CreateCollectionResponse#face_model_version #face_model_version} => String
#
#
# @example Example: To create a collection
#
# # This operation creates a Rekognition collection for storing image data.
#
# resp = client.create_collection({
# collection_id: "myphotos",
# })
#
# resp.to_h outputs the following:
# {
# collection_arn: "aws:rekognition:us-west-2:123456789012:collection/myphotos",
# status_code: 200,
# }
#
# @example Request syntax with placeholder values
#
# resp = client.create_collection({
# collection_id: "CollectionId", # required
# })
#
# @example Response structure
#
# resp.status_code #=> Integer
# resp.collection_arn #=> String
# resp.face_model_version #=> String
#
# @overload create_collection(params = {})
# @param [Hash] params ({})
def create_collection(params = {}, options = {})
req = build_request(:create_collection, params)
req.send_request(options)
end
# Creates a new Amazon Rekognition Custom Labels project. A project is a
# logical grouping of resources (images, Labels, models) and operations
# (training, evaluation and detection).
#
# This operation requires permissions to perform the
# `rekognition:CreateProject` action.
#
# @option params [required, String] :project_name
# The name of the project to create.
#
# @return [Types::CreateProjectResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CreateProjectResponse#project_arn #project_arn} => String
#
# @example Request syntax with placeholder values
#
# resp = client.create_project({
# project_name: "ProjectName", # required
# })
#
# @example Response structure
#
# resp.project_arn #=> String
#
# @overload create_project(params = {})
# @param [Hash] params ({})
def create_project(params = {}, options = {})
req = build_request(:create_project, params)
req.send_request(options)
end
# Creates a new version of a model and begins training. Models are
# managed as part of an Amazon Rekognition Custom Labels project. You
# can specify one training dataset and one testing dataset. The response
# from `CreateProjectVersion` is an Amazon Resource Name (ARN) for the
# version of the model.
#
# Training takes a while to complete. You can get the current status by
# calling DescribeProjectVersions.
#
# Once training has successfully completed, call DescribeProjectVersions
# to get the training results and evaluate the model.
#
# After evaluating the model, you start the model by calling
# StartProjectVersion.
#
# This operation requires permissions to perform the
# `rekognition:CreateProjectVersion` action.
#
# @option params [required, String] :project_arn
# The ARN of the Amazon Rekognition Custom Labels project that manages
# the model that you want to train.
#
# @option params [required, String] :version_name
# A name for the version of the model. This value must be unique.
#
# @option params [required, Types::OutputConfig] :output_config
# The Amazon S3 location to store the results of training.
#
# @option params [required, Types::TrainingData] :training_data
# The dataset to use for training.
#
# @option params [required, Types::TestingData] :testing_data
# The dataset to use for testing.
#
# @return [Types::CreateProjectVersionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CreateProjectVersionResponse#project_version_arn #project_version_arn} => String
#
# @example Request syntax with placeholder values
#
# resp = client.create_project_version({
# project_arn: "ProjectArn", # required
# version_name: "VersionName", # required
# output_config: { # required
# s3_bucket: "S3Bucket",
# s3_key_prefix: "S3KeyPrefix",
# },
# training_data: { # required
# assets: [
# {
# ground_truth_manifest: {
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# },
# ],
# },
# testing_data: { # required
# assets: [
# {
# ground_truth_manifest: {
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# },
# ],
# auto_create: false,
# },
# })
#
# @example Response structure
#
# resp.project_version_arn #=> String
#
# @overload create_project_version(params = {})
# @param [Hash] params ({})
def create_project_version(params = {}, options = {})
req = build_request(:create_project_version, params)
req.send_request(options)
end
# Creates an Amazon Rekognition stream processor that you can use to
# detect and recognize faces in a streaming video.
#
# Amazon Rekognition Video is a consumer of live video from Amazon
# Kinesis Video Streams. Amazon Rekognition Video sends analysis results
# to Amazon Kinesis Data Streams.
#
# You provide as input a Kinesis video stream (`Input`) and a Kinesis
# data stream (`Output`) stream. You also specify the face recognition
# criteria in `Settings`. For example, the collection containing faces
# that you want to recognize. Use `Name` to assign an identifier for the
# stream processor. You use `Name` to manage the stream processor. For
# example, you can start processing the source video by calling
# StartStreamProcessor with the `Name` field.
#
# After you have finished analyzing a streaming video, use
# StopStreamProcessor to stop processing. You can delete the stream
# processor by calling DeleteStreamProcessor.
#
# @option params [required, Types::StreamProcessorInput] :input
    #   Kinesis video stream that provides the source streaming video. If
    #   you are using the AWS CLI, the parameter name is
    #   `StreamProcessorInput`.
#
# @option params [required, Types::StreamProcessorOutput] :output
    #   Kinesis data stream to which Amazon Rekognition Video puts the
    #   analysis results. If you are using the AWS CLI, the parameter name is
    #   `StreamProcessorOutput`.
#
# @option params [required, String] :name
# An identifier you assign to the stream processor. You can use `Name`
# to manage the stream processor. For example, you can get the current
# status of the stream processor by calling DescribeStreamProcessor.
# `Name` is idempotent.
#
# @option params [required, Types::StreamProcessorSettings] :settings
# Face recognition input parameters to be used by the stream processor.
# Includes the collection to use for face recognition and the face
# attributes to detect.
#
# @option params [required, String] :role_arn
# ARN of the IAM role that allows access to the stream processor.
#
# @return [Types::CreateStreamProcessorResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CreateStreamProcessorResponse#stream_processor_arn #stream_processor_arn} => String
#
# @example Request syntax with placeholder values
#
# resp = client.create_stream_processor({
# input: { # required
# kinesis_video_stream: {
# arn: "KinesisVideoArn",
# },
# },
# output: { # required
# kinesis_data_stream: {
# arn: "KinesisDataArn",
# },
# },
# name: "StreamProcessorName", # required
# settings: { # required
# face_search: {
# collection_id: "CollectionId",
# face_match_threshold: 1.0,
# },
# },
# role_arn: "RoleArn", # required
# })
#
# @example Response structure
#
# resp.stream_processor_arn #=> String
#
# @overload create_stream_processor(params = {})
# @param [Hash] params ({})
def create_stream_processor(params = {}, options = {})
req = build_request(:create_stream_processor, params)
req.send_request(options)
end
# Deletes the specified collection. Note that this operation removes all
# faces in the collection. For an example, see
# delete-collection-procedure.
#
# This operation requires permissions to perform the
# `rekognition:DeleteCollection` action.
#
# @option params [required, String] :collection_id
# ID of the collection to delete.
#
# @return [Types::DeleteCollectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DeleteCollectionResponse#status_code #status_code} => Integer
#
#
# @example Example: To delete a collection
#
# # This operation deletes a Rekognition collection.
#
# resp = client.delete_collection({
# collection_id: "myphotos",
# })
#
# resp.to_h outputs the following:
# {
# status_code: 200,
# }
#
# @example Request syntax with placeholder values
#
# resp = client.delete_collection({
# collection_id: "CollectionId", # required
# })
#
# @example Response structure
#
# resp.status_code #=> Integer
#
# @overload delete_collection(params = {})
# @param [Hash] params ({})
def delete_collection(params = {}, options = {})
req = build_request(:delete_collection, params)
req.send_request(options)
end
# Deletes faces from a collection. You specify a collection ID and an
# array of face IDs to remove from the collection.
#
# This operation requires permissions to perform the
# `rekognition:DeleteFaces` action.
#
# @option params [required, String] :collection_id
# Collection from which to remove the specific faces.
#
# @option params [required, Array<String>] :face_ids
# An array of face IDs to delete.
#
# @return [Types::DeleteFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DeleteFacesResponse#deleted_faces #deleted_faces} => Array<String>
#
#
# @example Example: To delete a face
#
# # This operation deletes one or more faces from a Rekognition collection.
#
# resp = client.delete_faces({
# collection_id: "myphotos",
# face_ids: [
# "ff43d742-0c13-5d16-a3e8-03d3f58e980b",
# ],
# })
#
# resp.to_h outputs the following:
# {
# deleted_faces: [
# "ff43d742-0c13-5d16-a3e8-03d3f58e980b",
# ],
# }
#
# @example Request syntax with placeholder values
#
# resp = client.delete_faces({
# collection_id: "CollectionId", # required
# face_ids: ["FaceId"], # required
# })
#
# @example Response structure
#
# resp.deleted_faces #=> Array
# resp.deleted_faces[0] #=> String
#
# @overload delete_faces(params = {})
# @param [Hash] params ({})
def delete_faces(params = {}, options = {})
req = build_request(:delete_faces, params)
req.send_request(options)
end
# Deletes an Amazon Rekognition Custom Labels project. To delete a
# project you must first delete all models associated with the project.
# To delete a model, see DeleteProjectVersion.
#
# This operation requires permissions to perform the
# `rekognition:DeleteProject` action.
#
# @option params [required, String] :project_arn
# The Amazon Resource Name (ARN) of the project that you want to delete.
#
# @return [Types::DeleteProjectResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DeleteProjectResponse#status #status} => String
#
# @example Request syntax with placeholder values
#
# resp = client.delete_project({
# project_arn: "ProjectArn", # required
# })
#
# @example Response structure
#
# resp.status #=> String, one of "CREATING", "CREATED", "DELETING"
#
# @overload delete_project(params = {})
# @param [Hash] params ({})
def delete_project(params = {}, options = {})
req = build_request(:delete_project, params)
req.send_request(options)
end
# Deletes an Amazon Rekognition Custom Labels model.
#
# You can't delete a model if it is running or if it is training. To
# check the status of a model, use the `Status` field returned from
# DescribeProjectVersions. To stop a running model call
# StopProjectVersion. If the model is training, wait until it finishes.
#
# This operation requires permissions to perform the
# `rekognition:DeleteProjectVersion` action.
#
# @option params [required, String] :project_version_arn
# The Amazon Resource Name (ARN) of the model version that you want to
# delete.
#
# @return [Types::DeleteProjectVersionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DeleteProjectVersionResponse#status #status} => String
#
# @example Request syntax with placeholder values
#
# resp = client.delete_project_version({
# project_version_arn: "ProjectVersionArn", # required
# })
#
# @example Response structure
#
# resp.status #=> String, one of "TRAINING_IN_PROGRESS", "TRAINING_COMPLETED", "TRAINING_FAILED", "STARTING", "RUNNING", "FAILED", "STOPPING", "STOPPED", "DELETING"
#
# @overload delete_project_version(params = {})
# @param [Hash] params ({})
def delete_project_version(params = {}, options = {})
req = build_request(:delete_project_version, params)
req.send_request(options)
end
# Deletes the stream processor identified by `Name`. You assign the
# value for `Name` when you create the stream processor with
# CreateStreamProcessor. You might not be able to use the same name for
# a stream processor for a few seconds after calling
# `DeleteStreamProcessor`.
#
# @option params [required, String] :name
# The name of the stream processor you want to delete.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_stream_processor({
# name: "StreamProcessorName", # required
# })
#
# @overload delete_stream_processor(params = {})
# @param [Hash] params ({})
def delete_stream_processor(params = {}, options = {})
req = build_request(:delete_stream_processor, params)
req.send_request(options)
end
# Describes the specified collection. You can use `DescribeCollection`
# to get information, such as the number of faces indexed into a
# collection and the version of the model used by the collection for
# face detection.
#
# For more information, see Describing a Collection in the Amazon
# Rekognition Developer Guide.
#
# @option params [required, String] :collection_id
# The ID of the collection to describe.
#
# @return [Types::DescribeCollectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribeCollectionResponse#face_count #face_count} => Integer
# * {Types::DescribeCollectionResponse#face_model_version #face_model_version} => String
# * {Types::DescribeCollectionResponse#collection_arn #collection_arn} => String
# * {Types::DescribeCollectionResponse#creation_timestamp #creation_timestamp} => Time
#
# @example Request syntax with placeholder values
#
# resp = client.describe_collection({
# collection_id: "CollectionId", # required
# })
#
# @example Response structure
#
# resp.face_count #=> Integer
# resp.face_model_version #=> String
# resp.collection_arn #=> String
# resp.creation_timestamp #=> Time
#
# @overload describe_collection(params = {})
# @param [Hash] params ({})
def describe_collection(params = {}, options = {})
req = build_request(:describe_collection, params)
req.send_request(options)
end
# Lists and describes the models in an Amazon Rekognition Custom Labels
# project. You can specify up to 10 model versions in
# `ProjectVersionArns`. If you don't specify a value, descriptions for
# all models are returned.
#
# This operation requires permissions to perform the
# `rekognition:DescribeProjectVersions` action.
#
# @option params [required, String] :project_arn
# The Amazon Resource Name (ARN) of the project that contains the models
# you want to describe.
#
# @option params [Array<String>] :version_names
# A list of model version names that you want to describe. You can add
# up to 10 model version names to the list. If you don't specify a
# value, all model descriptions are returned. A version name is part of
# a model (ProjectVersion) ARN. For example,
# `my-model.2020-01-21T09.10.15` is the version name in the following
# ARN.
# `arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/my-model.2020-01-21T09.10.15/1234567890123`.
#
# @option params [String] :next_token
    #   If the previous response was incomplete (because there are more
    #   results to retrieve), Amazon Rekognition Custom Labels returns a pagination
# token in the response. You can use this pagination token to retrieve
# the next set of results.
#
# @option params [Integer] :max_results
# The maximum number of results to return per paginated call. The
# largest value you can specify is 100. If you specify a value greater
# than 100, a ValidationException error occurs. The default value is
# 100.
#
# @return [Types::DescribeProjectVersionsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribeProjectVersionsResponse#project_version_descriptions #project_version_descriptions} => Array<Types::ProjectVersionDescription>
# * {Types::DescribeProjectVersionsResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.describe_project_versions({
# project_arn: "ProjectArn", # required
# version_names: ["VersionName"],
# next_token: "ExtendedPaginationToken",
# max_results: 1,
# })
#
# @example Response structure
#
# resp.project_version_descriptions #=> Array
# resp.project_version_descriptions[0].project_version_arn #=> String
# resp.project_version_descriptions[0].creation_timestamp #=> Time
# resp.project_version_descriptions[0].min_inference_units #=> Integer
# resp.project_version_descriptions[0].status #=> String, one of "TRAINING_IN_PROGRESS", "TRAINING_COMPLETED", "TRAINING_FAILED", "STARTING", "RUNNING", "FAILED", "STOPPING", "STOPPED", "DELETING"
# resp.project_version_descriptions[0].status_message #=> String
# resp.project_version_descriptions[0].billable_training_time_in_seconds #=> Integer
# resp.project_version_descriptions[0].training_end_timestamp #=> Time
# resp.project_version_descriptions[0].output_config.s3_bucket #=> String
# resp.project_version_descriptions[0].output_config.s3_key_prefix #=> String
# resp.project_version_descriptions[0].training_data_result.input.assets #=> Array
# resp.project_version_descriptions[0].training_data_result.input.assets[0].ground_truth_manifest.s3_object.bucket #=> String
# resp.project_version_descriptions[0].training_data_result.input.assets[0].ground_truth_manifest.s3_object.name #=> String
# resp.project_version_descriptions[0].training_data_result.input.assets[0].ground_truth_manifest.s3_object.version #=> String
# resp.project_version_descriptions[0].training_data_result.output.assets #=> Array
# resp.project_version_descriptions[0].training_data_result.output.assets[0].ground_truth_manifest.s3_object.bucket #=> String
# resp.project_version_descriptions[0].training_data_result.output.assets[0].ground_truth_manifest.s3_object.name #=> String
# resp.project_version_descriptions[0].training_data_result.output.assets[0].ground_truth_manifest.s3_object.version #=> String
# resp.project_version_descriptions[0].training_data_result.validation.assets #=> Array
# resp.project_version_descriptions[0].training_data_result.validation.assets[0].ground_truth_manifest.s3_object.bucket #=> String
# resp.project_version_descriptions[0].training_data_result.validation.assets[0].ground_truth_manifest.s3_object.name #=> String
# resp.project_version_descriptions[0].training_data_result.validation.assets[0].ground_truth_manifest.s3_object.version #=> String
# resp.project_version_descriptions[0].testing_data_result.input.assets #=> Array
# resp.project_version_descriptions[0].testing_data_result.input.assets[0].ground_truth_manifest.s3_object.bucket #=> String
# resp.project_version_descriptions[0].testing_data_result.input.assets[0].ground_truth_manifest.s3_object.name #=> String
# resp.project_version_descriptions[0].testing_data_result.input.assets[0].ground_truth_manifest.s3_object.version #=> String
# resp.project_version_descriptions[0].testing_data_result.input.auto_create #=> Boolean
# resp.project_version_descriptions[0].testing_data_result.output.assets #=> Array
# resp.project_version_descriptions[0].testing_data_result.output.assets[0].ground_truth_manifest.s3_object.bucket #=> String
# resp.project_version_descriptions[0].testing_data_result.output.assets[0].ground_truth_manifest.s3_object.name #=> String
# resp.project_version_descriptions[0].testing_data_result.output.assets[0].ground_truth_manifest.s3_object.version #=> String
# resp.project_version_descriptions[0].testing_data_result.output.auto_create #=> Boolean
# resp.project_version_descriptions[0].testing_data_result.validation.assets #=> Array
# resp.project_version_descriptions[0].testing_data_result.validation.assets[0].ground_truth_manifest.s3_object.bucket #=> String
# resp.project_version_descriptions[0].testing_data_result.validation.assets[0].ground_truth_manifest.s3_object.name #=> String
# resp.project_version_descriptions[0].testing_data_result.validation.assets[0].ground_truth_manifest.s3_object.version #=> String
# resp.project_version_descriptions[0].evaluation_result.f1_score #=> Float
# resp.project_version_descriptions[0].evaluation_result.summary.s3_object.bucket #=> String
# resp.project_version_descriptions[0].evaluation_result.summary.s3_object.name #=> String
# resp.project_version_descriptions[0].evaluation_result.summary.s3_object.version #=> String
# resp.project_version_descriptions[0].manifest_summary.s3_object.bucket #=> String
# resp.project_version_descriptions[0].manifest_summary.s3_object.name #=> String
# resp.project_version_descriptions[0].manifest_summary.s3_object.version #=> String
# resp.next_token #=> String
#
#
# The following waiters are defined for this operation (see {Client#wait_until} for detailed usage):
#
# * project_version_running
# * project_version_training_completed
#
# @overload describe_project_versions(params = {})
# @param [Hash] params ({})
def describe_project_versions(params = {}, options = {})
req = build_request(:describe_project_versions, params)
req.send_request(options)
end
# Lists and gets information about your Amazon Rekognition Custom Labels
# projects.
#
# This operation requires permissions to perform the
# `rekognition:DescribeProjects` action.
#
# @option params [String] :next_token
    #   If the previous response was incomplete (because there are more
    #   results to retrieve), Amazon Rekognition Custom Labels returns a pagination
# token in the response. You can use this pagination token to retrieve
# the next set of results.
#
# @option params [Integer] :max_results
# The maximum number of results to return per paginated call. The
# largest value you can specify is 100. If you specify a value greater
# than 100, a ValidationException error occurs. The default value is
# 100.
#
# @return [Types::DescribeProjectsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribeProjectsResponse#project_descriptions #project_descriptions} => Array<Types::ProjectDescription>
# * {Types::DescribeProjectsResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.describe_projects({
# next_token: "ExtendedPaginationToken",
# max_results: 1,
# })
#
# @example Response structure
#
# resp.project_descriptions #=> Array
# resp.project_descriptions[0].project_arn #=> String
# resp.project_descriptions[0].creation_timestamp #=> Time
# resp.project_descriptions[0].status #=> String, one of "CREATING", "CREATED", "DELETING"
# resp.next_token #=> String
#
# @overload describe_projects(params = {})
# @param [Hash] params ({})
def describe_projects(params = {}, options = {})
req = build_request(:describe_projects, params)
req.send_request(options)
end
# Provides information about a stream processor created by
# CreateStreamProcessor. You can get information about the input and
# output streams, the input parameters for the face recognition being
# performed, and the current status of the stream processor.
#
# @option params [required, String] :name
# Name of the stream processor for which you want information.
#
# @return [Types::DescribeStreamProcessorResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribeStreamProcessorResponse#name #name} => String
# * {Types::DescribeStreamProcessorResponse#stream_processor_arn #stream_processor_arn} => String
# * {Types::DescribeStreamProcessorResponse#status #status} => String
# * {Types::DescribeStreamProcessorResponse#status_message #status_message} => String
# * {Types::DescribeStreamProcessorResponse#creation_timestamp #creation_timestamp} => Time
# * {Types::DescribeStreamProcessorResponse#last_update_timestamp #last_update_timestamp} => Time
# * {Types::DescribeStreamProcessorResponse#input #input} => Types::StreamProcessorInput
# * {Types::DescribeStreamProcessorResponse#output #output} => Types::StreamProcessorOutput
# * {Types::DescribeStreamProcessorResponse#role_arn #role_arn} => String
# * {Types::DescribeStreamProcessorResponse#settings #settings} => Types::StreamProcessorSettings
#
# @example Request syntax with placeholder values
#
# resp = client.describe_stream_processor({
# name: "StreamProcessorName", # required
# })
#
# @example Response structure
#
# resp.name #=> String
# resp.stream_processor_arn #=> String
# resp.status #=> String, one of "STOPPED", "STARTING", "RUNNING", "FAILED", "STOPPING"
# resp.status_message #=> String
# resp.creation_timestamp #=> Time
# resp.last_update_timestamp #=> Time
# resp.input.kinesis_video_stream.arn #=> String
# resp.output.kinesis_data_stream.arn #=> String
# resp.role_arn #=> String
# resp.settings.face_search.collection_id #=> String
# resp.settings.face_search.face_match_threshold #=> Float
#
# @overload describe_stream_processor(params = {})
# @param [Hash] params ({})
def describe_stream_processor(params = {}, options = {})
req = build_request(:describe_stream_processor, params)
req.send_request(options)
end
# Detects custom labels in a supplied image by using an Amazon
# Rekognition Custom Labels model.
#
# You specify which version of a model version to use by using the
# `ProjectVersionArn` input parameter.
#
# You pass the input image as base64-encoded image bytes or as a
# reference to an image in an Amazon S3 bucket. If you use the AWS CLI
# to call Amazon Rekognition operations, passing image bytes is not
# supported. The image must be either a PNG or JPEG formatted file.
#
# For each object that the model version detects on an image, the API
# returns a (`CustomLabel`) object in an array (`CustomLabels`). Each
# `CustomLabel` object provides the label name (`Name`), the level of
# confidence that the image contains the object (`Confidence`), and
# object location information, if it exists, for the label on the image
# (`Geometry`).
#
    # During training, the model calculates a threshold value that determines
    # if a prediction for a label is true. By default, `DetectCustomLabels`
# doesn't return labels whose confidence value is below the model's
# calculated threshold value. To filter labels that are returned,
# specify a value for `MinConfidence` that is higher than the model's
# calculated threshold. You can get the model's calculated threshold
# from the model's training results shown in the Amazon Rekognition
# Custom Labels console. To get all labels, regardless of confidence,
# specify a `MinConfidence` value of 0.
#
# You can also add the `MaxResults` parameter to limit the number of
# labels returned.
#
# This is a stateless API operation. That is, the operation does not
# persist any data.
#
# This operation requires permissions to perform the
# `rekognition:DetectCustomLabels` action.
#
# @option params [required, String] :project_version_arn
# The ARN of the model version that you want to use.
#
# @option params [required, Types::Image] :image
# Provides the input image either as bytes or an S3 object.
#
# You pass image bytes to an Amazon Rekognition API operation by using
# the `Bytes` property. For example, you would use the `Bytes` property
# to pass an image loaded from a local file system. Image bytes passed
# by using the `Bytes` property must be base64-encoded. Your code may
# not need to encode image bytes if you are using an AWS SDK to call
# Amazon Rekognition API operations.
#
# For more information, see Analyzing an Image Loaded from a Local File
# System in the Amazon Rekognition Developer Guide.
#
# You pass images stored in an S3 bucket to an Amazon Rekognition API
# operation by using the `S3Object` property. Images stored in an S3
# bucket do not need to be base64-encoded.
#
# The region for the S3 bucket containing the S3 object must match the
# region you use for Amazon Rekognition operations.
#
# If you use the AWS CLI to call Amazon Rekognition operations, passing
# image bytes using the Bytes property is not supported. You must first
# upload the image to an Amazon S3 bucket and then call the operation
# using the S3Object property.
#
# For Amazon Rekognition to process an S3 object, the user must have
# permission to access the S3 object. For more information, see Resource
# Based Policies in the Amazon Rekognition Developer Guide.
#
# @option params [Integer] :max_results
# Maximum number of results you want the service to return in the
# response. The service returns the specified number of highest
# confidence labels ranked from highest confidence to lowest.
#
# @option params [Float] :min_confidence
# Specifies the minimum confidence level for the labels to return.
# Amazon Rekognition doesn't return any labels with a confidence lower
    #   than this specified value. If you specify a value of 0, all labels
    #   are returned, regardless of the default thresholds that the model version
# applies.
#
# @return [Types::DetectCustomLabelsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DetectCustomLabelsResponse#custom_labels #custom_labels} => Array<Types::CustomLabel>
#
# @example Request syntax with placeholder values
#
# resp = client.detect_custom_labels({
# project_version_arn: "ProjectVersionArn", # required
# image: { # required
# bytes: "data",
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# max_results: 1,
# min_confidence: 1.0,
# })
#
# @example Response structure
#
# resp.custom_labels #=> Array
# resp.custom_labels[0].name #=> String
# resp.custom_labels[0].confidence #=> Float
# resp.custom_labels[0].geometry.bounding_box.width #=> Float
# resp.custom_labels[0].geometry.bounding_box.height #=> Float
# resp.custom_labels[0].geometry.bounding_box.left #=> Float
# resp.custom_labels[0].geometry.bounding_box.top #=> Float
# resp.custom_labels[0].geometry.polygon #=> Array
# resp.custom_labels[0].geometry.polygon[0].x #=> Float
# resp.custom_labels[0].geometry.polygon[0].y #=> Float
#
# @overload detect_custom_labels(params = {})
# @param [Hash] params ({})
def detect_custom_labels(params = {}, options = {})
req = build_request(:detect_custom_labels, params)
req.send_request(options)
end
# Detects faces within an image that is provided as input.
#
# `DetectFaces` detects the 100 largest faces in the image. For each
# face detected, the operation returns face details. These details
# include a bounding box of the face, a confidence value (that the
# bounding box contains a face), and a fixed set of attributes such as
# facial landmarks (for example, coordinates of eye and mouth), presence
# of beard, sunglasses, and so on.
#
# The face-detection algorithm is most effective on frontal faces. For
# non-frontal or obscured faces, the algorithm might not detect the
# faces or might detect faces with lower confidence.
#
# You pass the input image either as base64-encoded image bytes or as a
# reference to an image in an Amazon S3 bucket. If you use the AWS CLI
# to call Amazon Rekognition operations, passing image bytes is not
# supported. The image must be either a PNG or JPEG formatted file.
#
# <note markdown="1"> This is a stateless API operation. That is, the operation does not
# persist any data.
#
# </note>
#
# This operation requires permissions to perform the
# `rekognition:DetectFaces` action.
#
# @option params [required, Types::Image] :image
# The input image as base64-encoded bytes or an S3 object. If you use
# the AWS CLI to call Amazon Rekognition operations, passing
# base64-encoded image bytes is not supported.
#
# If you are using an AWS SDK to call Amazon Rekognition, you might not
# need to base64-encode image bytes passed using the `Bytes` field. For
# more information, see Images in the Amazon Rekognition developer
# guide.
#
# @option params [Array<String>] :attributes
# An array of facial attributes you want to be returned. This can be the
# default list of attributes or all attributes. If you don't specify a
# value for `Attributes` or if you specify `["DEFAULT"]`, the API
# returns the following subset of facial attributes: `BoundingBox`,
# `Confidence`, `Pose`, `Quality`, and `Landmarks`. If you provide
# `["ALL"]`, all facial attributes are returned, but the operation takes
# longer to complete.
#
# If you provide both, `["ALL", "DEFAULT"]`, the service uses a logical
# AND operator to determine which attributes to return (in this case,
# all attributes).
#
# @return [Types::DetectFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DetectFacesResponse#face_details #face_details} => Array<Types::FaceDetail>
# * {Types::DetectFacesResponse#orientation_correction #orientation_correction} => String
#
#
# @example Example: To detect faces in an image
#
# # This operation detects faces in an image stored in an AWS S3 bucket.
#
# resp = client.detect_faces({
# image: {
# s3_object: {
# bucket: "mybucket",
# name: "myphoto",
# },
# },
# })
#
# resp.to_h outputs the following:
# {
# face_details: [
# {
# bounding_box: {
# height: 0.18000000715255737,
# left: 0.5555555820465088,
# top: 0.33666667342185974,
# width: 0.23999999463558197,
# },
# confidence: 100,
# landmarks: [
# {
# type: "eyeLeft",
# x: 0.6394737362861633,
# y: 0.40819624066352844,
# },
# {
# type: "eyeRight",
# x: 0.7266660928726196,
# y: 0.41039225459098816,
# },
# {
# type: "eyeRight",
# x: 0.6912462115287781,
# y: 0.44240960478782654,
# },
# {
# type: "mouthDown",
# x: 0.6306198239326477,
# y: 0.46700039505958557,
# },
# {
# type: "mouthUp",
# x: 0.7215608954429626,
# y: 0.47114261984825134,
# },
# ],
# pose: {
# pitch: 4.050806522369385,
# roll: 0.9950747489929199,
# yaw: 13.693790435791016,
# },
# quality: {
# brightness: 37.60169982910156,
# sharpness: 80,
# },
# },
# ],
# orientation_correction: "ROTATE_0",
# }
#
# @example Request syntax with placeholder values
#
# resp = client.detect_faces({
# image: { # required
# bytes: "data",
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# attributes: ["DEFAULT"], # accepts DEFAULT, ALL
# })
#
# @example Response structure
#
# resp.face_details #=> Array
# resp.face_details[0].bounding_box.width #=> Float
# resp.face_details[0].bounding_box.height #=> Float
# resp.face_details[0].bounding_box.left #=> Float
# resp.face_details[0].bounding_box.top #=> Float
# resp.face_details[0].age_range.low #=> Integer
# resp.face_details[0].age_range.high #=> Integer
# resp.face_details[0].smile.value #=> Boolean
# resp.face_details[0].smile.confidence #=> Float
# resp.face_details[0].eyeglasses.value #=> Boolean
# resp.face_details[0].eyeglasses.confidence #=> Float
# resp.face_details[0].sunglasses.value #=> Boolean
# resp.face_details[0].sunglasses.confidence #=> Float
# resp.face_details[0].gender.value #=> String, one of "Male", "Female"
# resp.face_details[0].gender.confidence #=> Float
# resp.face_details[0].beard.value #=> Boolean
# resp.face_details[0].beard.confidence #=> Float
# resp.face_details[0].mustache.value #=> Boolean
# resp.face_details[0].mustache.confidence #=> Float
# resp.face_details[0].eyes_open.value #=> Boolean
# resp.face_details[0].eyes_open.confidence #=> Float
# resp.face_details[0].mouth_open.value #=> Boolean
# resp.face_details[0].mouth_open.confidence #=> Float
# resp.face_details[0].emotions #=> Array
# resp.face_details[0].emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN", "FEAR"
# resp.face_details[0].emotions[0].confidence #=> Float
# resp.face_details[0].landmarks #=> Array
# resp.face_details[0].landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
# resp.face_details[0].landmarks[0].x #=> Float
# resp.face_details[0].landmarks[0].y #=> Float
# resp.face_details[0].pose.roll #=> Float
# resp.face_details[0].pose.yaw #=> Float
# resp.face_details[0].pose.pitch #=> Float
# resp.face_details[0].quality.brightness #=> Float
# resp.face_details[0].quality.sharpness #=> Float
# resp.face_details[0].confidence #=> Float
# resp.orientation_correction #=> String, one of "ROTATE_0", "ROTATE_90", "ROTATE_180", "ROTATE_270"
#
# @overload detect_faces(params = {})
# @param [Hash] params ({})
def detect_faces(params = {}, options = {})
req = build_request(:detect_faces, params)
req.send_request(options)
end
# Detects instances of real-world entities within an image (JPEG or PNG)
# provided as input. This includes objects like flower, tree, and table;
# events like wedding, graduation, and birthday party; and concepts like
# landscape, evening, and nature.
#
# For an example, see Analyzing Images Stored in an Amazon S3 Bucket in
# the Amazon Rekognition Developer Guide.
#
# <note markdown="1"> `DetectLabels` does not support the detection of activities. However,
# activity detection is supported for label detection in videos. For
# more information, see StartLabelDetection in the Amazon Rekognition
# Developer Guide.
#
# </note>
#
# You pass the input image as base64-encoded image bytes or as a
# reference to an image in an Amazon S3 bucket. If you use the AWS CLI
# to call Amazon Rekognition operations, passing image bytes is not
# supported. The image must be either a PNG or JPEG formatted file.
#
# For each object, scene, and concept the API returns one or more
# labels. Each label provides the object name, and the level of
# confidence that the image contains the object. For example, suppose
# the input image has a lighthouse, the sea, and a rock. The response
# includes all three labels, one for each object.
#
# `\{Name: lighthouse, Confidence: 98.4629\}`
#
# `\{Name: rock,Confidence: 79.2097\}`
#
# ` \{Name: sea,Confidence: 75.061\}`
#
# In the preceding example, the operation returns one label for each of
# the three objects. The operation can also return multiple labels for
# the same object in the image. For example, if the input image shows a
# flower (for example, a tulip), the operation might return the
# following three labels.
#
# `\{Name: flower,Confidence: 99.0562\}`
#
# `\{Name: plant,Confidence: 99.0562\}`
#
# `\{Name: tulip,Confidence: 99.0562\}`
#
# In this example, the detection algorithm more precisely identifies the
# flower as a tulip.
#
# In response, the API returns an array of labels. In addition, the
# response also includes the orientation correction. Optionally, you can
# specify `MinConfidence` to control the confidence threshold for the
# labels returned. The default is 55%. You can also add the `MaxLabels`
# parameter to limit the number of labels returned.
#
# <note markdown="1"> If the object detected is a person, the operation doesn't provide the
# same facial details that the DetectFaces operation provides.
#
# </note>
#
# `DetectLabels` returns bounding boxes for instances of common object
# labels in an array of Instance objects. An `Instance` object contains
# a BoundingBox object, for the location of the label on the image. It
# also includes the confidence by which the bounding box was detected.
#
# `DetectLabels` also returns a hierarchical taxonomy of detected
# labels. For example, a detected car might be assigned the label *car*.
# The label *car* has two parent labels: *Vehicle* (its parent) and
# *Transportation* (its grandparent). The response returns the entire
# list of ancestors for a label. Each ancestor is a unique label in the
# response. In the previous example, *Car*, *Vehicle*, and
# *Transportation* are returned as unique labels in the response.
#
# This is a stateless API operation. That is, the operation does not
# persist any data.
#
# This operation requires permissions to perform the
# `rekognition:DetectLabels` action.
#
# @option params [required, Types::Image] :image
# The input image as base64-encoded bytes or an S3 object. If you use
# the AWS CLI to call Amazon Rekognition operations, passing image bytes
# is not supported. Images stored in an S3 Bucket do not need to be
# base64-encoded.
#
# If you are using an AWS SDK to call Amazon Rekognition, you might not
# need to base64-encode image bytes passed using the `Bytes` field. For
# more information, see Images in the Amazon Rekognition developer
# guide.
#
# @option params [Integer] :max_labels
# Maximum number of labels you want the service to return in the
# response. The service returns the specified number of highest
# confidence labels.
#
# @option params [Float] :min_confidence
# Specifies the minimum confidence level for the labels to return.
# Amazon Rekognition doesn't return any labels with confidence lower
# than this specified value.
#
# If `MinConfidence` is not specified, the operation returns labels with
    #   confidence values greater than or equal to 55 percent.
#
# @return [Types::DetectLabelsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DetectLabelsResponse#labels #labels} => Array<Types::Label>
# * {Types::DetectLabelsResponse#orientation_correction #orientation_correction} => String
# * {Types::DetectLabelsResponse#label_model_version #label_model_version} => String
#
#
# @example Example: To detect labels
#
# # This operation detects labels in the supplied image
#
# resp = client.detect_labels({
# image: {
# s3_object: {
# bucket: "mybucket",
# name: "myphoto",
# },
# },
# max_labels: 123,
# min_confidence: 70,
# })
#
# resp.to_h outputs the following:
# {
# labels: [
# {
# confidence: 99.25072479248047,
# name: "People",
# },
# {
# confidence: 99.25074005126953,
# name: "Person",
# },
# ],
# }
#
# @example Request syntax with placeholder values
#
# resp = client.detect_labels({
# image: { # required
# bytes: "data",
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# max_labels: 1,
# min_confidence: 1.0,
# })
#
# @example Response structure
#
# resp.labels #=> Array
# resp.labels[0].name #=> String
# resp.labels[0].confidence #=> Float
# resp.labels[0].instances #=> Array
# resp.labels[0].instances[0].bounding_box.width #=> Float
# resp.labels[0].instances[0].bounding_box.height #=> Float
# resp.labels[0].instances[0].bounding_box.left #=> Float
# resp.labels[0].instances[0].bounding_box.top #=> Float
# resp.labels[0].instances[0].confidence #=> Float
# resp.labels[0].parents #=> Array
# resp.labels[0].parents[0].name #=> String
# resp.orientation_correction #=> String, one of "ROTATE_0", "ROTATE_90", "ROTATE_180", "ROTATE_270"
# resp.label_model_version #=> String
#
# @overload detect_labels(params = {})
# @param [Hash] params ({})
def detect_labels(params = {}, options = {})
req = build_request(:detect_labels, params)
req.send_request(options)
end
# Detects unsafe content in a specified JPEG or PNG format image. Use
# `DetectModerationLabels` to moderate images depending on your
# requirements. For example, you might want to filter images that
# contain nudity, but not images containing suggestive content.
#
# To filter images, use the labels returned by `DetectModerationLabels`
# to determine which types of content are appropriate.
#
# For information about moderation labels, see Detecting Unsafe Content
# in the Amazon Rekognition Developer Guide.
#
# You pass the input image either as base64-encoded image bytes or as a
# reference to an image in an Amazon S3 bucket. If you use the AWS CLI
# to call Amazon Rekognition operations, passing image bytes is not
# supported. The image must be either a PNG or JPEG formatted file.
#
# @option params [required, Types::Image] :image
# The input image as base64-encoded bytes or an S3 object. If you use
# the AWS CLI to call Amazon Rekognition operations, passing
# base64-encoded image bytes is not supported.
#
# If you are using an AWS SDK to call Amazon Rekognition, you might not
# need to base64-encode image bytes passed using the `Bytes` field. For
# more information, see Images in the Amazon Rekognition developer
# guide.
#
# @option params [Float] :min_confidence
# Specifies the minimum confidence level for the labels to return.
# Amazon Rekognition doesn't return any labels with a confidence level
# lower than this specified value.
#
# If you don't specify `MinConfidence`, the operation returns labels
# with confidence values greater than or equal to 50 percent.
#
# @option params [Types::HumanLoopConfig] :human_loop_config
# Sets up the configuration for human evaluation, including the
# FlowDefinition the image will be sent to.
#
# @return [Types::DetectModerationLabelsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DetectModerationLabelsResponse#moderation_labels #moderation_labels} => Array<Types::ModerationLabel>
# * {Types::DetectModerationLabelsResponse#moderation_model_version #moderation_model_version} => String
# * {Types::DetectModerationLabelsResponse#human_loop_activation_output #human_loop_activation_output} => Types::HumanLoopActivationOutput
#
# @example Request syntax with placeholder values
#
# resp = client.detect_moderation_labels({
# image: { # required
# bytes: "data",
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# min_confidence: 1.0,
# human_loop_config: {
# human_loop_name: "HumanLoopName", # required
# flow_definition_arn: "FlowDefinitionArn", # required
# data_attributes: {
# content_classifiers: ["FreeOfPersonallyIdentifiableInformation"], # accepts FreeOfPersonallyIdentifiableInformation, FreeOfAdultContent
# },
# },
# })
#
# @example Response structure
#
# resp.moderation_labels #=> Array
# resp.moderation_labels[0].confidence #=> Float
# resp.moderation_labels[0].name #=> String
# resp.moderation_labels[0].parent_name #=> String
# resp.moderation_model_version #=> String
# resp.human_loop_activation_output.human_loop_arn #=> String
# resp.human_loop_activation_output.human_loop_activation_reasons #=> Array
# resp.human_loop_activation_output.human_loop_activation_reasons[0] #=> String
# resp.human_loop_activation_output.human_loop_activation_conditions_evaluation_results #=> String
#
# @overload detect_moderation_labels(params = {})
# @param [Hash] params ({})
def detect_moderation_labels(params = {}, options = {})
req = build_request(:detect_moderation_labels, params)
req.send_request(options)
end
# Detects Personal Protective Equipment (PPE) worn by people detected in
# an image. Amazon Rekognition can detect the following types of PPE.
#
# * Face cover
#
# * Hand cover
#
# * Head cover
#
# You pass the input image as base64-encoded image bytes or as a
# reference to an image in an Amazon S3 bucket. The image must be either
# a PNG or JPG formatted file.
#
# `DetectProtectiveEquipment` detects PPE worn by up to 15 persons
# detected in an image.
#
# For each person detected in the image the API returns an array of body
# parts (face, head, left-hand, right-hand). For each body part, an
# array of detected items of PPE is returned, including an indicator of
# whether or not the PPE covers the body part. The API returns the
# confidence it has in each detection (person, PPE, body part and body
# part coverage). It also returns a bounding box (BoundingBox) for each
# detected person and each detected item of PPE.
#
# You can optionally request a summary of detected PPE items with the
# `SummarizationAttributes` input parameter. The summary provides the
# following information.
#
# * The persons detected as wearing all of the types of PPE that you
# specify.
#
# * The persons detected as not wearing all of the types PPE that you
# specify.
#
# * The persons detected where PPE adornment could not be determined.
#
# This is a stateless API operation. That is, the operation does not
# persist any data.
#
# This operation requires permissions to perform the
# `rekognition:DetectProtectiveEquipment` action.
#
# @option params [required, Types::Image] :image
# The image in which you want to detect PPE on detected persons. The
# image can be passed as image bytes or you can reference an image
# stored in an Amazon S3 bucket.
#
# @option params [Types::ProtectiveEquipmentSummarizationAttributes] :summarization_attributes
# An array of PPE types that you want to summarize.
#
# @return [Types::DetectProtectiveEquipmentResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DetectProtectiveEquipmentResponse#protective_equipment_model_version #protective_equipment_model_version} => String
# * {Types::DetectProtectiveEquipmentResponse#persons #persons} => Array<Types::ProtectiveEquipmentPerson>
# * {Types::DetectProtectiveEquipmentResponse#summary #summary} => Types::ProtectiveEquipmentSummary
#
# @example Request syntax with placeholder values
#
# resp = client.detect_protective_equipment({
# image: { # required
# bytes: "data",
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# summarization_attributes: {
# min_confidence: 1.0, # required
# required_equipment_types: ["FACE_COVER"], # required, accepts FACE_COVER, HAND_COVER, HEAD_COVER
# },
# })
#
# @example Response structure
#
# resp.protective_equipment_model_version #=> String
# resp.persons #=> Array
# resp.persons[0].body_parts #=> Array
# resp.persons[0].body_parts[0].name #=> String, one of "FACE", "HEAD", "LEFT_HAND", "RIGHT_HAND"
# resp.persons[0].body_parts[0].confidence #=> Float
# resp.persons[0].body_parts[0].equipment_detections #=> Array
# resp.persons[0].body_parts[0].equipment_detections[0].bounding_box.width #=> Float
# resp.persons[0].body_parts[0].equipment_detections[0].bounding_box.height #=> Float
# resp.persons[0].body_parts[0].equipment_detections[0].bounding_box.left #=> Float
# resp.persons[0].body_parts[0].equipment_detections[0].bounding_box.top #=> Float
# resp.persons[0].body_parts[0].equipment_detections[0].confidence #=> Float
# resp.persons[0].body_parts[0].equipment_detections[0].type #=> String, one of "FACE_COVER", "HAND_COVER", "HEAD_COVER"
# resp.persons[0].body_parts[0].equipment_detections[0].covers_body_part.confidence #=> Float
# resp.persons[0].body_parts[0].equipment_detections[0].covers_body_part.value #=> Boolean
# resp.persons[0].bounding_box.width #=> Float
# resp.persons[0].bounding_box.height #=> Float
# resp.persons[0].bounding_box.left #=> Float
# resp.persons[0].bounding_box.top #=> Float
# resp.persons[0].confidence #=> Float
# resp.persons[0].id #=> Integer
# resp.summary.persons_with_required_equipment #=> Array
# resp.summary.persons_with_required_equipment[0] #=> Integer
# resp.summary.persons_without_required_equipment #=> Array
# resp.summary.persons_without_required_equipment[0] #=> Integer
# resp.summary.persons_indeterminate #=> Array
# resp.summary.persons_indeterminate[0] #=> Integer
#
# @overload detect_protective_equipment(params = {})
# @param [Hash] params ({})
def detect_protective_equipment(params = {}, options = {})
req = build_request(:detect_protective_equipment, params)
req.send_request(options)
end
# Detects text in the input image and converts it into machine-readable
# text.
#
# Pass the input image as base64-encoded image bytes or as a reference
# to an image in an Amazon S3 bucket. If you use the AWS CLI to call
# Amazon Rekognition operations, you must pass it as a reference to an
# image in an Amazon S3 bucket. For the AWS CLI, passing image bytes is
# not supported. The image must be either a .png or .jpeg formatted
# file.
#
# The `DetectText` operation returns text in an array of TextDetection
# elements, `TextDetections`. Each `TextDetection` element provides
# information about a single word or line of text that was detected in
# the image.
#
# A word is one or more ISO basic latin script characters that are not
# separated by spaces. `DetectText` can detect up to 50 words in an
# image.
#
# A line is a string of equally spaced words. A line isn't necessarily
# a complete sentence. For example, a driver's license number is
# detected as a line. A line ends when there is no aligned text after
# it. Also, a line ends when there is a large gap between words,
# relative to the length of the words. This means, depending on the gap
# between words, Amazon Rekognition may detect multiple lines in text
# aligned in the same direction. Periods don't represent the end of a
# line. If a sentence spans multiple lines, the `DetectText` operation
# returns multiple lines.
#
# To determine whether a `TextDetection` element is a line of text or a
# word, use the `TextDetection` object `Type` field.
#
# To be detected, text must be within +/- 90 degrees orientation of the
# horizontal axis.
#
# For more information, see DetectText in the Amazon Rekognition
# Developer Guide.
#
# @option params [required, Types::Image] :image
# The input image as base64-encoded bytes or an Amazon S3 object. If you
# use the AWS CLI to call Amazon Rekognition operations, you can't pass
# image bytes.
#
# If you are using an AWS SDK to call Amazon Rekognition, you might not
# need to base64-encode image bytes passed using the `Bytes` field. For
# more information, see Images in the Amazon Rekognition developer
# guide.
#
# @option params [Types::DetectTextFilters] :filters
# Optional parameters that let you set the criteria that the text must
# meet to be included in your response.
#
# @return [Types::DetectTextResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DetectTextResponse#text_detections #text_detections} => Array<Types::TextDetection>
# * {Types::DetectTextResponse#text_model_version #text_model_version} => String
#
# @example Request syntax with placeholder values
#
# resp = client.detect_text({
# image: { # required
# bytes: "data",
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# filters: {
# word_filter: {
# min_confidence: 1.0,
# min_bounding_box_height: 1.0,
# min_bounding_box_width: 1.0,
# },
# regions_of_interest: [
# {
# bounding_box: {
# width: 1.0,
# height: 1.0,
# left: 1.0,
# top: 1.0,
# },
# },
# ],
# },
# })
#
# @example Response structure
#
# resp.text_detections #=> Array
# resp.text_detections[0].detected_text #=> String
# resp.text_detections[0].type #=> String, one of "LINE", "WORD"
# resp.text_detections[0].id #=> Integer
# resp.text_detections[0].parent_id #=> Integer
# resp.text_detections[0].confidence #=> Float
# resp.text_detections[0].geometry.bounding_box.width #=> Float
# resp.text_detections[0].geometry.bounding_box.height #=> Float
# resp.text_detections[0].geometry.bounding_box.left #=> Float
# resp.text_detections[0].geometry.bounding_box.top #=> Float
# resp.text_detections[0].geometry.polygon #=> Array
# resp.text_detections[0].geometry.polygon[0].x #=> Float
# resp.text_detections[0].geometry.polygon[0].y #=> Float
# resp.text_model_version #=> String
#
# @overload detect_text(params = {})
# @param [Hash] params ({})
def detect_text(params = {}, options = {})
req = build_request(:detect_text, params)
req.send_request(options)
end
# Gets the name and additional information about a celebrity based on
# his or her Amazon Rekognition ID. The additional information is
# returned as an array of URLs. If there is no additional information
# about the celebrity, this list is empty.
#
# For more information, see Recognizing Celebrities in an Image in the
# Amazon Rekognition Developer Guide.
#
# This operation requires permissions to perform the
# `rekognition:GetCelebrityInfo` action.
#
# @option params [required, String] :id
# The ID for the celebrity. You get the celebrity ID from a call to the
# RecognizeCelebrities operation, which recognizes celebrities in an
# image.
#
# @return [Types::GetCelebrityInfoResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetCelebrityInfoResponse#urls #urls} => Array<String>
# * {Types::GetCelebrityInfoResponse#name #name} => String
#
# @example Request syntax with placeholder values
#
# resp = client.get_celebrity_info({
# id: "RekognitionUniqueId", # required
# })
#
# @example Response structure
#
# resp.urls #=> Array
# resp.urls[0] #=> String
# resp.name #=> String
#
# @overload get_celebrity_info(params = {})
# @param [Hash] params ({})
def get_celebrity_info(params = {}, options = {})
req = build_request(:get_celebrity_info, params)
req.send_request(options)
end
# Gets the celebrity recognition results for a Amazon Rekognition Video
# analysis started by StartCelebrityRecognition.
#
# Celebrity recognition in a video is an asynchronous operation.
# Analysis is started by a call to StartCelebrityRecognition which
# returns a job identifier (`JobId`). When the celebrity recognition
# operation finishes, Amazon Rekognition Video publishes a completion
# status to the Amazon Simple Notification Service topic registered in
# the initial call to `StartCelebrityRecognition`. To get the results of
# the celebrity recognition analysis, first check that the status value
# published to the Amazon SNS topic is `SUCCEEDED`. If so, call
# `GetCelebrityDetection` and pass the job identifier (`JobId`) from the
# initial call to `StartCelebrityDetection`.
#
# For more information, see Working With Stored Videos in the Amazon
# Rekognition Developer Guide.
#
# `GetCelebrityRecognition` returns detected celebrities and the time(s)
# they are detected in an array (`Celebrities`) of CelebrityRecognition
# objects. Each `CelebrityRecognition` contains information about the
# celebrity in a CelebrityDetail object and the time, `Timestamp`, the
# celebrity was detected.
#
# <note markdown="1"> `GetCelebrityRecognition` only returns the default facial attributes
# (`BoundingBox`, `Confidence`, `Landmarks`, `Pose`, and `Quality`). The
# other facial attributes listed in the `Face` object of the following
# response syntax are not returned. For more information, see FaceDetail
# in the Amazon Rekognition Developer Guide.
#
# </note>
#
# By default, the `Celebrities` array is sorted by time (milliseconds
# from the start of the video). You can also sort the array by celebrity
# by specifying the value `ID` in the `SortBy` input parameter.
#
    # The `CelebrityDetail` object includes the celebrity identifier and
# additional information urls. If you don't store the additional
# information urls, you can get them later by calling GetCelebrityInfo
    # with the celebrity identifier.
#
# No information is returned for faces not recognized as celebrities.
#
# Use MaxResults parameter to limit the number of labels returned. If
# there are more results than specified in `MaxResults`, the value of
# `NextToken` in the operation response contains a pagination token for
# getting the next set of results. To get the next page of results, call
# `GetCelebrityDetection` and populate the `NextToken` request parameter
# with the token value returned from the previous call to
# `GetCelebrityRecognition`.
#
# @option params [required, String] :job_id
# Job identifier for the required celebrity recognition analysis. You
    #   can get the job identifier from a call to `StartCelebrityRecognition`.
#
# @option params [Integer] :max_results
# Maximum number of results to return per paginated call. The largest
# value you can specify is 1000. If you specify a value greater than
# 1000, a maximum of 1000 results is returned. The default value is
# 1000.
#
# @option params [String] :next_token
    #   If the previous response was incomplete (because there are more
    #   recognized celebrities to retrieve), Amazon Rekognition Video returns
# a pagination token in the response. You can use this pagination token
# to retrieve the next set of celebrities.
#
# @option params [String] :sort_by
# Sort to use for celebrities returned in `Celebrities` field. Specify
# `ID` to sort by the celebrity identifier, specify `TIMESTAMP` to sort
# by the time the celebrity was recognized.
#
# @return [Types::GetCelebrityRecognitionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetCelebrityRecognitionResponse#job_status #job_status} => String
# * {Types::GetCelebrityRecognitionResponse#status_message #status_message} => String
# * {Types::GetCelebrityRecognitionResponse#video_metadata #video_metadata} => Types::VideoMetadata
# * {Types::GetCelebrityRecognitionResponse#next_token #next_token} => String
# * {Types::GetCelebrityRecognitionResponse#celebrities #celebrities} => Array<Types::CelebrityRecognition>
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.get_celebrity_recognition({
# job_id: "JobId", # required
# max_results: 1,
# next_token: "PaginationToken",
# sort_by: "ID", # accepts ID, TIMESTAMP
# })
#
# @example Response structure
#
# resp.job_status #=> String, one of "IN_PROGRESS", "SUCCEEDED", "FAILED"
# resp.status_message #=> String
# resp.video_metadata.codec #=> String
# resp.video_metadata.duration_millis #=> Integer
# resp.video_metadata.format #=> String
# resp.video_metadata.frame_rate #=> Float
# resp.video_metadata.frame_height #=> Integer
# resp.video_metadata.frame_width #=> Integer
# resp.next_token #=> String
# resp.celebrities #=> Array
# resp.celebrities[0].timestamp #=> Integer
# resp.celebrities[0].celebrity.urls #=> Array
# resp.celebrities[0].celebrity.urls[0] #=> String
# resp.celebrities[0].celebrity.name #=> String
# resp.celebrities[0].celebrity.id #=> String
# resp.celebrities[0].celebrity.confidence #=> Float
# resp.celebrities[0].celebrity.bounding_box.width #=> Float
# resp.celebrities[0].celebrity.bounding_box.height #=> Float
# resp.celebrities[0].celebrity.bounding_box.left #=> Float
# resp.celebrities[0].celebrity.bounding_box.top #=> Float
# resp.celebrities[0].celebrity.face.bounding_box.width #=> Float
# resp.celebrities[0].celebrity.face.bounding_box.height #=> Float
# resp.celebrities[0].celebrity.face.bounding_box.left #=> Float
# resp.celebrities[0].celebrity.face.bounding_box.top #=> Float
# resp.celebrities[0].celebrity.face.age_range.low #=> Integer
# resp.celebrities[0].celebrity.face.age_range.high #=> Integer
# resp.celebrities[0].celebrity.face.smile.value #=> Boolean
# resp.celebrities[0].celebrity.face.smile.confidence #=> Float
# resp.celebrities[0].celebrity.face.eyeglasses.value #=> Boolean
# resp.celebrities[0].celebrity.face.eyeglasses.confidence #=> Float
# resp.celebrities[0].celebrity.face.sunglasses.value #=> Boolean
# resp.celebrities[0].celebrity.face.sunglasses.confidence #=> Float
# resp.celebrities[0].celebrity.face.gender.value #=> String, one of "Male", "Female"
# resp.celebrities[0].celebrity.face.gender.confidence #=> Float
# resp.celebrities[0].celebrity.face.beard.value #=> Boolean
# resp.celebrities[0].celebrity.face.beard.confidence #=> Float
# resp.celebrities[0].celebrity.face.mustache.value #=> Boolean
# resp.celebrities[0].celebrity.face.mustache.confidence #=> Float
# resp.celebrities[0].celebrity.face.eyes_open.value #=> Boolean
# resp.celebrities[0].celebrity.face.eyes_open.confidence #=> Float
# resp.celebrities[0].celebrity.face.mouth_open.value #=> Boolean
# resp.celebrities[0].celebrity.face.mouth_open.confidence #=> Float
# resp.celebrities[0].celebrity.face.emotions #=> Array
# resp.celebrities[0].celebrity.face.emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN", "FEAR"
# resp.celebrities[0].celebrity.face.emotions[0].confidence #=> Float
# resp.celebrities[0].celebrity.face.landmarks #=> Array
# resp.celebrities[0].celebrity.face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
# resp.celebrities[0].celebrity.face.landmarks[0].x #=> Float
# resp.celebrities[0].celebrity.face.landmarks[0].y #=> Float
# resp.celebrities[0].celebrity.face.pose.roll #=> Float
# resp.celebrities[0].celebrity.face.pose.yaw #=> Float
# resp.celebrities[0].celebrity.face.pose.pitch #=> Float
# resp.celebrities[0].celebrity.face.quality.brightness #=> Float
# resp.celebrities[0].celebrity.face.quality.sharpness #=> Float
# resp.celebrities[0].celebrity.face.confidence #=> Float
#
# @overload get_celebrity_recognition(params = {})
# @param [Hash] params ({})
def get_celebrity_recognition(params = {}, options = {})
req = build_request(:get_celebrity_recognition, params)
req.send_request(options)
end
    # Gets the unsafe content analysis results for an Amazon Rekognition
# Video analysis started by StartContentModeration.
#
# Unsafe content analysis of a video is an asynchronous operation. You
# start analysis by calling StartContentModeration which returns a job
# identifier (`JobId`). When analysis finishes, Amazon Rekognition Video
# publishes a completion status to the Amazon Simple Notification
# Service topic registered in the initial call to
# `StartContentModeration`. To get the results of the unsafe content
# analysis, first check that the status value published to the Amazon
# SNS topic is `SUCCEEDED`. If so, call `GetContentModeration` and pass
# the job identifier (`JobId`) from the initial call to
# `StartContentModeration`.
#
    # For more information, see Working with Stored Videos in the Amazon
    # Rekognition Developers Guide.
#
# `GetContentModeration` returns detected unsafe content labels, and the
# time they are detected, in an array, `ModerationLabels`, of
# ContentModerationDetection objects.
#
# By default, the moderated labels are returned sorted by time, in
# milliseconds from the start of the video. You can also sort them by
# moderated label by specifying `NAME` for the `SortBy` input parameter.
#
# Since video analysis can return a large number of results, use the
# `MaxResults` parameter to limit the number of labels returned in a
# single call to `GetContentModeration`. If there are more results than
# specified in `MaxResults`, the value of `NextToken` in the operation
# response contains a pagination token for getting the next set of
# results. To get the next page of results, call `GetContentModeration`
# and populate the `NextToken` request parameter with the value of
# `NextToken` returned from the previous call to `GetContentModeration`.
#
# For more information, see Detecting Unsafe Content in the Amazon
# Rekognition Developer Guide.
#
# @option params [required, String] :job_id
# The identifier for the unsafe content job. Use `JobId` to identify the
# job in a subsequent call to `GetContentModeration`.
#
# @option params [Integer] :max_results
# Maximum number of results to return per paginated call. The largest
# value you can specify is 1000. If you specify a value greater than
# 1000, a maximum of 1000 results is returned. The default value is
# 1000.
#
# @option params [String] :next_token
# If the previous response was incomplete (because there is more data to
# retrieve), Amazon Rekognition returns a pagination token in the
# response. You can use this pagination token to retrieve the next set
# of unsafe content labels.
#
# @option params [String] :sort_by
# Sort to use for elements in the `ModerationLabelDetections` array. Use
# `TIMESTAMP` to sort array elements by the time labels are detected.
# Use `NAME` to alphabetically group elements for a label together.
# Within each label group, the array element are sorted by detection
# confidence. The default sort is by `TIMESTAMP`.
#
# @return [Types::GetContentModerationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetContentModerationResponse#job_status #job_status} => String
# * {Types::GetContentModerationResponse#status_message #status_message} => String
# * {Types::GetContentModerationResponse#video_metadata #video_metadata} => Types::VideoMetadata
# * {Types::GetContentModerationResponse#moderation_labels #moderation_labels} => Array<Types::ContentModerationDetection>
# * {Types::GetContentModerationResponse#next_token #next_token} => String
# * {Types::GetContentModerationResponse#moderation_model_version #moderation_model_version} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.get_content_moderation({
# job_id: "JobId", # required
# max_results: 1,
# next_token: "PaginationToken",
# sort_by: "NAME", # accepts NAME, TIMESTAMP
# })
#
# @example Response structure
#
# resp.job_status #=> String, one of "IN_PROGRESS", "SUCCEEDED", "FAILED"
# resp.status_message #=> String
# resp.video_metadata.codec #=> String
# resp.video_metadata.duration_millis #=> Integer
# resp.video_metadata.format #=> String
# resp.video_metadata.frame_rate #=> Float
# resp.video_metadata.frame_height #=> Integer
# resp.video_metadata.frame_width #=> Integer
# resp.moderation_labels #=> Array
# resp.moderation_labels[0].timestamp #=> Integer
# resp.moderation_labels[0].moderation_label.confidence #=> Float
# resp.moderation_labels[0].moderation_label.name #=> String
# resp.moderation_labels[0].moderation_label.parent_name #=> String
# resp.next_token #=> String
# resp.moderation_model_version #=> String
#
# @overload get_content_moderation(params = {})
# @param [Hash] params ({})
def get_content_moderation(params = {}, options = {})
req = build_request(:get_content_moderation, params)
req.send_request(options)
end
    # Gets face detection results for an Amazon Rekognition Video analysis
# started by StartFaceDetection.
#
# Face detection with Amazon Rekognition Video is an asynchronous
# operation. You start face detection by calling StartFaceDetection
# which returns a job identifier (`JobId`). When the face detection
# operation finishes, Amazon Rekognition Video publishes a completion
# status to the Amazon Simple Notification Service topic registered in
# the initial call to `StartFaceDetection`. To get the results of the
# face detection operation, first check that the status value published
# to the Amazon SNS topic is `SUCCEEDED`. If so, call GetFaceDetection
# and pass the job identifier (`JobId`) from the initial call to
# `StartFaceDetection`.
#
# `GetFaceDetection` returns an array of detected faces (`Faces`) sorted
# by the time the faces were detected.
#
# Use MaxResults parameter to limit the number of labels returned. If
# there are more results than specified in `MaxResults`, the value of
# `NextToken` in the operation response contains a pagination token for
# getting the next set of results. To get the next page of results, call
# `GetFaceDetection` and populate the `NextToken` request parameter with
# the token value returned from the previous call to `GetFaceDetection`.
#
# @option params [required, String] :job_id
# Unique identifier for the face detection job. The `JobId` is returned
# from `StartFaceDetection`.
#
# @option params [Integer] :max_results
# Maximum number of results to return per paginated call. The largest
# value you can specify is 1000. If you specify a value greater than
# 1000, a maximum of 1000 results is returned. The default value is
# 1000.
#
# @option params [String] :next_token
# If the previous response was incomplete (because there are more faces
# to retrieve), Amazon Rekognition Video returns a pagination token in
# the response. You can use this pagination token to retrieve the next
# set of faces.
#
# @return [Types::GetFaceDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetFaceDetectionResponse#job_status #job_status} => String
# * {Types::GetFaceDetectionResponse#status_message #status_message} => String
# * {Types::GetFaceDetectionResponse#video_metadata #video_metadata} => Types::VideoMetadata
# * {Types::GetFaceDetectionResponse#next_token #next_token} => String
# * {Types::GetFaceDetectionResponse#faces #faces} => Array<Types::FaceDetection>
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.get_face_detection({
# job_id: "JobId", # required
# max_results: 1,
# next_token: "PaginationToken",
# })
#
# @example Response structure
#
# resp.job_status #=> String, one of "IN_PROGRESS", "SUCCEEDED", "FAILED"
# resp.status_message #=> String
# resp.video_metadata.codec #=> String
# resp.video_metadata.duration_millis #=> Integer
# resp.video_metadata.format #=> String
# resp.video_metadata.frame_rate #=> Float
# resp.video_metadata.frame_height #=> Integer
# resp.video_metadata.frame_width #=> Integer
# resp.next_token #=> String
# resp.faces #=> Array
# resp.faces[0].timestamp #=> Integer
# resp.faces[0].face.bounding_box.width #=> Float
# resp.faces[0].face.bounding_box.height #=> Float
# resp.faces[0].face.bounding_box.left #=> Float
# resp.faces[0].face.bounding_box.top #=> Float
# resp.faces[0].face.age_range.low #=> Integer
# resp.faces[0].face.age_range.high #=> Integer
# resp.faces[0].face.smile.value #=> Boolean
# resp.faces[0].face.smile.confidence #=> Float
# resp.faces[0].face.eyeglasses.value #=> Boolean
# resp.faces[0].face.eyeglasses.confidence #=> Float
# resp.faces[0].face.sunglasses.value #=> Boolean
# resp.faces[0].face.sunglasses.confidence #=> Float
# resp.faces[0].face.gender.value #=> String, one of "Male", "Female"
# resp.faces[0].face.gender.confidence #=> Float
# resp.faces[0].face.beard.value #=> Boolean
# resp.faces[0].face.beard.confidence #=> Float
# resp.faces[0].face.mustache.value #=> Boolean
# resp.faces[0].face.mustache.confidence #=> Float
# resp.faces[0].face.eyes_open.value #=> Boolean
# resp.faces[0].face.eyes_open.confidence #=> Float
# resp.faces[0].face.mouth_open.value #=> Boolean
# resp.faces[0].face.mouth_open.confidence #=> Float
# resp.faces[0].face.emotions #=> Array
# resp.faces[0].face.emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN", "FEAR"
# resp.faces[0].face.emotions[0].confidence #=> Float
# resp.faces[0].face.landmarks #=> Array
# resp.faces[0].face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
# resp.faces[0].face.landmarks[0].x #=> Float
# resp.faces[0].face.landmarks[0].y #=> Float
# resp.faces[0].face.pose.roll #=> Float
# resp.faces[0].face.pose.yaw #=> Float
# resp.faces[0].face.pose.pitch #=> Float
# resp.faces[0].face.quality.brightness #=> Float
# resp.faces[0].face.quality.sharpness #=> Float
# resp.faces[0].face.confidence #=> Float
#
# @overload get_face_detection(params = {})
# @param [Hash] params ({})
def get_face_detection(params = {}, options = {})
req = build_request(:get_face_detection, params)
req.send_request(options)
end
# Gets the face search results for Amazon Rekognition Video face search
# started by StartFaceSearch. The search returns faces in a collection
# that match the faces of persons detected in a video. It also includes
# the time(s) that faces are matched in the video.
#
# Face search in a video is an asynchronous operation. You start face
# search by calling to StartFaceSearch which returns a job identifier
# (`JobId`). When the search operation finishes, Amazon Rekognition
# Video publishes a completion status to the Amazon Simple Notification
# Service topic registered in the initial call to `StartFaceSearch`. To
# get the search results, first check that the status value published to
# the Amazon SNS topic is `SUCCEEDED`. If so, call `GetFaceSearch` and
# pass the job identifier (`JobId`) from the initial call to
# `StartFaceSearch`.
#
# For more information, see Searching Faces in a Collection in the
# Amazon Rekognition Developer Guide.
#
    # The search results are returned in an array, `Persons`, of PersonMatch
    # objects. Each `PersonMatch` element contains details about the matching
# faces in the input collection, person information (facial attributes,
    # bounding boxes, and person identifier) for the matched person, and the
# time the person was matched in the video.
#
# <note markdown="1"> `GetFaceSearch` only returns the default facial attributes
# (`BoundingBox`, `Confidence`, `Landmarks`, `Pose`, and `Quality`). The
# other facial attributes listed in the `Face` object of the following
# response syntax are not returned. For more information, see FaceDetail
# in the Amazon Rekognition Developer Guide.
#
# </note>
#
# By default, the `Persons` array is sorted by the time, in milliseconds
# from the start of the video, persons are matched. You can also sort by
# persons by specifying `INDEX` for the `SORTBY` input parameter.
#
# @option params [required, String] :job_id
    # The job identifier for the search request. You get the job identifier
# from an initial call to `StartFaceSearch`.
#
# @option params [Integer] :max_results
# Maximum number of results to return per paginated call. The largest
# value you can specify is 1000. If you specify a value greater than
# 1000, a maximum of 1000 results is returned. The default value is
# 1000.
#
# @option params [String] :next_token
# If the previous response was incomplete (because there is more search
# results to retrieve), Amazon Rekognition Video returns a pagination
# token in the response. You can use this pagination token to retrieve
# the next set of search results.
#
# @option params [String] :sort_by
# Sort to use for grouping faces in the response. Use `TIMESTAMP` to
# group faces by the time that they are recognized. Use `INDEX` to sort
# by recognized faces.
#
# @return [Types::GetFaceSearchResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetFaceSearchResponse#job_status #job_status} => String
# * {Types::GetFaceSearchResponse#status_message #status_message} => String
# * {Types::GetFaceSearchResponse#next_token #next_token} => String
# * {Types::GetFaceSearchResponse#video_metadata #video_metadata} => Types::VideoMetadata
# * {Types::GetFaceSearchResponse#persons #persons} => Array<Types::PersonMatch>
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.get_face_search({
# job_id: "JobId", # required
# max_results: 1,
# next_token: "PaginationToken",
# sort_by: "INDEX", # accepts INDEX, TIMESTAMP
# })
#
# @example Response structure
#
# resp.job_status #=> String, one of "IN_PROGRESS", "SUCCEEDED", "FAILED"
# resp.status_message #=> String
# resp.next_token #=> String
# resp.video_metadata.codec #=> String
# resp.video_metadata.duration_millis #=> Integer
# resp.video_metadata.format #=> String
# resp.video_metadata.frame_rate #=> Float
# resp.video_metadata.frame_height #=> Integer
# resp.video_metadata.frame_width #=> Integer
# resp.persons #=> Array
# resp.persons[0].timestamp #=> Integer
# resp.persons[0].person.index #=> Integer
# resp.persons[0].person.bounding_box.width #=> Float
# resp.persons[0].person.bounding_box.height #=> Float
# resp.persons[0].person.bounding_box.left #=> Float
# resp.persons[0].person.bounding_box.top #=> Float
# resp.persons[0].person.face.bounding_box.width #=> Float
# resp.persons[0].person.face.bounding_box.height #=> Float
# resp.persons[0].person.face.bounding_box.left #=> Float
# resp.persons[0].person.face.bounding_box.top #=> Float
# resp.persons[0].person.face.age_range.low #=> Integer
# resp.persons[0].person.face.age_range.high #=> Integer
# resp.persons[0].person.face.smile.value #=> Boolean
# resp.persons[0].person.face.smile.confidence #=> Float
# resp.persons[0].person.face.eyeglasses.value #=> Boolean
# resp.persons[0].person.face.eyeglasses.confidence #=> Float
# resp.persons[0].person.face.sunglasses.value #=> Boolean
# resp.persons[0].person.face.sunglasses.confidence #=> Float
# resp.persons[0].person.face.gender.value #=> String, one of "Male", "Female"
# resp.persons[0].person.face.gender.confidence #=> Float
# resp.persons[0].person.face.beard.value #=> Boolean
# resp.persons[0].person.face.beard.confidence #=> Float
# resp.persons[0].person.face.mustache.value #=> Boolean
# resp.persons[0].person.face.mustache.confidence #=> Float
# resp.persons[0].person.face.eyes_open.value #=> Boolean
# resp.persons[0].person.face.eyes_open.confidence #=> Float
# resp.persons[0].person.face.mouth_open.value #=> Boolean
# resp.persons[0].person.face.mouth_open.confidence #=> Float
# resp.persons[0].person.face.emotions #=> Array
# resp.persons[0].person.face.emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN", "FEAR"
# resp.persons[0].person.face.emotions[0].confidence #=> Float
# resp.persons[0].person.face.landmarks #=> Array
# resp.persons[0].person.face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
# resp.persons[0].person.face.landmarks[0].x #=> Float
# resp.persons[0].person.face.landmarks[0].y #=> Float
# resp.persons[0].person.face.pose.roll #=> Float
# resp.persons[0].person.face.pose.yaw #=> Float
# resp.persons[0].person.face.pose.pitch #=> Float
# resp.persons[0].person.face.quality.brightness #=> Float
# resp.persons[0].person.face.quality.sharpness #=> Float
# resp.persons[0].person.face.confidence #=> Float
# resp.persons[0].face_matches #=> Array
# resp.persons[0].face_matches[0].similarity #=> Float
# resp.persons[0].face_matches[0].face.face_id #=> String
# resp.persons[0].face_matches[0].face.bounding_box.width #=> Float
# resp.persons[0].face_matches[0].face.bounding_box.height #=> Float
# resp.persons[0].face_matches[0].face.bounding_box.left #=> Float
# resp.persons[0].face_matches[0].face.bounding_box.top #=> Float
# resp.persons[0].face_matches[0].face.image_id #=> String
# resp.persons[0].face_matches[0].face.external_image_id #=> String
# resp.persons[0].face_matches[0].face.confidence #=> Float
#
# @overload get_face_search(params = {})
# @param [Hash] params ({})
def get_face_search(params = {}, options = {})
req = build_request(:get_face_search, params)
req.send_request(options)
end
    # Gets the label detection results of an Amazon Rekognition Video
# analysis started by StartLabelDetection.
#
# The label detection operation is started by a call to
# StartLabelDetection which returns a job identifier (`JobId`). When the
# label detection operation finishes, Amazon Rekognition publishes a
# completion status to the Amazon Simple Notification Service topic
# registered in the initial call to `StartlabelDetection`. To get the
# results of the label detection operation, first check that the status
# value published to the Amazon SNS topic is `SUCCEEDED`. If so, call
# GetLabelDetection and pass the job identifier (`JobId`) from the
# initial call to `StartLabelDetection`.
#
# `GetLabelDetection` returns an array of detected labels (`Labels`)
# sorted by the time the labels were detected. You can also sort by the
# label name by specifying `NAME` for the `SortBy` input parameter.
#
# The labels returned include the label name, the percentage confidence
# in the accuracy of the detected label, and the time the label was
# detected in the video.
#
# The returned labels also include bounding box information for common
# objects, a hierarchical taxonomy of detected labels, and the version
# of the label model used for detection.
#
# Use MaxResults parameter to limit the number of labels returned. If
# there are more results than specified in `MaxResults`, the value of
# `NextToken` in the operation response contains a pagination token for
# getting the next set of results. To get the next page of results, call
# `GetlabelDetection` and populate the `NextToken` request parameter
# with the token value returned from the previous call to
# `GetLabelDetection`.
#
# @option params [required, String] :job_id
# Job identifier for the label detection operation for which you want
    # results returned. You get the job identifier from an initial call to
# `StartlabelDetection`.
#
# @option params [Integer] :max_results
# Maximum number of results to return per paginated call. The largest
# value you can specify is 1000. If you specify a value greater than
# 1000, a maximum of 1000 results is returned. The default value is
# 1000.
#
# @option params [String] :next_token
# If the previous response was incomplete (because there are more labels
# to retrieve), Amazon Rekognition Video returns a pagination token in
# the response. You can use this pagination token to retrieve the next
# set of labels.
#
# @option params [String] :sort_by
# Sort to use for elements in the `Labels` array. Use `TIMESTAMP` to
# sort array elements by the time labels are detected. Use `NAME` to
# alphabetically group elements for a label together. Within each label
# group, the array element are sorted by detection confidence. The
# default sort is by `TIMESTAMP`.
#
# @return [Types::GetLabelDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetLabelDetectionResponse#job_status #job_status} => String
# * {Types::GetLabelDetectionResponse#status_message #status_message} => String
# * {Types::GetLabelDetectionResponse#video_metadata #video_metadata} => Types::VideoMetadata
# * {Types::GetLabelDetectionResponse#next_token #next_token} => String
# * {Types::GetLabelDetectionResponse#labels #labels} => Array<Types::LabelDetection>
# * {Types::GetLabelDetectionResponse#label_model_version #label_model_version} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.get_label_detection({
# job_id: "JobId", # required
# max_results: 1,
# next_token: "PaginationToken",
# sort_by: "NAME", # accepts NAME, TIMESTAMP
# })
#
# @example Response structure
#
# resp.job_status #=> String, one of "IN_PROGRESS", "SUCCEEDED", "FAILED"
# resp.status_message #=> String
# resp.video_metadata.codec #=> String
# resp.video_metadata.duration_millis #=> Integer
# resp.video_metadata.format #=> String
# resp.video_metadata.frame_rate #=> Float
# resp.video_metadata.frame_height #=> Integer
# resp.video_metadata.frame_width #=> Integer
# resp.next_token #=> String
# resp.labels #=> Array
# resp.labels[0].timestamp #=> Integer
# resp.labels[0].label.name #=> String
# resp.labels[0].label.confidence #=> Float
# resp.labels[0].label.instances #=> Array
# resp.labels[0].label.instances[0].bounding_box.width #=> Float
# resp.labels[0].label.instances[0].bounding_box.height #=> Float
# resp.labels[0].label.instances[0].bounding_box.left #=> Float
# resp.labels[0].label.instances[0].bounding_box.top #=> Float
# resp.labels[0].label.instances[0].confidence #=> Float
# resp.labels[0].label.parents #=> Array
# resp.labels[0].label.parents[0].name #=> String
# resp.label_model_version #=> String
#
# @overload get_label_detection(params = {})
# @param [Hash] params ({})
def get_label_detection(params = {}, options = {})
req = build_request(:get_label_detection, params)
req.send_request(options)
end
    # Gets the path tracking results of an Amazon Rekognition Video analysis
# started by StartPersonTracking.
#
# The person path tracking operation is started by a call to
# `StartPersonTracking` which returns a job identifier (`JobId`). When
# the operation finishes, Amazon Rekognition Video publishes a
# completion status to the Amazon Simple Notification Service topic
# registered in the initial call to `StartPersonTracking`.
#
# To get the results of the person path tracking operation, first check
# that the status value published to the Amazon SNS topic is
# `SUCCEEDED`. If so, call GetPersonTracking and pass the job identifier
# (`JobId`) from the initial call to `StartPersonTracking`.
#
# `GetPersonTracking` returns an array, `Persons`, of tracked persons
# and the time(s) their paths were tracked in the video.
#
# <note markdown="1"> `GetPersonTracking` only returns the default facial attributes
# (`BoundingBox`, `Confidence`, `Landmarks`, `Pose`, and `Quality`). The
# other facial attributes listed in the `Face` object of the following
# response syntax are not returned.
#
# For more information, see FaceDetail in the Amazon Rekognition
# Developer Guide.
#
# </note>
#
# By default, the array is sorted by the time(s) a person's path is
# tracked in the video. You can sort by tracked persons by specifying
# `INDEX` for the `SortBy` input parameter.
#
# Use the `MaxResults` parameter to limit the number of items returned.
# If there are more results than specified in `MaxResults`, the value of
# `NextToken` in the operation response contains a pagination token for
# getting the next set of results. To get the next page of results, call
# `GetPersonTracking` and populate the `NextToken` request parameter
# with the token value returned from the previous call to
# `GetPersonTracking`.
#
# @option params [required, String] :job_id
# The identifier for a job that tracks persons in a video. You get the
# `JobId` from a call to `StartPersonTracking`.
#
# @option params [Integer] :max_results
# Maximum number of results to return per paginated call. The largest
# value you can specify is 1000. If you specify a value greater than
# 1000, a maximum of 1000 results is returned. The default value is
# 1000.
#
# @option params [String] :next_token
# If the previous response was incomplete (because there are more
# persons to retrieve), Amazon Rekognition Video returns a pagination
# token in the response. You can use this pagination token to retrieve
# the next set of persons.
#
# @option params [String] :sort_by
# Sort to use for elements in the `Persons` array. Use `TIMESTAMP` to
# sort array elements by the time persons are detected. Use `INDEX` to
# sort by the tracked persons. If you sort by `INDEX`, the array
# elements for each person are sorted by detection confidence. The
# default sort is by `TIMESTAMP`.
#
# @return [Types::GetPersonTrackingResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetPersonTrackingResponse#job_status #job_status} => String
# * {Types::GetPersonTrackingResponse#status_message #status_message} => String
# * {Types::GetPersonTrackingResponse#video_metadata #video_metadata} => Types::VideoMetadata
# * {Types::GetPersonTrackingResponse#next_token #next_token} => String
# * {Types::GetPersonTrackingResponse#persons #persons} => Array<Types::PersonDetection>
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.get_person_tracking({
# job_id: "JobId", # required
# max_results: 1,
# next_token: "PaginationToken",
# sort_by: "INDEX", # accepts INDEX, TIMESTAMP
# })
#
# @example Response structure
#
# resp.job_status #=> String, one of "IN_PROGRESS", "SUCCEEDED", "FAILED"
# resp.status_message #=> String
# resp.video_metadata.codec #=> String
# resp.video_metadata.duration_millis #=> Integer
# resp.video_metadata.format #=> String
# resp.video_metadata.frame_rate #=> Float
# resp.video_metadata.frame_height #=> Integer
# resp.video_metadata.frame_width #=> Integer
# resp.next_token #=> String
# resp.persons #=> Array
# resp.persons[0].timestamp #=> Integer
# resp.persons[0].person.index #=> Integer
# resp.persons[0].person.bounding_box.width #=> Float
# resp.persons[0].person.bounding_box.height #=> Float
# resp.persons[0].person.bounding_box.left #=> Float
# resp.persons[0].person.bounding_box.top #=> Float
# resp.persons[0].person.face.bounding_box.width #=> Float
# resp.persons[0].person.face.bounding_box.height #=> Float
# resp.persons[0].person.face.bounding_box.left #=> Float
# resp.persons[0].person.face.bounding_box.top #=> Float
# resp.persons[0].person.face.age_range.low #=> Integer
# resp.persons[0].person.face.age_range.high #=> Integer
# resp.persons[0].person.face.smile.value #=> Boolean
# resp.persons[0].person.face.smile.confidence #=> Float
# resp.persons[0].person.face.eyeglasses.value #=> Boolean
# resp.persons[0].person.face.eyeglasses.confidence #=> Float
# resp.persons[0].person.face.sunglasses.value #=> Boolean
# resp.persons[0].person.face.sunglasses.confidence #=> Float
# resp.persons[0].person.face.gender.value #=> String, one of "Male", "Female"
# resp.persons[0].person.face.gender.confidence #=> Float
# resp.persons[0].person.face.beard.value #=> Boolean
# resp.persons[0].person.face.beard.confidence #=> Float
# resp.persons[0].person.face.mustache.value #=> Boolean
# resp.persons[0].person.face.mustache.confidence #=> Float
# resp.persons[0].person.face.eyes_open.value #=> Boolean
# resp.persons[0].person.face.eyes_open.confidence #=> Float
# resp.persons[0].person.face.mouth_open.value #=> Boolean
# resp.persons[0].person.face.mouth_open.confidence #=> Float
# resp.persons[0].person.face.emotions #=> Array
# resp.persons[0].person.face.emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN", "FEAR"
# resp.persons[0].person.face.emotions[0].confidence #=> Float
# resp.persons[0].person.face.landmarks #=> Array
# resp.persons[0].person.face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
# resp.persons[0].person.face.landmarks[0].x #=> Float
# resp.persons[0].person.face.landmarks[0].y #=> Float
# resp.persons[0].person.face.pose.roll #=> Float
# resp.persons[0].person.face.pose.yaw #=> Float
# resp.persons[0].person.face.pose.pitch #=> Float
# resp.persons[0].person.face.quality.brightness #=> Float
# resp.persons[0].person.face.quality.sharpness #=> Float
# resp.persons[0].person.face.confidence #=> Float
#
# @overload get_person_tracking(params = {})
# @param [Hash] params ({})
def get_person_tracking(params = {}, options = {})
  # Assemble the :get_person_tracking request and dispatch it with the
  # caller-supplied transport options.
  build_request(:get_person_tracking, params).send_request(options)
end
# Gets the segment detection results of an Amazon Rekognition Video
# analysis started by StartSegmentDetection.
#
# Segment detection with Amazon Rekognition Video is an asynchronous
# operation. You start segment detection by calling
# StartSegmentDetection which returns a job identifier (`JobId`). When
# the segment detection operation finishes, Amazon Rekognition publishes
# a completion status to the Amazon Simple Notification Service topic
# registered in the initial call to `StartSegmentDetection`. To get the
# results of the segment detection operation, first check that the
# status value published to the Amazon SNS topic is `SUCCEEDED`. If so,
# call `GetSegmentDetection` and pass the job identifier (`JobId`) from
# the initial call of `StartSegmentDetection`.
#
# `GetSegmentDetection` returns detected segments in an array
# (`Segments`) of SegmentDetection objects. `Segments` is sorted by the
# segment types specified in the `SegmentTypes` input parameter of
# `StartSegmentDetection`. Each element of the array includes the
# detected segment, the percentage confidence in the accuracy of the
# detected segment, the type of the segment, and the frame in which the
# segment was detected.
#
# Use `SelectedSegmentTypes` to find out the type of segment detection
# requested in the call to `StartSegmentDetection`.
#
# Use the `MaxResults` parameter to limit the number of segment
# detections returned. If there are more results than specified in
# `MaxResults`, the value of `NextToken` in the operation response
# contains a pagination token for getting the next set of results. To
# get the next page of results, call `GetSegmentDetection` and populate
# the `NextToken` request parameter with the token value returned from
# the previous call to `GetSegmentDetection`.
#
# For more information, see Detecting Video Segments in Stored Video in
# the Amazon Rekognition Developer Guide.
#
# @option params [required, String] :job_id
# Job identifier for the text detection operation for which you want
# results returned. You get the job identifier from an initial call to
# `StartSegmentDetection`.
#
# @option params [Integer] :max_results
# Maximum number of results to return per paginated call. The largest
# value you can specify is 1000.
#
# @option params [String] :next_token
# If the response is truncated, Amazon Rekognition Video returns this
# token that you can use in the subsequent request to retrieve the next
# set of text.
#
# @return [Types::GetSegmentDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetSegmentDetectionResponse#job_status #job_status} => String
# * {Types::GetSegmentDetectionResponse#status_message #status_message} => String
# * {Types::GetSegmentDetectionResponse#video_metadata #video_metadata} => Array<Types::VideoMetadata>
# * {Types::GetSegmentDetectionResponse#audio_metadata #audio_metadata} => Array<Types::AudioMetadata>
# * {Types::GetSegmentDetectionResponse#next_token #next_token} => String
# * {Types::GetSegmentDetectionResponse#segments #segments} => Array<Types::SegmentDetection>
# * {Types::GetSegmentDetectionResponse#selected_segment_types #selected_segment_types} => Array<Types::SegmentTypeInfo>
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.get_segment_detection({
# job_id: "JobId", # required
# max_results: 1,
# next_token: "PaginationToken",
# })
#
# @example Response structure
#
# resp.job_status #=> String, one of "IN_PROGRESS", "SUCCEEDED", "FAILED"
# resp.status_message #=> String
# resp.video_metadata #=> Array
# resp.video_metadata[0].codec #=> String
# resp.video_metadata[0].duration_millis #=> Integer
# resp.video_metadata[0].format #=> String
# resp.video_metadata[0].frame_rate #=> Float
# resp.video_metadata[0].frame_height #=> Integer
# resp.video_metadata[0].frame_width #=> Integer
# resp.audio_metadata #=> Array
# resp.audio_metadata[0].codec #=> String
# resp.audio_metadata[0].duration_millis #=> Integer
# resp.audio_metadata[0].sample_rate #=> Integer
# resp.audio_metadata[0].number_of_channels #=> Integer
# resp.next_token #=> String
# resp.segments #=> Array
# resp.segments[0].type #=> String, one of "TECHNICAL_CUE", "SHOT"
# resp.segments[0].start_timestamp_millis #=> Integer
# resp.segments[0].end_timestamp_millis #=> Integer
# resp.segments[0].duration_millis #=> Integer
# resp.segments[0].start_timecode_smpte #=> String
# resp.segments[0].end_timecode_smpte #=> String
# resp.segments[0].duration_smpte #=> String
# resp.segments[0].technical_cue_segment.type #=> String, one of "ColorBars", "EndCredits", "BlackFrames"
# resp.segments[0].technical_cue_segment.confidence #=> Float
# resp.segments[0].shot_segment.index #=> Integer
# resp.segments[0].shot_segment.confidence #=> Float
# resp.selected_segment_types #=> Array
# resp.selected_segment_types[0].type #=> String, one of "TECHNICAL_CUE", "SHOT"
# resp.selected_segment_types[0].model_version #=> String
#
# @overload get_segment_detection(params = {})
# @param [Hash] params ({})
def get_segment_detection(params = {}, options = {})
  # Build the :get_segment_detection request, then send it with any
  # per-call transport options.
  build_request(:get_segment_detection, params).send_request(options)
end
# Gets the text detection results of an Amazon Rekognition Video analysis
# started by StartTextDetection.
#
# Text detection with Amazon Rekognition Video is an asynchronous
# operation. You start text detection by calling StartTextDetection
# which returns a job identifier (`JobId`). When the text detection
# operation finishes, Amazon Rekognition publishes a completion status
# to the Amazon Simple Notification Service topic registered in the
# initial call to `StartTextDetection`. To get the results of the text
# detection operation, first check that the status value published to
# the Amazon SNS topic is `SUCCEEDED`. If so, call `GetTextDetection`
# and pass the job identifier (`JobId`) from the initial call of
# `StartTextDetection`.
#
# `GetTextDetection` returns an array of detected text
# (`TextDetections`) sorted by the time the text was detected, up to 50
# words per frame of video.
#
# Each element of the array includes the detected text, the percentage
# confidence in the accuracy of the detected text, the time the text was
# detected, bounding box information for where the text was located, and
# unique identifiers for words and their lines.
#
# Use MaxResults parameter to limit the number of text detections
# returned. If there are more results than specified in `MaxResults`,
# the value of `NextToken` in the operation response contains a
# pagination token for getting the next set of results. To get the next
# page of results, call `GetTextDetection` and populate the `NextToken`
# request parameter with the token value returned from the previous call
# to `GetTextDetection`.
#
# @option params [required, String] :job_id
# Job identifier for the text detection operation for which you want
# results returned. You get the job identifier from an initial call to
# `StartTextDetection`.
#
# @option params [Integer] :max_results
# Maximum number of results to return per paginated call. The largest
# value you can specify is 1000.
#
# @option params [String] :next_token
# If the previous response was incomplete (because there are more labels
# to retrieve), Amazon Rekognition Video returns a pagination token in
# the response. You can use this pagination token to retrieve the next
# set of text.
#
# @return [Types::GetTextDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetTextDetectionResponse#job_status #job_status} => String
# * {Types::GetTextDetectionResponse#status_message #status_message} => String
# * {Types::GetTextDetectionResponse#video_metadata #video_metadata} => Types::VideoMetadata
# * {Types::GetTextDetectionResponse#text_detections #text_detections} => Array<Types::TextDetectionResult>
# * {Types::GetTextDetectionResponse#next_token #next_token} => String
# * {Types::GetTextDetectionResponse#text_model_version #text_model_version} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.get_text_detection({
# job_id: "JobId", # required
# max_results: 1,
# next_token: "PaginationToken",
# })
#
# @example Response structure
#
# resp.job_status #=> String, one of "IN_PROGRESS", "SUCCEEDED", "FAILED"
# resp.status_message #=> String
# resp.video_metadata.codec #=> String
# resp.video_metadata.duration_millis #=> Integer
# resp.video_metadata.format #=> String
# resp.video_metadata.frame_rate #=> Float
# resp.video_metadata.frame_height #=> Integer
# resp.video_metadata.frame_width #=> Integer
# resp.text_detections #=> Array
# resp.text_detections[0].timestamp #=> Integer
# resp.text_detections[0].text_detection.detected_text #=> String
# resp.text_detections[0].text_detection.type #=> String, one of "LINE", "WORD"
# resp.text_detections[0].text_detection.id #=> Integer
# resp.text_detections[0].text_detection.parent_id #=> Integer
# resp.text_detections[0].text_detection.confidence #=> Float
# resp.text_detections[0].text_detection.geometry.bounding_box.width #=> Float
# resp.text_detections[0].text_detection.geometry.bounding_box.height #=> Float
# resp.text_detections[0].text_detection.geometry.bounding_box.left #=> Float
# resp.text_detections[0].text_detection.geometry.bounding_box.top #=> Float
# resp.text_detections[0].text_detection.geometry.polygon #=> Array
# resp.text_detections[0].text_detection.geometry.polygon[0].x #=> Float
# resp.text_detections[0].text_detection.geometry.polygon[0].y #=> Float
# resp.next_token #=> String
# resp.text_model_version #=> String
#
# @overload get_text_detection(params = {})
# @param [Hash] params ({})
def get_text_detection(params = {}, options = {})
  # Construct the :get_text_detection request and dispatch it using the
  # supplied request options.
  build_request(:get_text_detection, params).send_request(options)
end
# Detects faces in the input image and adds them to the specified
# collection.
#
# Amazon Rekognition doesn't save the actual faces that are detected.
# Instead, the underlying detection algorithm first detects the faces in
# the input image. For each face, the algorithm extracts facial features
# into a feature vector, and stores it in the backend database. Amazon
# Rekognition uses feature vectors when it performs face match and
# search operations using the SearchFaces and SearchFacesByImage
# operations.
#
# For more information, see Adding Faces to a Collection in the Amazon
# Rekognition Developer Guide.
#
# To get the number of faces in a collection, call DescribeCollection.
#
# If you're using version 1.0 of the face detection model, `IndexFaces`
# indexes the 15 largest faces in the input image. Later versions of the
# face detection model index the 100 largest faces in the input image.
#
# If you're using version 4 or later of the face model, image
# orientation information is not returned in the `OrientationCorrection`
# field.
#
# To determine which version of the model you're using, call
# DescribeCollection and supply the collection ID. You can also get the
# model version from the value of `FaceModelVersion` in the response
# from `IndexFaces`.
#
# For more information, see Model Versioning in the Amazon Rekognition
# Developer Guide.
#
# If you provide the optional `ExternalImageId` for the input image you
# provided, Amazon Rekognition associates this ID with all faces that it
# detects. When you call the ListFaces operation, the response returns
# the external ID. You can use this external image ID to create a
# client-side index to associate the faces with each image. You can then
# use the index to find all faces in an image.
#
# You can specify the maximum number of faces to index with the
# `MaxFaces` input parameter. This is useful when you want to index the
# largest faces in an image and don't want to index smaller faces, such
# as those belonging to people standing in the background.
#
# The `QualityFilter` input parameter allows you to filter out detected
# faces that don’t meet a required quality bar. The quality bar is based
# on a variety of common use cases. By default, `IndexFaces` chooses the
# quality bar that's used to filter faces. You can also explicitly
# choose the quality bar. Use `QualityFilter`, to set the quality bar by
# specifying `LOW`, `MEDIUM`, or `HIGH`. If you do not want to filter
# detected faces, specify `NONE`.
#
# <note markdown="1"> To use quality filtering, you need a collection associated with
# version 3 of the face model or higher. To get the version of the face
# model associated with a collection, call DescribeCollection.
#
# </note>
#
# Information about faces detected in an image, but not indexed, is
# returned in an array of UnindexedFace objects, `UnindexedFaces`. Faces
# aren't indexed for reasons such as:
#
# * The number of faces detected exceeds the value of the `MaxFaces`
# request parameter.
#
# * The face is too small compared to the image dimensions.
#
# * The face is too blurry.
#
# * The image is too dark.
#
# * The face has an extreme pose.
#
# * The face doesn’t have enough detail to be suitable for face search.
#
# In response, the `IndexFaces` operation returns an array of metadata
# for all detected faces, `FaceRecords`. This includes:
#
# * The bounding box, `BoundingBox`, of the detected face.
#
# * A confidence value, `Confidence`, which indicates the confidence
# that the bounding box contains a face.
#
# * A face ID, `FaceId`, assigned by the service for each face that's
# detected and stored.
#
# * An image ID, `ImageId`, assigned by the service for the input image.
#
# If you request all facial attributes (by using the
# `detectionAttributes` parameter), Amazon Rekognition returns detailed
# facial attributes, such as facial landmarks (for example, location of
# eye and mouth) and other facial attributes. If you provide the same
# image, specify the same collection, and use the same external ID in
# the `IndexFaces` operation, Amazon Rekognition doesn't save duplicate
# face metadata.
#
#
#
# The input image is passed either as base64-encoded image bytes, or as
# a reference to an image in an Amazon S3 bucket. If you use the AWS CLI
# to call Amazon Rekognition operations, passing image bytes isn't
# supported. The image must be formatted as a PNG or JPEG file.
#
# This operation requires permissions to perform the
# `rekognition:IndexFaces` action.
#
# @option params [required, String] :collection_id
# The ID of an existing collection to which you want to add the faces
# that are detected in the input images.
#
# @option params [required, Types::Image] :image
# The input image as base64-encoded bytes or an S3 object. If you use
# the AWS CLI to call Amazon Rekognition operations, passing
# base64-encoded image bytes isn't supported.
#
# If you are using an AWS SDK to call Amazon Rekognition, you might not
# need to base64-encode image bytes passed using the `Bytes` field. For
# more information, see Images in the Amazon Rekognition developer
# guide.
#
# @option params [String] :external_image_id
# The ID you want to assign to all the faces detected in the image.
#
# @option params [Array<String>] :detection_attributes
# An array of facial attributes that you want to be returned. This can
# be the default list of attributes or all attributes. If you don't
# specify a value for `Attributes` or if you specify `["DEFAULT"]`, the
# API returns the following subset of facial attributes: `BoundingBox`,
# `Confidence`, `Pose`, `Quality`, and `Landmarks`. If you provide
# `["ALL"]`, all facial attributes are returned, but the operation takes
# longer to complete.
#
# If you provide both, `["ALL", "DEFAULT"]`, the service uses a logical
# AND operator to determine which attributes to return (in this case,
# all attributes).
#
# @option params [Integer] :max_faces
# The maximum number of faces to index. The value of `MaxFaces` must be
# greater than or equal to 1. `IndexFaces` returns no more than 100
# detected faces in an image, even if you specify a larger value for
# `MaxFaces`.
#
# If `IndexFaces` detects more faces than the value of `MaxFaces`, the
# faces with the lowest quality are filtered out first. If there are
# still more faces than the value of `MaxFaces`, the faces with the
# smallest bounding boxes are filtered out (up to the number that's
# needed to satisfy the value of `MaxFaces`). Information about the
# unindexed faces is available in the `UnindexedFaces` array.
#
# The faces that are returned by `IndexFaces` are sorted by the largest
# face bounding box size to the smallest size, in descending order.
#
# `MaxFaces` can be used with a collection associated with any version
# of the face model.
#
# @option params [String] :quality_filter
# A filter that specifies a quality bar for how much filtering is done
# to identify faces. Filtered faces aren't indexed. If you specify
# `AUTO`, Amazon Rekognition chooses the quality bar. If you specify
# `LOW`, `MEDIUM`, or `HIGH`, filtering removes all faces that don’t
# meet the chosen quality bar. The default value is `AUTO`. The quality
# bar is based on a variety of common use cases. Low-quality detections
# can occur for a number of reasons. Some examples are an object that's
# misidentified as a face, a face that's too blurry, or a face with a
# pose that's too extreme to use. If you specify `NONE`, no filtering
# is performed.
#
# To use quality filtering, the collection you are using must be
# associated with version 3 of the face model or higher.
#
# @return [Types::IndexFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::IndexFacesResponse#face_records #face_records} => Array<Types::FaceRecord>
# * {Types::IndexFacesResponse#orientation_correction #orientation_correction} => String
# * {Types::IndexFacesResponse#face_model_version #face_model_version} => String
# * {Types::IndexFacesResponse#unindexed_faces #unindexed_faces} => Array<Types::UnindexedFace>
#
#
# @example Example: To add a face to a collection
#
# # This operation detects faces in an image and adds them to the specified Rekognition collection.
#
# resp = client.index_faces({
# collection_id: "myphotos",
# detection_attributes: [
# ],
# external_image_id: "myphotoid",
# image: {
# s3_object: {
# bucket: "mybucket",
# name: "myphoto",
# },
# },
# })
#
# resp.to_h outputs the following:
# {
# face_records: [
# {
# face: {
# bounding_box: {
# height: 0.33481481671333313,
# left: 0.31888890266418457,
# top: 0.4933333396911621,
# width: 0.25,
# },
# confidence: 99.9991226196289,
# face_id: "ff43d742-0c13-5d16-a3e8-03d3f58e980b",
# image_id: "465f4e93-763e-51d0-b030-b9667a2d94b1",
# },
# face_detail: {
# bounding_box: {
# height: 0.33481481671333313,
# left: 0.31888890266418457,
# top: 0.4933333396911621,
# width: 0.25,
# },
# confidence: 99.9991226196289,
# landmarks: [
# {
# type: "eyeLeft",
# x: 0.3976764678955078,
# y: 0.6248345971107483,
# },
# {
# type: "eyeRight",
# x: 0.4810936450958252,
# y: 0.6317117214202881,
# },
# {
# type: "noseLeft",
# x: 0.41986238956451416,
# y: 0.7111940383911133,
# },
# {
# type: "mouthDown",
# x: 0.40525302290916443,
# y: 0.7497701048851013,
# },
# {
# type: "mouthUp",
# x: 0.4753248989582062,
# y: 0.7558549642562866,
# },
# ],
# pose: {
# pitch: -9.713645935058594,
# roll: 4.707281112670898,
# yaw: -24.438663482666016,
# },
# quality: {
# brightness: 29.23358917236328,
# sharpness: 80,
# },
# },
# },
# {
# face: {
# bounding_box: {
# height: 0.32592591643333435,
# left: 0.5144444704055786,
# top: 0.15111111104488373,
# width: 0.24444444477558136,
# },
# confidence: 99.99950408935547,
# face_id: "8be04dba-4e58-520d-850e-9eae4af70eb2",
# image_id: "465f4e93-763e-51d0-b030-b9667a2d94b1",
# },
# face_detail: {
# bounding_box: {
# height: 0.32592591643333435,
# left: 0.5144444704055786,
# top: 0.15111111104488373,
# width: 0.24444444477558136,
# },
# confidence: 99.99950408935547,
# landmarks: [
# {
# type: "eyeLeft",
# x: 0.6006892323493958,
# y: 0.290842205286026,
# },
# {
# type: "eyeRight",
# x: 0.6808141469955444,
# y: 0.29609042406082153,
# },
# {
# type: "noseLeft",
# x: 0.6395332217216492,
# y: 0.3522595763206482,
# },
# {
# type: "mouthDown",
# x: 0.5892083048820496,
# y: 0.38689887523651123,
# },
# {
# type: "mouthUp",
# x: 0.674560010433197,
# y: 0.394125759601593,
# },
# ],
# pose: {
# pitch: -4.683138370513916,
# roll: 2.1029529571533203,
# yaw: 6.716655254364014,
# },
# quality: {
# brightness: 34.951698303222656,
# sharpness: 160,
# },
# },
# },
# ],
# orientation_correction: "ROTATE_0",
# }
#
# @example Request syntax with placeholder values
#
# resp = client.index_faces({
# collection_id: "CollectionId", # required
# image: { # required
# bytes: "data",
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# external_image_id: "ExternalImageId",
# detection_attributes: ["DEFAULT"], # accepts DEFAULT, ALL
# max_faces: 1,
# quality_filter: "NONE", # accepts NONE, AUTO, LOW, MEDIUM, HIGH
# })
#
# @example Response structure
#
# resp.face_records #=> Array
# resp.face_records[0].face.face_id #=> String
# resp.face_records[0].face.bounding_box.width #=> Float
# resp.face_records[0].face.bounding_box.height #=> Float
# resp.face_records[0].face.bounding_box.left #=> Float
# resp.face_records[0].face.bounding_box.top #=> Float
# resp.face_records[0].face.image_id #=> String
# resp.face_records[0].face.external_image_id #=> String
# resp.face_records[0].face.confidence #=> Float
# resp.face_records[0].face_detail.bounding_box.width #=> Float
# resp.face_records[0].face_detail.bounding_box.height #=> Float
# resp.face_records[0].face_detail.bounding_box.left #=> Float
# resp.face_records[0].face_detail.bounding_box.top #=> Float
# resp.face_records[0].face_detail.age_range.low #=> Integer
# resp.face_records[0].face_detail.age_range.high #=> Integer
# resp.face_records[0].face_detail.smile.value #=> Boolean
# resp.face_records[0].face_detail.smile.confidence #=> Float
# resp.face_records[0].face_detail.eyeglasses.value #=> Boolean
# resp.face_records[0].face_detail.eyeglasses.confidence #=> Float
# resp.face_records[0].face_detail.sunglasses.value #=> Boolean
# resp.face_records[0].face_detail.sunglasses.confidence #=> Float
# resp.face_records[0].face_detail.gender.value #=> String, one of "Male", "Female"
# resp.face_records[0].face_detail.gender.confidence #=> Float
# resp.face_records[0].face_detail.beard.value #=> Boolean
# resp.face_records[0].face_detail.beard.confidence #=> Float
# resp.face_records[0].face_detail.mustache.value #=> Boolean
# resp.face_records[0].face_detail.mustache.confidence #=> Float
# resp.face_records[0].face_detail.eyes_open.value #=> Boolean
# resp.face_records[0].face_detail.eyes_open.confidence #=> Float
# resp.face_records[0].face_detail.mouth_open.value #=> Boolean
# resp.face_records[0].face_detail.mouth_open.confidence #=> Float
# resp.face_records[0].face_detail.emotions #=> Array
# resp.face_records[0].face_detail.emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN", "FEAR"
# resp.face_records[0].face_detail.emotions[0].confidence #=> Float
# resp.face_records[0].face_detail.landmarks #=> Array
# resp.face_records[0].face_detail.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
# resp.face_records[0].face_detail.landmarks[0].x #=> Float
# resp.face_records[0].face_detail.landmarks[0].y #=> Float
# resp.face_records[0].face_detail.pose.roll #=> Float
# resp.face_records[0].face_detail.pose.yaw #=> Float
# resp.face_records[0].face_detail.pose.pitch #=> Float
# resp.face_records[0].face_detail.quality.brightness #=> Float
# resp.face_records[0].face_detail.quality.sharpness #=> Float
# resp.face_records[0].face_detail.confidence #=> Float
# resp.orientation_correction #=> String, one of "ROTATE_0", "ROTATE_90", "ROTATE_180", "ROTATE_270"
# resp.face_model_version #=> String
# resp.unindexed_faces #=> Array
# resp.unindexed_faces[0].reasons #=> Array
# resp.unindexed_faces[0].reasons[0] #=> String, one of "EXCEEDS_MAX_FACES", "EXTREME_POSE", "LOW_BRIGHTNESS", "LOW_SHARPNESS", "LOW_CONFIDENCE", "SMALL_BOUNDING_BOX", "LOW_FACE_QUALITY"
# resp.unindexed_faces[0].face_detail.bounding_box.width #=> Float
# resp.unindexed_faces[0].face_detail.bounding_box.height #=> Float
# resp.unindexed_faces[0].face_detail.bounding_box.left #=> Float
# resp.unindexed_faces[0].face_detail.bounding_box.top #=> Float
# resp.unindexed_faces[0].face_detail.age_range.low #=> Integer
# resp.unindexed_faces[0].face_detail.age_range.high #=> Integer
# resp.unindexed_faces[0].face_detail.smile.value #=> Boolean
# resp.unindexed_faces[0].face_detail.smile.confidence #=> Float
# resp.unindexed_faces[0].face_detail.eyeglasses.value #=> Boolean
# resp.unindexed_faces[0].face_detail.eyeglasses.confidence #=> Float
# resp.unindexed_faces[0].face_detail.sunglasses.value #=> Boolean
# resp.unindexed_faces[0].face_detail.sunglasses.confidence #=> Float
# resp.unindexed_faces[0].face_detail.gender.value #=> String, one of "Male", "Female"
# resp.unindexed_faces[0].face_detail.gender.confidence #=> Float
# resp.unindexed_faces[0].face_detail.beard.value #=> Boolean
# resp.unindexed_faces[0].face_detail.beard.confidence #=> Float
# resp.unindexed_faces[0].face_detail.mustache.value #=> Boolean
# resp.unindexed_faces[0].face_detail.mustache.confidence #=> Float
# resp.unindexed_faces[0].face_detail.eyes_open.value #=> Boolean
# resp.unindexed_faces[0].face_detail.eyes_open.confidence #=> Float
# resp.unindexed_faces[0].face_detail.mouth_open.value #=> Boolean
# resp.unindexed_faces[0].face_detail.mouth_open.confidence #=> Float
# resp.unindexed_faces[0].face_detail.emotions #=> Array
# resp.unindexed_faces[0].face_detail.emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN", "FEAR"
# resp.unindexed_faces[0].face_detail.emotions[0].confidence #=> Float
# resp.unindexed_faces[0].face_detail.landmarks #=> Array
# resp.unindexed_faces[0].face_detail.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
# resp.unindexed_faces[0].face_detail.landmarks[0].x #=> Float
# resp.unindexed_faces[0].face_detail.landmarks[0].y #=> Float
# resp.unindexed_faces[0].face_detail.pose.roll #=> Float
# resp.unindexed_faces[0].face_detail.pose.yaw #=> Float
# resp.unindexed_faces[0].face_detail.pose.pitch #=> Float
# resp.unindexed_faces[0].face_detail.quality.brightness #=> Float
# resp.unindexed_faces[0].face_detail.quality.sharpness #=> Float
# resp.unindexed_faces[0].face_detail.confidence #=> Float
#
# @overload index_faces(params = {})
# @param [Hash] params ({})
def index_faces(params = {}, options = {})
  # Assemble the :index_faces request from the operation params and send
  # it with the caller's transport options.
  build_request(:index_faces, params).send_request(options)
end
# Returns a list of collection IDs in your account. If the result is
# truncated, the response also provides a `NextToken` that you can use
# in the subsequent request to fetch the next set of collection IDs.
#
# For an example, see Listing Collections in the Amazon Rekognition
# Developer Guide.
#
# This operation requires permissions to perform the
# `rekognition:ListCollections` action.
#
# @option params [String] :next_token
# Pagination token from the previous response.
#
# @option params [Integer] :max_results
# Maximum number of collection IDs to return.
#
# @return [Types::ListCollectionsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListCollectionsResponse#collection_ids #collection_ids} => Array<String>
# * {Types::ListCollectionsResponse#next_token #next_token} => String
# * {Types::ListCollectionsResponse#face_model_versions #face_model_versions} => Array<String>
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
#
# @example Example: To list the collections
#
# # This operation returns a list of Rekognition collections.
#
# resp = client.list_collections({
# })
#
# resp.to_h outputs the following:
# {
# collection_ids: [
# "myphotos",
# ],
# }
#
# @example Request syntax with placeholder values
#
# resp = client.list_collections({
# next_token: "PaginationToken",
# max_results: 1,
# })
#
# @example Response structure
#
# resp.collection_ids #=> Array
# resp.collection_ids[0] #=> String
# resp.next_token #=> String
# resp.face_model_versions #=> Array
# resp.face_model_versions[0] #=> String
#
# @overload list_collections(params = {})
# @param [Hash] params ({})
def list_collections(params = {}, options = {})
req = build_request(:list_collections, params)
req.send_request(options)
end
# Returns metadata for faces in the specified collection. This metadata
# includes information such as the bounding box coordinates, the
# confidence (that the bounding box contains a face), and face ID. For
# an example, see Listing Faces in a Collection in the Amazon
# Rekognition Developer Guide.
#
# This operation requires permissions to perform the
# `rekognition:ListFaces` action.
#
# @option params [required, String] :collection_id
# ID of the collection from which to list the faces.
#
# @option params [String] :next_token
# If the previous response was incomplete (because there is more data to
# retrieve), Amazon Rekognition returns a pagination token in the
# response. You can use this pagination token to retrieve the next set
# of faces.
#
# @option params [Integer] :max_results
# Maximum number of faces to return.
#
# @return [Types::ListFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListFacesResponse#faces #faces} => Array<Types::Face>
# * {Types::ListFacesResponse#next_token #next_token} => String
# * {Types::ListFacesResponse#face_model_version #face_model_version} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
#
# @example Example: To list the faces in a collection
#
# # This operation lists the faces in a Rekognition collection.
#
# resp = client.list_faces({
# collection_id: "myphotos",
# max_results: 20,
# })
#
# resp.to_h outputs the following:
# {
# faces: [
# {
# bounding_box: {
# height: 0.18000000715255737,
# left: 0.5555559992790222,
# top: 0.336667001247406,
# width: 0.23999999463558197,
# },
# confidence: 100,
# face_id: "1c62e8b5-69a7-5b7d-b3cd-db4338a8a7e7",
# image_id: "147fdf82-7a71-52cf-819b-e786c7b9746e",
# },
# {
# bounding_box: {
# height: 0.16555599868297577,
# left: 0.30963000655174255,
# top: 0.7066670060157776,
# width: 0.22074100375175476,
# },
# confidence: 100,
# face_id: "29a75abe-397b-5101-ba4f-706783b2246c",
# image_id: "147fdf82-7a71-52cf-819b-e786c7b9746e",
# },
# {
# bounding_box: {
# height: 0.3234420120716095,
# left: 0.3233329951763153,
# top: 0.5,
# width: 0.24222199618816376,
# },
# confidence: 99.99829864501953,
# face_id: "38271d79-7bc2-5efb-b752-398a8d575b85",
# image_id: "d5631190-d039-54e4-b267-abd22c8647c5",
# },
# {
# bounding_box: {
# height: 0.03555560111999512,
# left: 0.37388700246810913,
# top: 0.2477779984474182,
# width: 0.04747769981622696,
# },
# confidence: 99.99210357666016,
# face_id: "3b01bef0-c883-5654-ba42-d5ad28b720b3",
# image_id: "812d9f04-86f9-54fc-9275-8d0dcbcb6784",
# },
# {
# bounding_box: {
# height: 0.05333330109715462,
# left: 0.2937690019607544,
# top: 0.35666701197624207,
# width: 0.07121659815311432,
# },
# confidence: 99.99919891357422,
# face_id: "4839a608-49d0-566c-8301-509d71b534d1",
# image_id: "812d9f04-86f9-54fc-9275-8d0dcbcb6784",
# },
# {
# bounding_box: {
# height: 0.3249259889125824,
# left: 0.5155559778213501,
# top: 0.1513350009918213,
# width: 0.24333299696445465,
# },
# confidence: 99.99949645996094,
# face_id: "70008e50-75e4-55d0-8e80-363fb73b3a14",
# image_id: "d5631190-d039-54e4-b267-abd22c8647c5",
# },
# {
# bounding_box: {
# height: 0.03777780011296272,
# left: 0.7002969980239868,
# top: 0.18777799606323242,
# width: 0.05044509842991829,
# },
# confidence: 99.92639923095703,
# face_id: "7f5f88ed-d684-5a88-b0df-01e4a521552b",
# image_id: "812d9f04-86f9-54fc-9275-8d0dcbcb6784",
# },
# {
# bounding_box: {
# height: 0.05555560067296028,
# left: 0.13946600258350372,
# top: 0.46333301067352295,
# width: 0.07270029932260513,
# },
# confidence: 99.99469757080078,
# face_id: "895b4e2c-81de-5902-a4bd-d1792bda00b2",
# image_id: "812d9f04-86f9-54fc-9275-8d0dcbcb6784",
# },
# {
# bounding_box: {
# height: 0.3259260058403015,
# left: 0.5144439935684204,
# top: 0.15111100673675537,
# width: 0.24444399774074554,
# },
# confidence: 99.99949645996094,
# face_id: "8be04dba-4e58-520d-850e-9eae4af70eb2",
# image_id: "465f4e93-763e-51d0-b030-b9667a2d94b1",
# },
# {
# bounding_box: {
# height: 0.18888899683952332,
# left: 0.3783380091190338,
# top: 0.2355560064315796,
# width: 0.25222599506378174,
# },
# confidence: 99.9999008178711,
# face_id: "908544ad-edc3-59df-8faf-6a87cc256cf5",
# image_id: "3c731605-d772-541a-a5e7-0375dbc68a07",
# },
# {
# bounding_box: {
# height: 0.33481499552726746,
# left: 0.31888899207115173,
# top: 0.49333301186561584,
# width: 0.25,
# },
# confidence: 99.99909973144531,
# face_id: "ff43d742-0c13-5d16-a3e8-03d3f58e980b",
# image_id: "465f4e93-763e-51d0-b030-b9667a2d94b1",
# },
# ],
# }
#
# @example Request syntax with placeholder values
#
# resp = client.list_faces({
# collection_id: "CollectionId", # required
# next_token: "PaginationToken",
# max_results: 1,
# })
#
# @example Response structure
#
# resp.faces #=> Array
# resp.faces[0].face_id #=> String
# resp.faces[0].bounding_box.width #=> Float
# resp.faces[0].bounding_box.height #=> Float
# resp.faces[0].bounding_box.left #=> Float
# resp.faces[0].bounding_box.top #=> Float
# resp.faces[0].image_id #=> String
# resp.faces[0].external_image_id #=> String
# resp.faces[0].confidence #=> Float
# resp.next_token #=> String
# resp.face_model_version #=> String
#
# @overload list_faces(params = {})
# @param [Hash] params ({})
def list_faces(params = {}, options = {})
req = build_request(:list_faces, params)
req.send_request(options)
end
# Gets a list of stream processors that you have created with
# CreateStreamProcessor.
#
# @option params [String] :next_token
# If the previous response was incomplete (because there are more stream
# processors to retrieve), Amazon Rekognition Video returns a pagination
# token in the response. You can use this pagination token to retrieve
# the next set of stream processors.
#
# @option params [Integer] :max_results
# Maximum number of stream processors you want Amazon Rekognition Video
# to return in the response. The default is 1000.
#
# @return [Types::ListStreamProcessorsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListStreamProcessorsResponse#next_token #next_token} => String
# * {Types::ListStreamProcessorsResponse#stream_processors #stream_processors} => Array<Types::StreamProcessor>
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_stream_processors({
# next_token: "PaginationToken",
# max_results: 1,
# })
#
# @example Response structure
#
# resp.next_token #=> String
# resp.stream_processors #=> Array
# resp.stream_processors[0].name #=> String
# resp.stream_processors[0].status #=> String, one of "STOPPED", "STARTING", "RUNNING", "FAILED", "STOPPING"
#
# @overload list_stream_processors(params = {})
# @param [Hash] params ({})
def list_stream_processors(params = {}, options = {})
req = build_request(:list_stream_processors, params)
req.send_request(options)
end
# Returns an array of celebrities recognized in the input image. For
# more information, see Recognizing Celebrities in the Amazon
# Rekognition Developer Guide.
#
# `RecognizeCelebrities` returns the 64 largest faces in the image. It
# lists recognized celebrities in the `CelebrityFaces` array and
# unrecognized faces in the `UnrecognizedFaces` array.
# `RecognizeCelebrities` doesn't return celebrities whose faces aren't
# among the largest 64 faces in the image.
#
# For each celebrity recognized, `RecognizeCelebrities` returns a
# `Celebrity` object. The `Celebrity` object contains the celebrity
# name, ID, URL links to additional information, match confidence, and a
# `ComparedFace` object that you can use to locate the celebrity's face
# on the image.
#
# Amazon Rekognition doesn't retain information about which images a
# celebrity has been recognized in. Your application must store this
# information and use the `Celebrity` ID property as a unique identifier
# for the celebrity. If you don't store the celebrity name or
# additional information URLs returned by `RecognizeCelebrities`, you
# will need the ID to identify the celebrity in a call to the
# GetCelebrityInfo operation.
#
# You pass the input image either as base64-encoded image bytes or as a
# reference to an image in an Amazon S3 bucket. If you use the AWS CLI
# to call Amazon Rekognition operations, passing image bytes is not
# supported. The image must be either a PNG or JPEG formatted file.
#
# For an example, see Recognizing Celebrities in an Image in the Amazon
# Rekognition Developer Guide.
#
# This operation requires permissions to perform the
# `rekognition:RecognizeCelebrities` operation.
#
# @option params [required, Types::Image] :image
# The input image as base64-encoded bytes or an S3 object. If you use
# the AWS CLI to call Amazon Rekognition operations, passing
# base64-encoded image bytes is not supported.
#
# If you are using an AWS SDK to call Amazon Rekognition, you might not
# need to base64-encode image bytes passed using the `Bytes` field. For
# more information, see Images in the Amazon Rekognition developer
# guide.
#
# @return [Types::RecognizeCelebritiesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::RecognizeCelebritiesResponse#celebrity_faces #celebrity_faces} => Array<Types::Celebrity>
# * {Types::RecognizeCelebritiesResponse#unrecognized_faces #unrecognized_faces} => Array<Types::ComparedFace>
# * {Types::RecognizeCelebritiesResponse#orientation_correction #orientation_correction} => String
#
# @example Request syntax with placeholder values
#
# resp = client.recognize_celebrities({
# image: { # required
# bytes: "data",
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# })
#
# @example Response structure
#
# resp.celebrity_faces #=> Array
# resp.celebrity_faces[0].urls #=> Array
# resp.celebrity_faces[0].urls[0] #=> String
# resp.celebrity_faces[0].name #=> String
# resp.celebrity_faces[0].id #=> String
# resp.celebrity_faces[0].face.bounding_box.width #=> Float
# resp.celebrity_faces[0].face.bounding_box.height #=> Float
# resp.celebrity_faces[0].face.bounding_box.left #=> Float
# resp.celebrity_faces[0].face.bounding_box.top #=> Float
# resp.celebrity_faces[0].face.confidence #=> Float
# resp.celebrity_faces[0].face.landmarks #=> Array
# resp.celebrity_faces[0].face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
# resp.celebrity_faces[0].face.landmarks[0].x #=> Float
# resp.celebrity_faces[0].face.landmarks[0].y #=> Float
# resp.celebrity_faces[0].face.pose.roll #=> Float
# resp.celebrity_faces[0].face.pose.yaw #=> Float
# resp.celebrity_faces[0].face.pose.pitch #=> Float
# resp.celebrity_faces[0].face.quality.brightness #=> Float
# resp.celebrity_faces[0].face.quality.sharpness #=> Float
# resp.celebrity_faces[0].match_confidence #=> Float
# resp.unrecognized_faces #=> Array
# resp.unrecognized_faces[0].bounding_box.width #=> Float
# resp.unrecognized_faces[0].bounding_box.height #=> Float
# resp.unrecognized_faces[0].bounding_box.left #=> Float
# resp.unrecognized_faces[0].bounding_box.top #=> Float
# resp.unrecognized_faces[0].confidence #=> Float
# resp.unrecognized_faces[0].landmarks #=> Array
# resp.unrecognized_faces[0].landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
# resp.unrecognized_faces[0].landmarks[0].x #=> Float
# resp.unrecognized_faces[0].landmarks[0].y #=> Float
# resp.unrecognized_faces[0].pose.roll #=> Float
# resp.unrecognized_faces[0].pose.yaw #=> Float
# resp.unrecognized_faces[0].pose.pitch #=> Float
# resp.unrecognized_faces[0].quality.brightness #=> Float
# resp.unrecognized_faces[0].quality.sharpness #=> Float
# resp.orientation_correction #=> String, one of "ROTATE_0", "ROTATE_90", "ROTATE_180", "ROTATE_270"
#
# @overload recognize_celebrities(params = {})
# @param [Hash] params ({})
def recognize_celebrities(params = {}, options = {})
req = build_request(:recognize_celebrities, params)
req.send_request(options)
end
# For a given input face ID, searches for matching faces in the
# collection the face belongs to. You get a face ID when you add a face
# to the collection using the IndexFaces operation. The operation
# compares the features of the input face with faces in the specified
# collection.
#
# <note markdown="1"> You can also search faces without indexing faces by using the
# `SearchFacesByImage` operation.
#
# </note>
#
# The operation response returns an array of faces that match, ordered
# by similarity score with the highest similarity first. More
# specifically, it is an array of metadata for each face match that is
# found. Along with the metadata, the response also includes a
# `confidence` value for each face match, indicating the confidence that
# the specific face matches the input face.
#
# For an example, see Searching for a Face Using Its Face ID in the
# Amazon Rekognition Developer Guide.
#
# This operation requires permissions to perform the
# `rekognition:SearchFaces` action.
#
# @option params [required, String] :collection_id
# ID of the collection the face belongs to.
#
# @option params [required, String] :face_id
# ID of a face to find matches for in the collection.
#
# @option params [Integer] :max_faces
# Maximum number of faces to return. The operation returns the maximum
# number of faces with the highest confidence in the match.
#
# @option params [Float] :face_match_threshold
# Optional value specifying the minimum confidence in the face match to
# return. For example, don't return any matches where confidence in
# matches is less than 70%. The default value is 80%.
#
# @return [Types::SearchFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::SearchFacesResponse#searched_face_id #searched_face_id} => String
# * {Types::SearchFacesResponse#face_matches #face_matches} => Array<Types::FaceMatch>
# * {Types::SearchFacesResponse#face_model_version #face_model_version} => String
#
#
    # @example Example: To search for matching faces in a collection
#
# # This operation searches for matching faces in the collection the supplied face belongs to.
#
# resp = client.search_faces({
# collection_id: "myphotos",
# face_id: "70008e50-75e4-55d0-8e80-363fb73b3a14",
# face_match_threshold: 90,
# max_faces: 10,
# })
#
# resp.to_h outputs the following:
# {
# face_matches: [
# {
# face: {
# bounding_box: {
# height: 0.3259260058403015,
# left: 0.5144439935684204,
# top: 0.15111100673675537,
# width: 0.24444399774074554,
# },
# confidence: 99.99949645996094,
# face_id: "8be04dba-4e58-520d-850e-9eae4af70eb2",
# image_id: "465f4e93-763e-51d0-b030-b9667a2d94b1",
# },
# similarity: 99.97222137451172,
# },
# {
# face: {
# bounding_box: {
# height: 0.16555599868297577,
# left: 0.30963000655174255,
# top: 0.7066670060157776,
# width: 0.22074100375175476,
# },
# confidence: 100,
# face_id: "29a75abe-397b-5101-ba4f-706783b2246c",
# image_id: "147fdf82-7a71-52cf-819b-e786c7b9746e",
# },
# similarity: 97.04154968261719,
# },
# {
# face: {
# bounding_box: {
# height: 0.18888899683952332,
# left: 0.3783380091190338,
# top: 0.2355560064315796,
# width: 0.25222599506378174,
# },
# confidence: 99.9999008178711,
# face_id: "908544ad-edc3-59df-8faf-6a87cc256cf5",
# image_id: "3c731605-d772-541a-a5e7-0375dbc68a07",
# },
# similarity: 95.94520568847656,
# },
# ],
# searched_face_id: "70008e50-75e4-55d0-8e80-363fb73b3a14",
# }
#
# @example Request syntax with placeholder values
#
# resp = client.search_faces({
# collection_id: "CollectionId", # required
# face_id: "FaceId", # required
# max_faces: 1,
# face_match_threshold: 1.0,
# })
#
# @example Response structure
#
# resp.searched_face_id #=> String
# resp.face_matches #=> Array
# resp.face_matches[0].similarity #=> Float
# resp.face_matches[0].face.face_id #=> String
# resp.face_matches[0].face.bounding_box.width #=> Float
# resp.face_matches[0].face.bounding_box.height #=> Float
# resp.face_matches[0].face.bounding_box.left #=> Float
# resp.face_matches[0].face.bounding_box.top #=> Float
# resp.face_matches[0].face.image_id #=> String
# resp.face_matches[0].face.external_image_id #=> String
# resp.face_matches[0].face.confidence #=> Float
# resp.face_model_version #=> String
#
# @overload search_faces(params = {})
# @param [Hash] params ({})
def search_faces(params = {}, options = {})
req = build_request(:search_faces, params)
req.send_request(options)
end
# For a given input image, first detects the largest face in the image,
# and then searches the specified collection for matching faces. The
# operation compares the features of the input face with faces in the
# specified collection.
#
# <note markdown="1"> To search for all faces in an input image, you might first call the
# IndexFaces operation, and then use the face IDs returned in subsequent
# calls to the SearchFaces operation.
#
# You can also call the `DetectFaces` operation and use the bounding
# boxes in the response to make face crops, which then you can pass in
# to the `SearchFacesByImage` operation.
#
# </note>
#
# You pass the input image either as base64-encoded image bytes or as a
# reference to an image in an Amazon S3 bucket. If you use the AWS CLI
# to call Amazon Rekognition operations, passing image bytes is not
# supported. The image must be either a PNG or JPEG formatted file.
#
# The response returns an array of faces that match, ordered by
# similarity score with the highest similarity first. More specifically,
# it is an array of metadata for each face match found. Along with the
# metadata, the response also includes a `similarity` indicating how
# similar the face is to the input face. In the response, the operation
# also returns the bounding box (and a confidence level that the
# bounding box contains a face) of the face that Amazon Rekognition used
# for the input image.
#
    # For an example, see Searching for a Face Using an Image in the
    # Amazon Rekognition Developer Guide.
#
# The `QualityFilter` input parameter allows you to filter out detected
# faces that don’t meet a required quality bar. The quality bar is based
# on a variety of common use cases. Use `QualityFilter` to set the
# quality bar for filtering by specifying `LOW`, `MEDIUM`, or `HIGH`. If
# you do not want to filter detected faces, specify `NONE`. The default
# value is `NONE`.
#
# <note markdown="1"> To use quality filtering, you need a collection associated with
# version 3 of the face model or higher. To get the version of the face
# model associated with a collection, call DescribeCollection.
#
# </note>
#
# This operation requires permissions to perform the
# `rekognition:SearchFacesByImage` action.
#
# @option params [required, String] :collection_id
# ID of the collection to search.
#
# @option params [required, Types::Image] :image
# The input image as base64-encoded bytes or an S3 object. If you use
# the AWS CLI to call Amazon Rekognition operations, passing
# base64-encoded image bytes is not supported.
#
# If you are using an AWS SDK to call Amazon Rekognition, you might not
# need to base64-encode image bytes passed using the `Bytes` field. For
# more information, see Images in the Amazon Rekognition developer
# guide.
#
# @option params [Integer] :max_faces
# Maximum number of faces to return. The operation returns the maximum
# number of faces with the highest confidence in the match.
#
# @option params [Float] :face_match_threshold
# (Optional) Specifies the minimum confidence in the face match to
# return. For example, don't return any matches where confidence in
# matches is less than 70%. The default value is 80%.
#
# @option params [String] :quality_filter
# A filter that specifies a quality bar for how much filtering is done
# to identify faces. Filtered faces aren't searched for in the
# collection. If you specify `AUTO`, Amazon Rekognition chooses the
# quality bar. If you specify `LOW`, `MEDIUM`, or `HIGH`, filtering
# removes all faces that don’t meet the chosen quality bar. The quality
# bar is based on a variety of common use cases. Low-quality detections
# can occur for a number of reasons. Some examples are an object that's
# misidentified as a face, a face that's too blurry, or a face with a
# pose that's too extreme to use. If you specify `NONE`, no filtering
# is performed. The default value is `NONE`.
#
# To use quality filtering, the collection you are using must be
# associated with version 3 of the face model or higher.
#
# @return [Types::SearchFacesByImageResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::SearchFacesByImageResponse#searched_face_bounding_box #searched_face_bounding_box} => Types::BoundingBox
# * {Types::SearchFacesByImageResponse#searched_face_confidence #searched_face_confidence} => Float
# * {Types::SearchFacesByImageResponse#face_matches #face_matches} => Array<Types::FaceMatch>
# * {Types::SearchFacesByImageResponse#face_model_version #face_model_version} => String
#
#
# @example Example: To search for faces matching a supplied image
#
# # This operation searches for faces in a Rekognition collection that match the largest face in an S3 bucket stored image.
#
# resp = client.search_faces_by_image({
# collection_id: "myphotos",
# face_match_threshold: 95,
# image: {
# s3_object: {
# bucket: "mybucket",
# name: "myphoto",
# },
# },
# max_faces: 5,
# })
#
# resp.to_h outputs the following:
# {
# face_matches: [
# {
# face: {
# bounding_box: {
# height: 0.3234420120716095,
# left: 0.3233329951763153,
# top: 0.5,
# width: 0.24222199618816376,
# },
# confidence: 99.99829864501953,
# face_id: "38271d79-7bc2-5efb-b752-398a8d575b85",
# image_id: "d5631190-d039-54e4-b267-abd22c8647c5",
# },
# similarity: 99.97036743164062,
# },
# ],
# searched_face_bounding_box: {
# height: 0.33481481671333313,
# left: 0.31888890266418457,
# top: 0.4933333396911621,
# width: 0.25,
# },
# searched_face_confidence: 99.9991226196289,
# }
#
# @example Request syntax with placeholder values
#
# resp = client.search_faces_by_image({
# collection_id: "CollectionId", # required
# image: { # required
# bytes: "data",
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# max_faces: 1,
# face_match_threshold: 1.0,
# quality_filter: "NONE", # accepts NONE, AUTO, LOW, MEDIUM, HIGH
# })
#
# @example Response structure
#
# resp.searched_face_bounding_box.width #=> Float
# resp.searched_face_bounding_box.height #=> Float
# resp.searched_face_bounding_box.left #=> Float
# resp.searched_face_bounding_box.top #=> Float
# resp.searched_face_confidence #=> Float
# resp.face_matches #=> Array
# resp.face_matches[0].similarity #=> Float
# resp.face_matches[0].face.face_id #=> String
# resp.face_matches[0].face.bounding_box.width #=> Float
# resp.face_matches[0].face.bounding_box.height #=> Float
# resp.face_matches[0].face.bounding_box.left #=> Float
# resp.face_matches[0].face.bounding_box.top #=> Float
# resp.face_matches[0].face.image_id #=> String
# resp.face_matches[0].face.external_image_id #=> String
# resp.face_matches[0].face.confidence #=> Float
# resp.face_model_version #=> String
#
# @overload search_faces_by_image(params = {})
# @param [Hash] params ({})
def search_faces_by_image(params = {}, options = {})
req = build_request(:search_faces_by_image, params)
req.send_request(options)
end
# Starts asynchronous recognition of celebrities in a stored video.
#
    # Amazon Rekognition Video can detect celebrities in a video that must
    # be stored in an Amazon S3 bucket. Use Video to specify the bucket name
# and the filename of the video. `StartCelebrityRecognition` returns a
# job identifier (`JobId`) which you use to get the results of the
# analysis. When celebrity recognition analysis is finished, Amazon
# Rekognition Video publishes a completion status to the Amazon Simple
# Notification Service topic that you specify in `NotificationChannel`.
# To get the results of the celebrity recognition analysis, first check
# that the status value published to the Amazon SNS topic is
# `SUCCEEDED`. If so, call GetCelebrityRecognition and pass the job
# identifier (`JobId`) from the initial call to
# `StartCelebrityRecognition`.
#
# For more information, see Recognizing Celebrities in the Amazon
# Rekognition Developer Guide.
#
# @option params [required, Types::Video] :video
# The video in which you want to recognize celebrities. The video must
# be stored in an Amazon S3 bucket.
#
# @option params [String] :client_request_token
# Idempotent token used to identify the start request. If you use the
# same token with multiple `StartCelebrityRecognition` requests, the
# same `JobId` is returned. Use `ClientRequestToken` to prevent the same
    # job from being accidentally started more than once.
#
# @option params [Types::NotificationChannel] :notification_channel
# The Amazon SNS topic ARN that you want Amazon Rekognition Video to
# publish the completion status of the celebrity recognition analysis
# to.
#
# @option params [String] :job_tag
# An identifier you specify that's returned in the completion
# notification that's published to your Amazon Simple Notification
# Service topic. For example, you can use `JobTag` to group related jobs
# and identify them in the completion notification.
#
# @return [Types::StartCelebrityRecognitionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::StartCelebrityRecognitionResponse#job_id #job_id} => String
#
# @example Request syntax with placeholder values
#
# resp = client.start_celebrity_recognition({
# video: { # required
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# client_request_token: "ClientRequestToken",
# notification_channel: {
# sns_topic_arn: "SNSTopicArn", # required
# role_arn: "RoleArn", # required
# },
# job_tag: "JobTag",
# })
#
# @example Response structure
#
# resp.job_id #=> String
#
# @overload start_celebrity_recognition(params = {})
# @param [Hash] params ({})
def start_celebrity_recognition(params = {}, options = {})
req = build_request(:start_celebrity_recognition, params)
req.send_request(options)
end
# Starts asynchronous detection of unsafe content in a stored video.
#
# Amazon Rekognition Video can moderate content in a video stored in an
# Amazon S3 bucket. Use Video to specify the bucket name and the
# filename of the video. `StartContentModeration` returns a job
# identifier (`JobId`) which you use to get the results of the analysis.
# When unsafe content analysis is finished, Amazon Rekognition Video
# publishes a completion status to the Amazon Simple Notification
# Service topic that you specify in `NotificationChannel`.
#
# To get the results of the unsafe content analysis, first check that
# the status value published to the Amazon SNS topic is `SUCCEEDED`. If
# so, call GetContentModeration and pass the job identifier (`JobId`)
# from the initial call to `StartContentModeration`.
#
# For more information, see Detecting Unsafe Content in the Amazon
# Rekognition Developer Guide.
#
# @option params [required, Types::Video] :video
# The video in which you want to detect unsafe content. The video must
# be stored in an Amazon S3 bucket.
#
# @option params [Float] :min_confidence
# Specifies the minimum confidence that Amazon Rekognition must have in
# order to return a moderated content label. Confidence represents how
# certain Amazon Rekognition is that the moderated content is correctly
# identified. 0 is the lowest confidence. 100 is the highest confidence.
# Amazon Rekognition doesn't return any moderated content labels with a
# confidence level lower than this specified value. If you don't
# specify `MinConfidence`, `GetContentModeration` returns labels with
# confidence values greater than or equal to 50 percent.
#
# @option params [String] :client_request_token
# Idempotent token used to identify the start request. If you use the
# same token with multiple `StartContentModeration` requests, the same
# `JobId` is returned. Use `ClientRequestToken` to prevent the same job
    # from being accidentally started more than once.
#
# @option params [Types::NotificationChannel] :notification_channel
# The Amazon SNS topic ARN that you want Amazon Rekognition Video to
# publish the completion status of the unsafe content analysis to.
#
# @option params [String] :job_tag
# An identifier you specify that's returned in the completion
# notification that's published to your Amazon Simple Notification
# Service topic. For example, you can use `JobTag` to group related jobs
# and identify them in the completion notification.
#
# @return [Types::StartContentModerationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::StartContentModerationResponse#job_id #job_id} => String
#
# @example Request syntax with placeholder values
#
# resp = client.start_content_moderation({
# video: { # required
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# min_confidence: 1.0,
# client_request_token: "ClientRequestToken",
# notification_channel: {
# sns_topic_arn: "SNSTopicArn", # required
# role_arn: "RoleArn", # required
# },
# job_tag: "JobTag",
# })
#
# @example Response structure
#
# resp.job_id #=> String
#
# @overload start_content_moderation(params = {})
# @param [Hash] params ({})
def start_content_moderation(params = {}, options = {})
req = build_request(:start_content_moderation, params)
req.send_request(options)
end
# Starts asynchronous detection of faces in a stored video.
#
# Amazon Rekognition Video can detect faces in a video stored in an
# Amazon S3 bucket. Use Video to specify the bucket name and the
# filename of the video. `StartFaceDetection` returns a job identifier
# (`JobId`) that you use to get the results of the operation. When face
# detection is finished, Amazon Rekognition Video publishes a completion
# status to the Amazon Simple Notification Service topic that you
# specify in `NotificationChannel`. To get the results of the face
# detection operation, first check that the status value published to
# the Amazon SNS topic is `SUCCEEDED`. If so, call GetFaceDetection and
# pass the job identifier (`JobId`) from the initial call to
# `StartFaceDetection`.
#
# For more information, see Detecting Faces in a Stored Video in the
# Amazon Rekognition Developer Guide.
#
# @option params [required, Types::Video] :video
# The video in which you want to detect faces. The video must be stored
# in an Amazon S3 bucket.
#
# @option params [String] :client_request_token
# Idempotent token used to identify the start request. If you use the
# same token with multiple `StartFaceDetection` requests, the same
# `JobId` is returned. Use `ClientRequestToken` to prevent the same job
    #   from being accidentally started more than once.
#
# @option params [Types::NotificationChannel] :notification_channel
# The ARN of the Amazon SNS topic to which you want Amazon Rekognition
# Video to publish the completion status of the face detection
# operation.
#
# @option params [String] :face_attributes
# The face attributes you want returned.
#
# `DEFAULT` - The following subset of facial attributes are returned:
# BoundingBox, Confidence, Pose, Quality and Landmarks.
#
# `ALL` - All facial attributes are returned.
#
# @option params [String] :job_tag
# An identifier you specify that's returned in the completion
# notification that's published to your Amazon Simple Notification
# Service topic. For example, you can use `JobTag` to group related jobs
# and identify them in the completion notification.
#
# @return [Types::StartFaceDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::StartFaceDetectionResponse#job_id #job_id} => String
#
# @example Request syntax with placeholder values
#
# resp = client.start_face_detection({
# video: { # required
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# client_request_token: "ClientRequestToken",
# notification_channel: {
# sns_topic_arn: "SNSTopicArn", # required
# role_arn: "RoleArn", # required
# },
# face_attributes: "DEFAULT", # accepts DEFAULT, ALL
# job_tag: "JobTag",
# })
#
# @example Response structure
#
# resp.job_id #=> String
#
# @overload start_face_detection(params = {})
# @param [Hash] params ({})
def start_face_detection(params = {}, options = {})
req = build_request(:start_face_detection, params)
req.send_request(options)
end
# Starts the asynchronous search for faces in a collection that match
# the faces of persons detected in a stored video.
#
# The video must be stored in an Amazon S3 bucket. Use Video to specify
# the bucket name and the filename of the video. `StartFaceSearch`
# returns a job identifier (`JobId`) which you use to get the search
# results once the search has completed. When searching is finished,
# Amazon Rekognition Video publishes a completion status to the Amazon
# Simple Notification Service topic that you specify in
# `NotificationChannel`. To get the search results, first check that the
# status value published to the Amazon SNS topic is `SUCCEEDED`. If so,
# call GetFaceSearch and pass the job identifier (`JobId`) from the
# initial call to `StartFaceSearch`. For more information, see
# procedure-person-search-videos.
#
# @option params [required, Types::Video] :video
# The video you want to search. The video must be stored in an Amazon S3
# bucket.
#
# @option params [String] :client_request_token
# Idempotent token used to identify the start request. If you use the
# same token with multiple `StartFaceSearch` requests, the same `JobId`
# is returned. Use `ClientRequestToken` to prevent the same job from
    #   being accidentally started more than once.
#
# @option params [Float] :face_match_threshold
# The minimum confidence in the person match to return. For example,
# don't return any matches where confidence in matches is less than
# 70%. The default value is 80%.
#
# @option params [required, String] :collection_id
# ID of the collection that contains the faces you want to search for.
#
# @option params [Types::NotificationChannel] :notification_channel
# The ARN of the Amazon SNS topic to which you want Amazon Rekognition
# Video to publish the completion status of the search.
#
# @option params [String] :job_tag
# An identifier you specify that's returned in the completion
# notification that's published to your Amazon Simple Notification
# Service topic. For example, you can use `JobTag` to group related jobs
# and identify them in the completion notification.
#
# @return [Types::StartFaceSearchResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::StartFaceSearchResponse#job_id #job_id} => String
#
# @example Request syntax with placeholder values
#
# resp = client.start_face_search({
# video: { # required
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# client_request_token: "ClientRequestToken",
# face_match_threshold: 1.0,
# collection_id: "CollectionId", # required
# notification_channel: {
# sns_topic_arn: "SNSTopicArn", # required
# role_arn: "RoleArn", # required
# },
# job_tag: "JobTag",
# })
#
# @example Response structure
#
# resp.job_id #=> String
#
# @overload start_face_search(params = {})
# @param [Hash] params ({})
def start_face_search(params = {}, options = {})
req = build_request(:start_face_search, params)
req.send_request(options)
end
# Starts asynchronous detection of labels in a stored video.
#
# Amazon Rekognition Video can detect labels in a video. Labels are
# instances of real-world entities. This includes objects like flower,
# tree, and table; events like wedding, graduation, and birthday party;
# concepts like landscape, evening, and nature; and activities like a
# person getting out of a car or a person skiing.
#
# The video must be stored in an Amazon S3 bucket. Use Video to specify
# the bucket name and the filename of the video. `StartLabelDetection`
# returns a job identifier (`JobId`) which you use to get the results of
# the operation. When label detection is finished, Amazon Rekognition
# Video publishes a completion status to the Amazon Simple Notification
# Service topic that you specify in `NotificationChannel`.
#
# To get the results of the label detection operation, first check that
# the status value published to the Amazon SNS topic is `SUCCEEDED`. If
# so, call GetLabelDetection and pass the job identifier (`JobId`) from
# the initial call to `StartLabelDetection`.
#
# @option params [required, Types::Video] :video
# The video in which you want to detect labels. The video must be stored
# in an Amazon S3 bucket.
#
# @option params [String] :client_request_token
# Idempotent token used to identify the start request. If you use the
# same token with multiple `StartLabelDetection` requests, the same
# `JobId` is returned. Use `ClientRequestToken` to prevent the same job
    #   from being accidentally started more than once.
#
# @option params [Float] :min_confidence
# Specifies the minimum confidence that Amazon Rekognition Video must
# have in order to return a detected label. Confidence represents how
    #   certain Amazon Rekognition is that a label is correctly identified. 0
# is the lowest confidence. 100 is the highest confidence. Amazon
# Rekognition Video doesn't return any labels with a confidence level
# lower than this specified value.
#
# If you don't specify `MinConfidence`, the operation returns labels
# with confidence values greater than or equal to 50 percent.
#
# @option params [Types::NotificationChannel] :notification_channel
# The Amazon SNS topic ARN you want Amazon Rekognition Video to publish
# the completion status of the label detection operation to.
#
# @option params [String] :job_tag
# An identifier you specify that's returned in the completion
# notification that's published to your Amazon Simple Notification
# Service topic. For example, you can use `JobTag` to group related jobs
# and identify them in the completion notification.
#
# @return [Types::StartLabelDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::StartLabelDetectionResponse#job_id #job_id} => String
#
# @example Request syntax with placeholder values
#
# resp = client.start_label_detection({
# video: { # required
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# client_request_token: "ClientRequestToken",
# min_confidence: 1.0,
# notification_channel: {
# sns_topic_arn: "SNSTopicArn", # required
# role_arn: "RoleArn", # required
# },
# job_tag: "JobTag",
# })
#
# @example Response structure
#
# resp.job_id #=> String
#
# @overload start_label_detection(params = {})
# @param [Hash] params ({})
def start_label_detection(params = {}, options = {})
req = build_request(:start_label_detection, params)
req.send_request(options)
end
# Starts the asynchronous tracking of a person's path in a stored
# video.
#
# Amazon Rekognition Video can track the path of people in a video
# stored in an Amazon S3 bucket. Use Video to specify the bucket name
# and the filename of the video. `StartPersonTracking` returns a job
# identifier (`JobId`) which you use to get the results of the
# operation. When label detection is finished, Amazon Rekognition
# publishes a completion status to the Amazon Simple Notification
# Service topic that you specify in `NotificationChannel`.
#
# To get the results of the person detection operation, first check that
# the status value published to the Amazon SNS topic is `SUCCEEDED`. If
# so, call GetPersonTracking and pass the job identifier (`JobId`) from
# the initial call to `StartPersonTracking`.
#
# @option params [required, Types::Video] :video
# The video in which you want to detect people. The video must be stored
# in an Amazon S3 bucket.
#
# @option params [String] :client_request_token
# Idempotent token used to identify the start request. If you use the
# same token with multiple `StartPersonTracking` requests, the same
# `JobId` is returned. Use `ClientRequestToken` to prevent the same job
    #   from being accidentally started more than once.
#
# @option params [Types::NotificationChannel] :notification_channel
# The Amazon SNS topic ARN you want Amazon Rekognition Video to publish
# the completion status of the people detection operation to.
#
# @option params [String] :job_tag
# An identifier you specify that's returned in the completion
# notification that's published to your Amazon Simple Notification
# Service topic. For example, you can use `JobTag` to group related jobs
# and identify them in the completion notification.
#
# @return [Types::StartPersonTrackingResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::StartPersonTrackingResponse#job_id #job_id} => String
#
# @example Request syntax with placeholder values
#
# resp = client.start_person_tracking({
# video: { # required
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# client_request_token: "ClientRequestToken",
# notification_channel: {
# sns_topic_arn: "SNSTopicArn", # required
# role_arn: "RoleArn", # required
# },
# job_tag: "JobTag",
# })
#
# @example Response structure
#
# resp.job_id #=> String
#
# @overload start_person_tracking(params = {})
# @param [Hash] params ({})
def start_person_tracking(params = {}, options = {})
req = build_request(:start_person_tracking, params)
req.send_request(options)
end
# Starts the running of the version of a model. Starting a model takes a
# while to complete. To check the current state of the model, use
# DescribeProjectVersions.
#
# Once the model is running, you can detect custom labels in new images
# by calling DetectCustomLabels.
#
# <note markdown="1"> You are charged for the amount of time that the model is running. To
# stop a running model, call StopProjectVersion.
#
# </note>
#
# This operation requires permissions to perform the
# `rekognition:StartProjectVersion` action.
#
# @option params [required, String] :project_version_arn
# The Amazon Resource Name(ARN) of the model version that you want to
# start.
#
# @option params [required, Integer] :min_inference_units
# The minimum number of inference units to use. A single inference unit
    #   represents 1 hour of processing and can support up to 5 Transactions
    #   Per Second (TPS). Use a higher number to increase the TPS throughput
# of your model. You are charged for the number of inference units that
# you use.
#
# @return [Types::StartProjectVersionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::StartProjectVersionResponse#status #status} => String
#
# @example Request syntax with placeholder values
#
# resp = client.start_project_version({
# project_version_arn: "ProjectVersionArn", # required
# min_inference_units: 1, # required
# })
#
# @example Response structure
#
# resp.status #=> String, one of "TRAINING_IN_PROGRESS", "TRAINING_COMPLETED", "TRAINING_FAILED", "STARTING", "RUNNING", "FAILED", "STOPPING", "STOPPED", "DELETING"
#
# @overload start_project_version(params = {})
# @param [Hash] params ({})
def start_project_version(params = {}, options = {})
req = build_request(:start_project_version, params)
req.send_request(options)
end
# Starts asynchronous detection of segment detection in a stored video.
#
# Amazon Rekognition Video can detect segments in a video stored in an
# Amazon S3 bucket. Use Video to specify the bucket name and the
# filename of the video. `StartSegmentDetection` returns a job
# identifier (`JobId`) which you use to get the results of the
# operation. When segment detection is finished, Amazon Rekognition
# Video publishes a completion status to the Amazon Simple Notification
# Service topic that you specify in `NotificationChannel`.
#
# You can use the `Filters` (StartSegmentDetectionFilters) input
# parameter to specify the minimum detection confidence returned in the
# response. Within `Filters`, use `ShotFilter`
# (StartShotDetectionFilter) to filter detected shots. Use
# `TechnicalCueFilter` (StartTechnicalCueDetectionFilter) to filter
# technical cues.
#
# To get the results of the segment detection operation, first check
# that the status value published to the Amazon SNS topic is
    # `SUCCEEDED`. If so, call GetSegmentDetection and pass the job
# identifier (`JobId`) from the initial call to `StartSegmentDetection`.
#
# For more information, see Detecting Video Segments in Stored Video in
# the Amazon Rekognition Developer Guide.
#
# @option params [required, Types::Video] :video
# Video file stored in an Amazon S3 bucket. Amazon Rekognition video
# start operations such as StartLabelDetection use `Video` to specify a
# video for analysis. The supported file formats are .mp4, .mov and
# .avi.
#
# @option params [String] :client_request_token
# Idempotent token used to identify the start request. If you use the
# same token with multiple `StartSegmentDetection` requests, the same
# `JobId` is returned. Use `ClientRequestToken` to prevent the same job
    #   from being accidentally started more than once.
#
# @option params [Types::NotificationChannel] :notification_channel
# The ARN of the Amazon SNS topic to which you want Amazon Rekognition
# Video to publish the completion status of the segment detection
# operation.
#
# @option params [String] :job_tag
# An identifier you specify that's returned in the completion
# notification that's published to your Amazon Simple Notification
# Service topic. For example, you can use `JobTag` to group related jobs
# and identify them in the completion notification.
#
# @option params [Types::StartSegmentDetectionFilters] :filters
# Filters for technical cue or shot detection.
#
# @option params [required, Array<String>] :segment_types
# An array of segment types to detect in the video. Valid values are
# TECHNICAL\_CUE and SHOT.
#
# @return [Types::StartSegmentDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::StartSegmentDetectionResponse#job_id #job_id} => String
#
# @example Request syntax with placeholder values
#
# resp = client.start_segment_detection({
# video: { # required
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# client_request_token: "ClientRequestToken",
# notification_channel: {
# sns_topic_arn: "SNSTopicArn", # required
# role_arn: "RoleArn", # required
# },
# job_tag: "JobTag",
# filters: {
# technical_cue_filter: {
# min_segment_confidence: 1.0,
# },
# shot_filter: {
# min_segment_confidence: 1.0,
# },
# },
# segment_types: ["TECHNICAL_CUE"], # required, accepts TECHNICAL_CUE, SHOT
# })
#
# @example Response structure
#
# resp.job_id #=> String
#
# @overload start_segment_detection(params = {})
# @param [Hash] params ({})
def start_segment_detection(params = {}, options = {})
req = build_request(:start_segment_detection, params)
req.send_request(options)
end
# Starts processing a stream processor. You create a stream processor by
# calling CreateStreamProcessor. To tell `StartStreamProcessor` which
# stream processor to start, use the value of the `Name` field specified
# in the call to `CreateStreamProcessor`.
#
# @option params [required, String] :name
# The name of the stream processor to start processing.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.start_stream_processor({
# name: "StreamProcessorName", # required
# })
#
# @overload start_stream_processor(params = {})
# @param [Hash] params ({})
def start_stream_processor(params = {}, options = {})
req = build_request(:start_stream_processor, params)
req.send_request(options)
end
# Starts asynchronous detection of text in a stored video.
#
# Amazon Rekognition Video can detect text in a video stored in an
# Amazon S3 bucket. Use Video to specify the bucket name and the
# filename of the video. `StartTextDetection` returns a job identifier
# (`JobId`) which you use to get the results of the operation. When text
# detection is finished, Amazon Rekognition Video publishes a completion
# status to the Amazon Simple Notification Service topic that you
# specify in `NotificationChannel`.
#
# To get the results of the text detection operation, first check that
    # the status value published to the Amazon SNS topic is `SUCCEEDED`. If
# so, call GetTextDetection and pass the job identifier (`JobId`) from
# the initial call to `StartTextDetection`.
#
# @option params [required, Types::Video] :video
# Video file stored in an Amazon S3 bucket. Amazon Rekognition video
# start operations such as StartLabelDetection use `Video` to specify a
# video for analysis. The supported file formats are .mp4, .mov and
# .avi.
#
# @option params [String] :client_request_token
# Idempotent token used to identify the start request. If you use the
# same token with multiple `StartTextDetection` requests, the same
# `JobId` is returned. Use `ClientRequestToken` to prevent the same job
    #   from being accidentally started more than once.
#
# @option params [Types::NotificationChannel] :notification_channel
# The Amazon Simple Notification Service topic to which Amazon
# Rekognition publishes the completion status of a video analysis
# operation. For more information, see api-video.
#
# @option params [String] :job_tag
# An identifier returned in the completion status published by your
# Amazon Simple Notification Service topic. For example, you can use
# `JobTag` to group related jobs and identify them in the completion
# notification.
#
# @option params [Types::StartTextDetectionFilters] :filters
# Optional parameters that let you set criteria the text must meet to be
# included in your response.
#
# @return [Types::StartTextDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::StartTextDetectionResponse#job_id #job_id} => String
#
# @example Request syntax with placeholder values
#
# resp = client.start_text_detection({
# video: { # required
# s3_object: {
# bucket: "S3Bucket",
# name: "S3ObjectName",
# version: "S3ObjectVersion",
# },
# },
# client_request_token: "ClientRequestToken",
# notification_channel: {
# sns_topic_arn: "SNSTopicArn", # required
# role_arn: "RoleArn", # required
# },
# job_tag: "JobTag",
# filters: {
# word_filter: {
# min_confidence: 1.0,
# min_bounding_box_height: 1.0,
# min_bounding_box_width: 1.0,
# },
# regions_of_interest: [
# {
# bounding_box: {
# width: 1.0,
# height: 1.0,
# left: 1.0,
# top: 1.0,
# },
# },
# ],
# },
# })
#
# @example Response structure
#
# resp.job_id #=> String
#
# @overload start_text_detection(params = {})
# @param [Hash] params ({})
def start_text_detection(params = {}, options = {})
req = build_request(:start_text_detection, params)
req.send_request(options)
end
# Stops a running model. The operation might take a while to complete.
# To check the current status, call DescribeProjectVersions.
#
# @option params [required, String] :project_version_arn
# The Amazon Resource Name (ARN) of the model version that you want to
    #   stop.
#
# This operation requires permissions to perform the
# `rekognition:StopProjectVersion` action.
#
# @return [Types::StopProjectVersionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::StopProjectVersionResponse#status #status} => String
#
# @example Request syntax with placeholder values
#
# resp = client.stop_project_version({
# project_version_arn: "ProjectVersionArn", # required
# })
#
# @example Response structure
#
# resp.status #=> String, one of "TRAINING_IN_PROGRESS", "TRAINING_COMPLETED", "TRAINING_FAILED", "STARTING", "RUNNING", "FAILED", "STOPPING", "STOPPED", "DELETING"
#
# @overload stop_project_version(params = {})
# @param [Hash] params ({})
def stop_project_version(params = {}, options = {})
req = build_request(:stop_project_version, params)
req.send_request(options)
end
# Stops a running stream processor that was created by
# CreateStreamProcessor.
#
# @option params [required, String] :name
# The name of a stream processor created by CreateStreamProcessor.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.stop_stream_processor({
# name: "StreamProcessorName", # required
# })
#
# @overload stop_stream_processor(params = {})
# @param [Hash] params ({})
def stop_stream_processor(params = {}, options = {})
req = build_request(:stop_stream_processor, params)
req.send_request(options)
end
# @!endgroup
# @param params ({})
# @api private
def build_request(operation_name, params = {})
handlers = @handlers.for(operation_name)
context = Seahorse::Client::RequestContext.new(
operation_name: operation_name,
operation: config.api.operation(operation_name),
client: self,
params: params,
config: config)
context[:gem_name] = 'aws-sdk-rekognition'
context[:gem_version] = '1.48.0'
Seahorse::Client::Request.new(handlers, context)
end
# Polls an API operation until a resource enters a desired state.
#
# ## Basic Usage
#
# A waiter will call an API operation until:
#
# * It is successful
# * It enters a terminal state
# * It makes the maximum number of attempts
#
# In between attempts, the waiter will sleep.
#
# # polls in a loop, sleeping between attempts
# client.wait_until(waiter_name, params)
#
# ## Configuration
#
# You can configure the maximum number of polling attempts, and the
# delay (in seconds) between each polling attempt. You can pass
# configuration as the final arguments hash.
#
# # poll for ~25 seconds
# client.wait_until(waiter_name, params, {
# max_attempts: 5,
# delay: 5,
# })
#
# ## Callbacks
#
# You can be notified before each polling attempt and before each
# delay. If you throw `:success` or `:failure` from these callbacks,
# it will terminate the waiter.
#
# started_at = Time.now
# client.wait_until(waiter_name, params, {
#
# # disable max attempts
# max_attempts: nil,
#
# # poll for 1 hour, instead of a number of attempts
# before_wait: -> (attempts, response) do
# throw :failure if Time.now - started_at > 3600
# end
# })
#
# ## Handling Errors
#
# When a waiter is unsuccessful, it will raise an error.
# All of the failure errors extend from
# {Aws::Waiters::Errors::WaiterFailed}.
#
# begin
# client.wait_until(...)
# rescue Aws::Waiters::Errors::WaiterFailed
# # resource did not enter the desired state in time
# end
#
# ## Valid Waiters
#
# The following table lists the valid waiter names, the operations they call,
# and the default `:delay` and `:max_attempts` values.
#
# | waiter_name | params | :delay | :max_attempts |
# | ---------------------------------- | ---------------------------------- | -------- | ------------- |
# | project_version_running | {Client#describe_project_versions} | 30 | 40 |
# | project_version_training_completed | {Client#describe_project_versions} | 120 | 360 |
#
# @raise [Errors::FailureStateError] Raised when the waiter terminates
# because the waiter has entered a state that it will not transition
# out of, preventing success.
#
# @raise [Errors::TooManyAttemptsError] Raised when the configured
# maximum number of attempts have been made, and the waiter is not
# yet successful.
#
# @raise [Errors::UnexpectedError] Raised when an error is encounted
# while polling for a resource that is not expected.
#
# @raise [Errors::NoSuchWaiterError] Raised when you request to wait
# for an unknown state.
#
# @return [Boolean] Returns `true` if the waiter was successful.
# @param [Symbol] waiter_name
# @param [Hash] params ({})
# @param [Hash] options ({})
# @option options [Integer] :max_attempts
# @option options [Integer] :delay
# @option options [Proc] :before_attempt
# @option options [Proc] :before_wait
def wait_until(waiter_name, params = {}, options = {})
w = waiter(waiter_name, options)
yield(w.waiter) if block_given? # deprecated
w.wait(params)
end
# @api private
# @deprecated
def waiter_names
waiters.keys
end
private
# @param [Symbol] waiter_name
# @param [Hash] options ({})
def waiter(waiter_name, options = {})
waiter_class = waiters[waiter_name]
if waiter_class
waiter_class.new(options.merge(client: self))
else
raise Aws::Waiters::Errors::NoSuchWaiterError.new(waiter_name, waiters.keys)
end
end
def waiters
{
project_version_running: Waiters::ProjectVersionRunning,
project_version_training_completed: Waiters::ProjectVersionTrainingCompleted
}
end
class << self
# @api private
attr_reader :identifier
# @api private
def errors_module
Errors
end
end
end
end
| 47.031518 | 542 | 0.657602 |
1ae2363185a5868b3f575223939f1f55e7bbd49a | 1,147 | require "spec_helper"
# Specs for FakeCloudinary::Overrides, which replaces
# Cloudinary::Utils.unsigned_download_url_prefix so generated URLs
# point at FakeCloudinary.host instead of the real CDN.
RSpec.describe FakeCloudinary::Overrides do
  describe ".stub_download_prefix" do
    it "allows to use custom host for cloudinary cdn" do
      host = "http://localhost:3001"
      cloud_name = "test"
      # Point FakeCloudinary at a local host for the duration of the example.
      allow(FakeCloudinary).to receive(:host).and_return(host)
      described_class.stub_download_prefix
      result = ::Cloudinary::Utils.
        unsigned_download_url_prefix("", cloud_name, "")
      expect(result).to eq("#{host}/#{cloud_name}")
    end
  end
  describe ".remove_stub_download_prefix" do
    it "calls original method" do
      host = "http://localhost:3001"
      cloud_name = "test"
      allow(FakeCloudinary).to receive(:host).and_return(host)
      described_class.stub_download_prefix
      # While the stub is installed the method works with no arguments.
      result = ::Cloudinary::Utils.
        unsigned_download_url_prefix
      expect(result).to eq("#{host}/#{cloud_name}")
      described_class.remove_stub_download_prefix
      # original method will raise argument error
      # because original method expects 8 arguments
      expect { Cloudinary::Utils.unsigned_download_url_prefix }.
        to raise_error(ArgumentError)
    end
  end
end
| 26.674419 | 64 | 0.691369 |
b9c2d8feee1e0a4bbab05c8004d49d3ac13b8528 | 42,739 | require 'date'
require 'fileutils'
module TZInfo
module Data
# Utility methods used by TZDataParser and associated classes.
#
# @private
module TZDataParserUtils #:nodoc:
# Wrapper for File.open that supports passing hash options for specifying
# encodings on Ruby 1.9+. The options are ignored on earlier versions of
# Ruby.
if RUBY_VERSION =~ /\A1\.[0-8]\./
def open_file(file_name, mode, opts, &block)
File.open(file_name, mode, &block)
end
else
def open_file(file_name, mode, opts, &block)
File.open(file_name, mode, opts, &block)
end
end
private :open_file
private
# Parses a month specified in the tz data and converts it to a number
# between 1 and 12 representing January to December.
def parse_month(month)
lower = month.downcase
if lower =~ /^jan/
@month = 1
elsif lower =~ /^feb/
@month = 2
elsif lower =~ /^mar/
@month = 3
elsif lower =~ /^apr/
@month = 4
elsif lower =~ /^may/
@month = 5
elsif lower =~ /^jun/
@month = 6
elsif lower =~ /^jul/
@month = 7
elsif lower =~ /^aug/
@month = 8
elsif lower =~ /^sep/
@month = 9
elsif lower =~ /^oct/
@month = 10
elsif lower =~ /^nov/
@month = 11
elsif lower =~ /^dec/
@month = 12
else
raise "Invalid month: #{month}"
end
end
# Parses an offset string [-]h:m:s (minutes and seconds are optional). Returns
# the offset in seconds.
def parse_offset(offset)
raise "Invalid time: #{offset}" if offset !~ /^(-)?(?:([0-9]+)(?::([0-9]+)(?::([0-9]+))?)?)?$/
negative = !$1.nil?
hour = $2.nil? ? 0 : $2.to_i
minute = $3.nil? ? 0 : $3.to_i
second = $4.nil? ? 0 : $4.to_i
seconds = hour
seconds = seconds * 60
seconds = seconds + minute
seconds = seconds * 60
seconds = seconds + second
seconds = -seconds if negative
seconds
end
# Encloses the string in single quotes and escapes any single quotes in
# the content.
def quote_str(str)
"'#{str.gsub('\'', '\\\\\'')}'"
end
end
# Parses Time Zone Data from the IANA Time Zone Database and transforms it
# into a set of Ruby modules that can be used with TZInfo.
#
# Normally, this class wouldn't be used. It is only run to update the
# timezone data and index modules.
class TZDataParser
  include TZDataParserUtils
  # Default earliest year that will be considered.
  DEFAULT_MIN_YEAR = 1800
  # Default number of future years data to generate (not including the
  # current year).
  DEFAULT_FUTURE_YEARS = 50
  # Earliest year that will be considered. Defaults to DEFAULT_MIN_YEAR.
  attr_accessor :min_year
  # Latest year that will be considered. Defaults to the current year plus
  # FUTURE_YEARS.
  attr_accessor :max_year
  # Whether to generate zone definitions (set to false to stop zones being
  # generated).
  attr_accessor :generate_zones
  # Whether to generate country definitions (set to false to stop countries
  # being generated).
  attr_accessor :generate_countries
  # Limit the set of zones to generate (set to an array containing zone
  # identifiers).
  attr_accessor :only_zones
  # Zones to exclude from generation when not using only_zones (set to an
  # array containing zone identifiers).
  attr_accessor :exclude_zones
  # Initializes a new TZDataParser. input_dir must contain the extracted
  # tzdata tarball. output_dir is the location to output the modules
  # (in definitions and indexes directories).
  def initialize(input_dir, output_dir)
    super()
    @input_dir = input_dir
    @output_dir = output_dir
    @min_year = DEFAULT_MIN_YEAR
    @max_year = Time.now.year + DEFAULT_FUTURE_YEARS
    @rule_sets = {}   # Rule name => TZDataRuleSet
    @zones = {}       # zone identifier => TZDataZone or TZDataLink
    @countries = {}   # ISO 3166 code => TZDataCountry
    @no_rules = TZDataNoRules.new
    @generate_zones = true
    @generate_countries = true
    @only_zones = []
    @exclude_zones = []
  end
  # Reads the tzdata source and generates the classes. Progress information
  # is written to standard out.
  def execute
    # Note that the backzone file is ignored. backzone contains alternative
    # definitions of certain zones, primarily for pre-1970 data. It is not
    # recommended for ordinary use and the tzdata Makefile does not
    # install its entries by default.
    files = Dir.entries(@input_dir).select do |file|
      file =~ /\A[^\.]+\z/ &&
      !%w(backzone checktab.awk leapseconds leapseconds.awk leap-seconds.list CONTRIBUTING Makefile NEWS README SOURCE Theory zoneinfo2tdf.pl).include?(file) &&
      File.file?(File.join(@input_dir, file))
    end
    # Rules must be loaded before zones (zones reference rule sets), and
    # zones before links (links reference zones).
    files.each {|file| load_rules(file) }
    files.each {|file| load_zones(file) }
    files.each {|file| load_links(file) }
    load_countries
    if @generate_zones
      modules = []   # NOTE(review): assigned but never used - dead local
      if @only_zones.nil? || @only_zones.empty?
        @zones.each_value {|zone|
          zone.write_module(@output_dir) unless @exclude_zones.include?(zone.name)
        }
      else
        @only_zones.each {|id|
          zone = @zones[id]
          zone.write_module(@output_dir)
        }
      end
      write_timezones_index
    end
    if @generate_countries
      write_countries_index
    end
  end
  private
  # Loads all the Rule definitions from the tz data and stores them in
  # the rule_sets instance variable.
  def load_rules(file)
    puts 'load_rules: ' + file
    # Files are in ASCII, but may change to UTF-8 (a superset of ASCII)
    # in the future.
    open_file(File.join(@input_dir, file), 'r', :external_encoding => 'UTF-8', :internal_encoding => 'UTF-8') do |file|
      file.each_line do |line|
        # Strip trailing comments and whitespace before matching.
        line = line.gsub(/#.*$/, '')
        line = line.gsub(/\s+$/, '')
        # Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
        if line =~ /^Rule\s+([^\s]+)\s+([^\s]+)\s+([^\s]+)\s+([^\s]+)\s+([^\s]+)\s+([^\s]+)\s+([^\s]+)\s+([^\s]+)\s+([^\s]+)/
          name = $1
          if @rule_sets[name].nil?
            @rule_sets[name] = TZDataRuleSet.new(name)
          end
          @rule_sets[name].add_rule(TZDataRule.new($2, $3, $4, $5, $6, $7, $8, $9))
        end
      end
    end
  end
  # Gets a rules object for the given reference. Might be a named rule set,
  # a fixed offset or an empty ruleset.
  def get_rules(ref)
    if ref == '-'
      @no_rules
    elsif ref =~ /^[0-9]+:[0-9]+$/
      TZDataFixedOffsetRules.new(parse_offset(ref))
    else
      rule_set = @rule_sets[ref]
      raise "Ruleset not found: #{ref}" if rule_set.nil?
      rule_set
    end
  end
  # Loads in the Zone definitions from the tz data and stores them in @zones.
  def load_zones(file)
    puts 'load_zones: ' + file
    # Tracks the zone a multi-line definition is continuing, or nil.
    in_zone = nil
    # Files are in ASCII, but may change to UTF-8 (a superset of ASCII)
    # in the future.
    open_file(File.join(@input_dir, file), 'r', :external_encoding => 'UTF-8', :internal_encoding => 'UTF-8') do |file|
      file.each_line do |line|
        line = line.gsub(/#.*$/, '')
        line = line.gsub(/\s+$/, '')
        if in_zone
          # Continuation line: STDOFF RULES FORMAT [UNTIL]. The absence
          # of an UNTIL column ($4 nil) terminates the zone definition.
          if line =~ /^\s+([^\s]+)\s+([^\s]+)\s+([^\s]+)(\s+([0-9]+(\s+.*)?))?$/
            in_zone.add_observance(TZDataObservance.new($1, get_rules($2), $3, $5))
            in_zone = nil if $4.nil?
          end
        else
          # Zone NAME STDOFF RULES FORMAT [UNTIL]
          if line =~ /^Zone\s+([^\s]+)\s+([^\s]+)\s+([^\s]+)\s+([^\s]+)(\s+([0-9]+(\s+.*)?))?$/
            name = $1
            if @zones[name].nil?
              @zones[name] = TZDataZone.new(name, @min_year..@max_year)
            end
            @zones[name].add_observance(TZDataObservance.new($2, get_rules($3), $4, $6))
            in_zone = @zones[name] if !$5.nil?
          end
        end
      end
    end
  end
  # Loads in the links and stores them in @zones.
  def load_links(file)
    puts 'load_links: ' + file
    # Files are in ASCII, but may change to UTF-8 (a superset of ASCII)
    # in the future.
    open_file(File.join(@input_dir, file), 'r', :external_encoding => 'UTF-8', :internal_encoding => 'UTF-8') do |file|
      file.each_line do |line|
        line = line.gsub(/#.*$/, '')
        line = line.gsub(/\s+$/, '')
        # Link TARGET LINK-NAME (the link target must already be loaded)
        if line =~ /^Link\s+([^\s]+)\s+([^\s]+)/
          name = $2
          link_to = @zones[$1]
          raise "Link to zone not found (#{name}->#{link_to})" if link_to.nil?
          raise "Zone already defined: #{name}" if !@zones[name].nil?
          @zones[name] = TZDataLink.new(name, link_to)
        end
      end
    end
  end
  # Loads countries from iso3166.tab and zone1970.tab and stores the
  # result in the countries instance variable.
  def load_countries
    puts 'load_countries'
    # iso3166.tab is ASCII encoded, but is planned to change to UTF-8 (a
    # superset of ASCII) in the future.
    open_file(File.join(@input_dir, 'iso3166.tab'), 'r', :external_encoding => 'UTF-8', :internal_encoding => 'UTF-8') do |file|
      file.each_line do |line|
        # CODE<TAB>NAME
        if line =~ /^([A-Z]{2})\t(.*)$/
          code = $1
          name = $2
          @countries[code] = TZDataCountry.new(code, name)
        end
      end
    end
    primary_zones = {}
    secondary_zones = {}
    # zone1970.tab is UTF-8 encoded.
    open_file(File.join(@input_dir, 'zone1970.tab'), 'r', :external_encoding => 'UTF-8', :internal_encoding => 'UTF-8') do |file|
      file.each_line do |line|
        line.chomp!
        # CODES<TAB>COORDINATES<TAB>ZONE[<TAB>COMMENTS]
        if line =~ /^([A-Z]{2}(?:,[A-Z]{2})*)\t([^\t]+)\t([^\t]+)(\t(.*))?$/
          codes = $1
          location_str = $2
          zone_name = $3
          description = $5
          location = TZDataLocation.new(location_str)
          zone = @zones[zone_name]
          raise "Zone not found: #{zone_name}" if zone.nil?
          description = nil if description == ''
          country_timezone = TZDataCountryTimezone.new(zone, description, location)
          codes = codes.split(',')
          # The first code is the zone's primary country; remaining codes
          # reference the same zone as a secondary entry.
          (primary_zones[codes.first] ||= []) << country_timezone
          codes[1..-1].each do |code|
            (secondary_zones[code] ||= []) << country_timezone
          end
        end
      end
    end
    # Primary zones are added to each country before secondary zones so
    # they appear first in the generated index.
    [primary_zones, secondary_zones].each do |zones|
      zones.each_pair do |code, country_timezones|
        country = @countries[code]
        raise "Country not found: #{code}" if country.nil?
        country_timezones.each do |country_timezone|
          country.add_zone(country_timezone)
        end
      end
    end
  end
  # Writes a country index file.
  def write_countries_index
    dir = File.join(@output_dir, 'indexes')
    FileUtils.mkdir_p(dir)
    open_file(File.join(dir, 'countries.rb'), 'w', :external_encoding => 'UTF-8', :universal_newline => true) do |file|
      file.puts('# encoding: UTF-8')
      file.puts('')
      file.puts('# This file contains data derived from the IANA Time Zone Database')
      file.puts('# (http://www.iana.org/time-zones).')
      file.puts('')
      file.puts('module TZInfo')
      file.puts(' module Data')
      file.puts(' module Indexes')
      file.puts(' module Countries')
      file.puts(' include CountryIndexDefinition')
      file.puts('')
      # Countries are emitted sorted by ISO code for a stable output file.
      countries = @countries.values.sort {|c1,c2| c1.code <=> c2.code}
      countries.each {|country| country.write_index_record(file)}
      file.puts(' end') # end module Countries
      file.puts(' end') # end module Indexes
      file.puts(' end') # end module Data
      file.puts('end') # end module TZInfo
    end
  end
  # Writes a timezone index file.
  def write_timezones_index
    dir = File.join(@output_dir, 'indexes')
    FileUtils.mkdir_p(dir)
    open_file(File.join(dir, 'timezones.rb'), 'w', :external_encoding => 'UTF-8', :universal_newline => true) do |file|
      file.puts('# encoding: UTF-8')
      file.puts('')
      file.puts('# This file contains data derived from the IANA Time Zone Database')
      file.puts('# (http://www.iana.org/time-zones).')
      file.puts('')
      file.puts('module TZInfo')
      file.puts(' module Data')
      file.puts(' module Indexes')
      file.puts(' module Timezones')
      file.puts(' include TimezoneIndexDefinition')
      file.puts('')
      # Zones (including links) are emitted sorted by identifier.
      zones = @zones.values.sort {|t1,t2| t1.name <=> t2.name}
      zones.each {|zone| zone.write_index_record(file)}
      file.puts(' end') # end module Timezones
      file.puts(' end') # end module Indexes
      file.puts(' end') # end module Data
      file.puts('end') # end module TZInfo
    end
  end
end
# Base class for all rule sets.
#
# @private
class TZDataRules #:nodoc:
  # Name of the rule set, e.g. EU.
  attr_reader :name

  # Stores the rule set name.
  def initialize(name)
    @name = name
  end

  # The number of rules in the set; base rule sets contain none.
  def count
    0
  end
end
# Empty rule set with a fixed daylight savings (std) offset.
#
# @private
class TZDataFixedOffsetRules < TZDataRules #:nodoc:
  # The fixed std offset in seconds.
  attr_reader :offset

  # The offset doubles (as a string) as the rule set's name.
  def initialize(offset)
    @offset = offset
    super(offset.to_s)
  end
end
# An empty set of rules.
#
# @private
class TZDataNoRules < TZDataRules #:nodoc:
  # Named '-', the tz data placeholder for "no rules".
  def initialize
    super('-')
  end
end
# A rule set (as defined by Rule name in the tz data).
#
# @private
class TZDataRuleSet < TZDataRules #:nodoc:
  # The TZDataRule instances belonging to this set, in insertion order.
  attr_reader :rules

  def initialize(name)
    super
    @rules = []
  end

  # Appends a rule to the set.
  def add_rule(rule)
    @rules << rule
  end

  # The number of rules in the set.
  def count
    @rules.size
  end

  # Yields each rule in insertion order.
  def each
    @rules.each {|member| yield member}
  end
end
# A rule in a RuleSet (a single Rule line in the tz data).
#
# @private
class TZDataRule #:nodoc:
  include TZDataParserUtils
  # First year the rule applies in (Integer or :min).
  attr_reader :from
  # Last year the rule applies in (Integer or :max).
  attr_reader :to
  # Always nil (only the '-' TYPE value is supported; see parse_type).
  attr_reader :type
  # Month the rule fires in (1-12).
  attr_reader :in_month
  # TZDataDayOfMonth giving the day the rule fires on.
  attr_reader :on_day
  # TZDataTime giving the time of day the rule fires at.
  attr_reader :at_time
  # Daylight savings offset to apply, in seconds.
  attr_reader :save
  # LETTER/S value used in abbreviation substitution, or nil for '-'.
  attr_reader :letter
  def initialize(from, to, type, in_month, on_day, at_time, save, letter)
    @from = parse_from(from)
    @to = parse_to(to)
    # replace a to of :only with the from year
    raise 'to cannot be only if from is minimum' if @to == :only && @from == :min
    @to = @from if @to == :only
    @type = parse_type(type)
    @in_month = parse_month(in_month)
    @on_day = TZDataDayOfMonth.new(on_day)
    @at_time = TZDataTime.new(at_time)
    @save = parse_offset(save)
    @letter = parse_letter(letter)
  end
  # Returns a TZDataActivatedRule for the given year if this rule is in
  # effect in that year, otherwise nil.
  def activate(year)
    # The following test ignores yearistype at present (currently unused in
    # the data); parse_type raises on encountering a year type.
    if (@from == :min || @from <= year) && (@to == :max || @to >= year)
      TZDataActivatedRule.new(self, year)
    else
      nil
    end
  end
  # UTC DateTime at which this rule fires in the given year, relative to
  # the supplied base and daylight savings offsets.
  def at_utc_time(year, utc_offset, std_offset)
    @at_time.to_utc(utc_offset, std_offset,
    year, @in_month, @on_day.to_absolute(year, @in_month))
  end
  private
  # Parses the FROM field: 'min(imum)' or a year number.
  def parse_from(from)
    lower = from.downcase
    if lower =~ /^min/
      :min
    elsif lower =~ /^[0-9]+$/
      lower.to_i
    else
      raise "Invalid from: #{from}"
    end
  end
  # Parses the TO field: 'max(imum)', 'o(nly)' or a year number.
  def parse_to(to)
    lower = to.downcase
    if lower =~ /^max/
      :max
    elsif lower =~ /^o/
      :only
    elsif lower =~ /^[0-9]+$/
      lower.to_i
    else
      raise "Invalid to: #{to}"
    end
  end
  # Only the '-' TYPE is supported; anything else raises.
  def parse_type(type)
    raise "Unsupported rule type: #{type}" if type != '-'
    nil
  end
  # '-' means no letter; otherwise the value is used verbatim.
  def parse_letter(letter)
    if letter == '-'
      nil
    else
      letter
    end
  end
end
# Base class for Zones and Links.
#
# @private
class TZDataDefinition #:nodoc:
  include TZDataParserUtils
  # Full zone identifier, e.g. 'America/New_York'.
  attr_reader :name
  # Identifier split on '/', with '+'/'-' replaced so each element is a
  # valid Ruby module name.
  attr_reader :name_elements
  # name_elements minus the final element (the output directory path).
  attr_reader :path_elements
  def initialize(name)
    @name = name
    # + and - aren't allowed in class names
    @name_elements = name.gsub(/-/, '__m__').gsub(/\+/, '__p__').split(/\//)
    @path_elements = @name_elements.clone
    @path_elements.pop
  end
  # Creates necessary directories, the file, writes the class header and footer
  # and yields to a block to write the content.
  def create_file(output_dir)
    dir = File.join(output_dir, 'definitions', @path_elements.join(File::SEPARATOR))
    FileUtils.mkdir_p(dir)
    open_file(File.join(output_dir, 'definitions', @name_elements.join(File::SEPARATOR)) + '.rb', 'w', :external_encoding => 'UTF-8', :universal_newline => true) do |file|
      # Indentation tracking is attached to this particular IO object
      # only (singleton methods), so nested module blocks line up.
      file.instance_variable_set(:@tz_indent, 0)
      def file.indent(by)
        @tz_indent += by
      end
      def file.puts(s)
        super("#{' ' * @tz_indent}#{s}")
      end
      file.puts('# encoding: UTF-8')
      file.puts('')
      file.puts('# This file contains data derived from the IANA Time Zone Database')
      file.puts('# (http://www.iana.org/time-zones).')
      file.puts('')
      file.puts('module TZInfo')
      file.indent(2)
      file.puts('module Data')
      file.indent(2)
      file.puts('module Definitions')
      file.indent(2)
      # One nested module per identifier element, e.g. America, New_York.
      @name_elements.each do |part|
        file.puts("module #{part}")
        file.indent(2)
      end
      file.puts('include TimezoneDefinition')
      file.puts('')
      yield file
      # Close the per-element modules opened above.
      @name_elements.each do
        file.indent(-2)
        file.puts('end')
      end
      file.indent(-2)
      file.puts('end') # end module Definitions
      file.indent(-2)
      file.puts('end') # end module Data
      file.indent(-2)
      file.puts('end') # end module TZInfo
    end
  end
end
# A tz data Link.
#
# @private
class TZDataLink < TZDataDefinition #:nodoc:
  include TZDataParserUtils

  # The TZDataZone this link is an alias for.
  attr_reader :link_to

  def initialize(name, link_to)
    super(name)
    @link_to = link_to
  end

  # Writes a module for this link.
  def write_module(output_dir)
    puts "writing link #{name}"
    create_file(output_dir) do |file|
      file.puts("linked_timezone #{quote_str(@name)}, #{quote_str(@link_to.name)}")
    end
  end

  # Writes an index record for this link.
  def write_index_record(file)
    file.puts(" linked_timezone #{quote_str(@name)}")
  end
end
# A tz data Zone. Each line from the tz data is loaded as a TZDataObservance.
#
# @private
class TZDataZone < TZDataDefinition #:nodoc:
  include TZDataParserUtils
  # The TZDataObservance instances for this zone, in tz data order.
  attr_reader :observances
  # years is the Range of years transitions will be generated for.
  def initialize(name, years)
    super(name)
    @years = years
    @observances = []
  end
  # Appends an observance (one Zone line) to the zone.
  def add_observance(observance)
    @observances << observance
  end
  # Writes the module for the zone. Iterates all the periods and asks them
  # to write all periods in the timezone.
  def write_module(output_dir)
    puts "writing zone #{name}"
    create_file(output_dir) {|file|
      file.puts("timezone #{quote_str(@name)} do |tz|")
      file.indent(2)
      transitions = find_transitions
      transitions.output_module(file)
      file.indent(-2)
      file.puts('end')
    }
  end
  # Writes an index record for this zone.
  def write_index_record(file)
    file.puts(" timezone #{quote_str(@name)}")
  end
  private
  # Builds the TZDataTransitions for this zone across @years, following
  # the algorithm of zic.c's outzone: walk the observances in order and,
  # within each, repeatedly apply the earliest-firing activated rule
  # until the observance's UNTIL time is reached.
  def find_transitions
    transitions = TZDataTransitions.new(@years)
    # algorithm from zic.c outzone
    start_time = nil   # UTC time the current observance started (nil for the first)
    until_time = nil   # UTC time the current observance ends (when use_until)
    @observances.each_with_index {|observance, i|
      std_offset = 0
      use_start = i > 0                         # all but the first observance begin at the prior UNTIL
      use_until = i < @observances.length - 1   # all but the last observance have an UNTIL
      utc_offset = observance.utc_offset
      start_zone_id = nil
      start_utc_offset = observance.utc_offset
      start_std_offset = 0
      if observance.rule_set.count == 0
        # No named rules: a single fixed std offset applies throughout.
        std_offset = observance.std_offset
        start_zone_id = observance.format.expand(std_offset, nil)
        if use_start
          transitions << TZDataTransition.new(start_time, utc_offset, std_offset, start_zone_id)
          use_start = false
        else
          # zic algorithm only outputs this if std_offset is non-zero
          # to get the initial LMT range, we output this regardless
          transitions << TZDataTransition.new(nil, utc_offset, std_offset, start_zone_id)
        end
      else
        @years.each {|year|
          if use_until && year > observance.valid_until.year
            break
          end
          activated_rules = []
          observance.rule_set.each {|rule|
            activated_rule = rule.activate(year)
            activated_rules << activated_rule unless activated_rule.nil?
          }
          while true
            # turn until_time into UTC using the current utc_offset and std_offset
            until_time = observance.valid_until.to_utc(utc_offset, std_offset) if use_until
            earliest = nil
            activated_rules.each {|activated_rule|
              # recalculate the time using the current std_offset
              activated_rule.calculate_time(utc_offset, std_offset)
              earliest = activated_rule if earliest.nil? || activated_rule.at < earliest.at
            }
            break if earliest.nil?
            activated_rules.delete(earliest)
            break if use_until && earliest.at >= until_time
            std_offset = earliest.rule.save
            use_start = false if use_start && earliest.at == start_time
            if use_start
              if earliest.at < start_time
                # Rule fires before this observance starts; it only
                # determines the state in effect at start_time.
                start_utc_offset = observance.utc_offset
                start_std_offset = std_offset
                start_zone_id = observance.format.expand(earliest.rule.save, earliest.rule.letter)
                next
              end
              if start_zone_id.nil? && start_utc_offset + start_std_offset == observance.utc_offset + std_offset
                start_zone_id = observance.format.expand(earliest.rule.save, earliest.rule.letter)
              end
            end
            zone_id = observance.format.expand(earliest.rule.save, earliest.rule.letter)
            transitions << TZDataTransition.new(earliest.at, observance.utc_offset, earliest.rule.save, zone_id)
          end
        }
      end
      if use_start
        # No rule fired exactly at the observance start; fall back to a
        # fixed format, otherwise the abbreviation can't be determined.
        start_zone_id = observance.format.expand(nil, nil) if start_zone_id.nil? && observance.format.fixed?
        raise 'Could not determine time zone abbreviation to use just after until time' if start_zone_id.nil?
        transitions << TZDataTransition.new(start_time, start_utc_offset, start_std_offset, start_zone_id)
      end
      start_time = observance.valid_until.to_utc(utc_offset, std_offset) if use_until
    }
    transitions
  end
end
# An observance within a zone (a line within the zone definition).
#
# @private
class TZDataObservance #:nodoc:
  include TZDataParserUtils

  # Base offset from UTC in seconds.
  attr_reader :utc_offset
  # The TZDataRules subclass governing daylight savings for this period.
  attr_reader :rule_set
  # TZDataFormat used to expand the time zone abbreviation.
  attr_reader :format
  # TZDataUntil marking the end of the observance, or nil if open-ended.
  attr_reader :valid_until

  def initialize(utc_offset, rule_set, format, valid_until)
    @utc_offset = parse_offset(utc_offset)
    @rule_set = rule_set
    @format = TZDataFormat.new(format)
    @valid_until = valid_until && TZDataUntil.new(valid_until)
  end

  # The std offset: non-zero only when the rules are a fixed offset.
  def std_offset
    @rule_set.is_a?(TZDataFixedOffsetRules) ? @rule_set.offset : 0
  end
end
# Collection of TZDataTransition instances used when building a zone class.
#
# @private
class TZDataTransitions #:nodoc:
  include TZDataParserUtils
  # years is the Range of years transitions are generated for.
  def initialize(years)
    @years = years
    @transitions = []
  end
  # Appends a TZDataTransition.
  def << (transition)
    @transitions << transition
  end
  # Writes the offsets and transitions for a zone definition module.
  def output_module(file)
    optimize
    # Try and end on a transition to std if one happens in the last year.
    if @transitions.length > 1 &&
    @transitions.last.std_offset != 0 &&
    @transitions[@transitions.length - 2].std_offset == 0 &&
    @transitions[@transitions.length - 2].at_utc.year == @years.max
      # Drop the final daylight-savings transition so the zone ends on
      # standard time. (Fixed: this slice had been corrupted by an
      # email-obfuscation artifact in place of
      # "@transitions[0..@transitions.length - 2]".)
      transitions = @transitions[0..-2]
    else
      transitions = @transitions
    end
    process_offsets(file)
    file.puts('')
    transitions.each do |t|
      t.write(file)
    end
  end
  private
  # Sorts the transitions and removes redundant ones, following the
  # optimization logic of zic.c's writezone.
  def optimize
    @transitions.sort!
    # Optimization logic from zic.c writezone.
    from_i = 0
    to_i = 0
    while from_i < @transitions.length
      # If the next transition takes effect no later (in local time) than
      # the previously kept one, fold it into that slot.
      if to_i > 1 &&
      !@transitions[from_i].at_utc.nil? &&
      !@transitions[to_i - 1].at_utc.nil? &&
      @transitions[from_i].at_utc + Rational(@transitions[to_i - 1].total_offset, 86400) <=
      @transitions[to_i - 1].at_utc + Rational(@transitions[to_i - 2].total_offset, 86400)
        @transitions[to_i - 1] = @transitions[from_i].clone_with_at(@transitions[to_i - 1].at_utc)
        from_i += 1
        next
      end
      # Shuffle transitions up, eliminating any redundant transitions
      # along the way.
      if to_i == 0 ||
      @transitions[to_i - 1].utc_offset != @transitions[from_i].utc_offset ||
      @transitions[to_i - 1].std_offset != @transitions[from_i].std_offset ||
      @transitions[to_i - 1].zone_id != @transitions[from_i].zone_id
        @transitions[to_i] = @transitions[from_i]
        to_i += 1
      end
      from_i += 1
    end
    if to_i > 0
      @transitions = @transitions[0..to_i - 1]
    else
      @transitions = []
    end
  end
  # Emits a zone id as a symbol literal, quoting it when it contains
  # characters that aren't valid in a bare symbol.
  def quote_zone_id(zone_id)
    if zone_id =~ %r{[\-+']}
      ":#{quote_str(zone_id)}"
    else
      ":#{zone_id}"
    end
  end
  # Collects the distinct (utc_offset, std_offset, zone_id) combinations,
  # names them o0, o1, ... and writes the tz.offset lines. Each
  # transition is tagged with its offset name for later output.
  def process_offsets(file)
    # A bit of a hack at the moment. The offset used to be output with
    # each period (pair of transitions). They are now separated from the
    # transition data. The code should probably be changed at some point to
    # setup the offsets at an earlier stage.
    # Assume that when this is called, the first transition is the Local
    # Mean Time initial rule or a transition with no time that defines the
    # offset for the entire zone.
    offsets = []
    # Find the first std offset. Timezones always start in std.
    @transitions.each do |t|
      if t.std_offset == 0
        offset = {:utc_offset => t.utc_offset,
        :std_offset => t.std_offset,
        :zone_id => t.zone_id,
        :name => 'o0'}
        offsets << offset
        break
      end
    end
    @transitions.each do |t|
      offset = offsets.find do |o|
        o[:utc_offset] == t.utc_offset &&
        o[:std_offset] == t.std_offset &&
        o[:zone_id] == t.zone_id
      end
      unless offset
        offset = {:utc_offset => t.utc_offset,
        :std_offset => t.std_offset,
        :zone_id => t.zone_id,
        :name => "o#{offsets.length}"}
        offsets << offset
      end
      t.offset_name = offset[:name]
    end
    offsets.each do |offset|
      file.puts("tz.offset :#{offset[:name]}, #{offset[:utc_offset]}, #{offset[:std_offset]}, #{quote_zone_id(offset[:zone_id])}")
    end
  end
end
# A transition that will be used to write the periods in a zone class.
#
# @private
class TZDataTransition #:nodoc:
  include Comparable
  # The Unix epoch as a DateTime, used to derive integer timestamps.
  EPOCH = DateTime.new(1970, 1, 1)
  # UTC DateTime of the transition (nil for the zone's open-ended start).
  attr_reader :at_utc
  # Base offset from UTC in seconds.
  attr_reader :utc_offset
  # Daylight savings offset in seconds.
  attr_reader :std_offset
  # Time zone abbreviation, e.g. 'EST'.
  attr_reader :zone_id
  # Name of the tz.offset entry this transition refers to; assigned by
  # TZDataTransitions#process_offsets.
  attr_accessor :offset_name
  def initialize(at_utc, utc_offset, std_offset, zone_id)
    @at_utc = at_utc
    @utc_offset = utc_offset
    @std_offset = std_offset
    @zone_id = zone_id
    @offset_name = nil
  end
  def to_s
    "At #{at_utc} UTC switch to UTC offset #{@utc_offset} with std offset #{@std_offset}, zone id #{@zone_id}"
  end
  # Orders transitions by time. A nil at_utc sorts before any concrete
  # time (it represents the open-ended start of the zone).
  def <=>(transition)
    if @at_utc == transition.at_utc
      0
    elsif @at_utc.nil?
      -1
    elsif transition.at_utc.nil?
      # Fixed: this previously tested transition.nil?, which can never be
      # true here, so comparing against a nil-time transition fell
      # through and raised TypeError on DateTime - nil below.
      1
    else
      @at_utc - transition.at_utc
    end
  end
  # Sum of the base and daylight savings offsets in seconds.
  def total_offset
    @utc_offset + @std_offset
  end
  # Returns a copy of this transition taking effect at a different time.
  def clone_with_at(at_utc)
    TZDataTransition.new(at_utc, @utc_offset, @std_offset, @zone_id)
  end
  # Emits a tz.transition line (skipped for the initial nil-time
  # transition, which is represented by the first tz.offset instead).
  def write(file)
    if @at_utc
      file.puts "tz.transition #{@at_utc.year}, #{@at_utc.mon}, :#{@offset_name}, #{timestamp_parameters(@at_utc)}"
    end
  end
  private
  # Returns the parameters to output for the given DateTime: a Unix
  # timestamp, plus the DateTime as a rational Julian day when the
  # timestamp falls outside the signed 32-bit range.
  def timestamp_parameters(datetime)
    timestamp = ((datetime - EPOCH) * 86400).to_i
    if timestamp < 0 || timestamp > 2147483647
      # Output DateTime parameters as well as a timestamp for platforms
      # where Time doesn't support negative or 64-bit values.
      "#{timestamp}, #{datetime.ajd.numerator}, #{datetime.ajd.denominator}"
    else
      timestamp
    end
  end
end
# An instance of a rule for a year.
#
# @private
class TZDataActivatedRule #:nodoc:
  # The underlying TZDataRule.
  attr_reader :rule
  # The year the rule has been activated for.
  attr_reader :year
  # UTC DateTime the rule fires at; nil until calculate_time is called.
  attr_reader :at

  def initialize(rule, year)
    @rule = rule
    @year = year
    @at = nil
  end

  # Computes and caches the UTC time at which the rule fires in the
  # activation year, given the current base and daylight savings offsets.
  def calculate_time(utc_offset, std_offset)
    @at = @rule.at_utc_time(@year, utc_offset, std_offset)
  end
end
# A tz data time definition - an hour, minute, second and reference. Reference
# is either :utc, :standard or :wall_clock.
#
# @private
class TZDataTime #:nodoc:
  attr_reader :hour
  attr_reader :minute
  attr_reader :second
  attr_reader :ref

  # Parses a time spec of the form h[:m[:s]] optionally followed by a
  # reference suffix: 's' = standard time, 'g'/'u'/'z' = UTC, any other
  # suffix (or none) = wall clock.
  def initialize(spec)
    match = /^([0-9]+)(:([0-9]+)(:([0-9]+))?)?([wguzs])?$/.match(spec)
    raise "Invalid time: #{spec}" unless match
    @hour = match[1].to_i
    @minute = match[3].to_i
    @second = match[5].to_i
    @ref =
      case match[6]
      when 's' then :standard
      when 'g', 'u', 'z' then :utc
      else :wall_clock
      end
  end

  # Converts the time to UTC given a utc_offset and std_offset.
  def to_utc(utc_offset, std_offset, year, month, day)
    local = DateTime.new(year, month, day, @hour, @minute, @second)
    seconds_ahead = 0
    seconds_ahead += utc_offset unless @ref == :utc
    seconds_ahead += std_offset if @ref == :wall_clock
    local - Rational(seconds_ahead, 86400)
  end
end
# A tz data day of the month reference. Can either be an absolute day,
# a last week day or a week day >= or <= than a specific day of month.
#
# @private
class TZDataDayOfMonth #:nodoc:
  # :absolute, :last or :comparison.
  attr_reader :type
  # Day number (for :absolute, or the pivot day for :comparison).
  attr_reader :day_of_month
  # Day of week, 0 (Sunday) to 6 (for :last and :comparison).
  attr_reader :day_of_week
  # :greater_equal or :less_equal (for :comparison only).
  attr_reader :operator
  # Parses an ON field: a plain day number (e.g. '15'), 'lastSun', or a
  # comparison such as 'Sun>=8'.
  def initialize(spec)
    raise "Invalid on: #{spec}" if spec !~ /^([0-9]+)|(last([A-Za-z]+))|(([A-Za-z]+)([<>]=)([0-9]+))$/
    if $1
      @type = :absolute
      @day_of_month = $1.to_i
    elsif $3
      @type = :last
      @day_of_week = parse_day_of_week($3)
    else
      @type = :comparison
      @day_of_week = parse_day_of_week($5)
      @operator = parse_operator($6)
      @day_of_month = $7.to_i
    end
  end
  # Returns the absolute day of month for the given year and month.
  def to_absolute(year, month)
    case @type
    when :last
      # Step back from the last day of the month to the previous
      # occurrence of @day_of_week.
      last_day_in_month = (Date.new(year, month, 1) >> 1) - 1
      offset = last_day_in_month.wday - @day_of_week
      offset = offset + 7 if offset < 0
      (last_day_in_month - offset).day
    when :comparison
      # Find the nearest @day_of_week on or after (>=) / on or before
      # (<=) the pivot day. The double negation folds both directions
      # into the same "wrap forward by a week" adjustment.
      pivot = Date.new(year, month, @day_of_month)
      offset = @day_of_week - pivot.wday
      offset = -offset if @operator == :less_equal
      offset = offset + 7 if offset < 0
      offset = -offset if @operator == :less_equal
      result = pivot + offset
      # (Leftover debug puts that dumped state before this raise have
      # been removed.)
      raise 'No suitable date found' if result.month != pivot.month
      result.day
    else # absolute
      @day_of_month
    end
  end
  private
  # Maps an English day-of-week name (matched by its three-letter
  # prefix, case-insensitively) to its Date#wday number (Sunday = 0).
  def parse_day_of_week(day_of_week)
    lower = day_of_week.downcase
    if lower =~ /^mon/
      1
    elsif lower =~ /^tue/
      2
    elsif lower =~ /^wed/
      3
    elsif lower =~ /^thu/
      4
    elsif lower =~ /^fri/
      5
    elsif lower =~ /^sat/
      6
    elsif lower =~ /^sun/
      0
    else
      raise "Invalid day of week: #{day_of_week}"
    end
  end
  # Maps '>=' / '<=' to :greater_equal / :less_equal.
  def parse_operator(operator)
    if operator == '>='
      :greater_equal
    elsif operator == '<='
      :less_equal
    else
      raise "Invalid operator: #{operator}"
    end
  end
end
# A tz data Zone until reference.
#
# @private
class TZDataUntil #:nodoc:
  include TZDataParserUtils

  # Integer year.
  attr_reader :year
  # Integer month (1-12); defaults to January when omitted.
  attr_reader :month
  # TZDataDayOfMonth; defaults to the 1st when omitted.
  attr_reader :day
  # TZDataTime; defaults to midnight when omitted.
  attr_reader :time

  # Parses an UNTIL field of the form "year [month [day [time]]]".
  def initialize(spec)
    year, month, day, time = spec.split(/\s+/)
    raise "Invalid until: #{spec}" if year.nil?
    @year = year.to_i
    @month = month ? parse_month(month) : 1
    @day = TZDataDayOfMonth.new(day || '1')
    @time = TZDataTime.new(time || '00:00')
  end

  # Converts the reference to a UTC DateTime.
  def to_utc(utc_offset, std_offset)
    @time.to_utc(utc_offset, std_offset, @year, @month, @day.to_absolute(@year, @month))
  end
end
# A tz data Zone format string. Either alternate standard/daylight-savings,
# substitution (%s) format or a fixed string.
#
# @private
class TZDataFormat #:nodoc:
  # Classifies the FORMAT field: 'STD/DST' abbreviation pairs,
  # '%s' substitution formats, or a fixed abbreviation.
  def initialize(spec)
    case spec
    when %r{([A-Z]+)/([A-Z]+)}i
      @type = :alternate
      @standard_abbrev = $1
      @daylight_abbrev = $2
    when /%s/
      @type = :subst
      @abbrev = spec
    else
      @type = :fixed
      @abbrev = spec
    end
  end

  # Expands given the current daylight savings offset and Rule string.
  def expand(std_offset, rule_string)
    case @type
    when :alternate
      std_offset == 0 ? @standard_abbrev : @daylight_abbrev
    when :subst
      sprintf(@abbrev, rule_string)
    else
      @abbrev
    end
  end

  # True if a string from the rule is required to expand this format.
  def requires_rule_string?
    @type == :subst
  end

  # Is a fixed format string.
  def fixed?
    @type == :fixed
  end
end
# A location (latitude + longitude)
#
# @private
class TZDataLocation #:nodoc:
  # Degrees north of the equator, as a Rational.
  attr_reader :latitude
  # Degrees east of the prime meridian, as a Rational.
  attr_reader :longitude

  # Constructs a new TZDataLocation from a string in ISO 6709
  # sign-degrees-minutes-seconds format, either +-DDMM+-DDDMM
  # or +-DDMMSS+-DDDMMSS, first latitude (+ is north),
  # then longitude (+ is east).
  def initialize(coordinates)
    match = /^([+\-])([0-9]{2})([0-9]{2})([0-9]{2})?([+\-])([0-9]{3})([0-9]{2})([0-9]{2})?$/.match(coordinates)
    raise "Invalid coordinates: #{coordinates}" unless match
    @latitude = to_degrees(match[2], match[3], match[4], match[1])
    @longitude = to_degrees(match[6], match[7], match[8], match[5])
  end

  private

  # Combines degree/minute/optional-second strings into a signed Rational
  # number of degrees.
  def to_degrees(degrees, minutes, seconds, sign)
    value = Rational(degrees.to_i) + Rational(minutes.to_i, 60)
    value += Rational(seconds.to_i, 3600) if seconds
    sign == '-' ? -value : value
  end
end
# @private
# Value object tying a TZDataZone (:timezone) to the descriptive text
# (:description, may be nil) and TZDataLocation (:location) given for it
# in zone1970.tab.
TZDataCountryTimezone = Struct.new(:timezone, :description, :location) #:nodoc:
# An ISO 3166 country.
#
# @private
class TZDataCountry #:nodoc:
  include TZDataParserUtils

  # ISO 3166-1 alpha-2 code, e.g. 'GB'.
  attr_reader :code
  # Country name as given in iso3166.tab.
  attr_reader :name
  # TZDataCountryTimezone instances, in the order they were added.
  attr_reader :zones

  def initialize(code, name)
    @code = code
    @name = name
    @zones = []
  end

  # Adds a TZDataCountryTimezone
  def add_zone(zone)
    @zones << zone
  end

  # Writes this country's entry (and its timezones, if any) to the
  # countries index file.
  def write_index_record(file)
    header = " country #{quote_str(@code)}, #{quote_str(@name)}"
    header << ' do |c|' unless @zones.empty?
    file.puts header
    @zones.each do |zone|
      file.puts " c.timezone #{quote_str(zone.timezone.name)}, #{zone.location.latitude.numerator}, #{zone.location.latitude.denominator}, #{zone.location.longitude.numerator}, #{zone.location.longitude.denominator}#{zone.description.nil? ? '' : ', ' + quote_str(zone.description)}"
    end
    file.puts ' end' unless @zones.empty?
  end
end
end
end
| 32.775307 | 296 | 0.505112 |
9188f27729a3a48bc6bc97f7bb81e12610965566 | 155 | class ChangeQuantityToBeStringInRecipeFoodsTable < ActiveRecord::Migration[7.0]
def change
change_column :recipe_foods, :quantity, :string
end
end
| 25.833333 | 79 | 0.8 |
269864e56730ab62cc3d971f5120817d4aac35da | 1,796 | ActiveRecord::Schema.define(:version => 0) do
# Users are created and updated by other Users
create_table :users, :force => true do |t|
t.column :name, :string
t.column :creator_id, :integer
t.column :created_on, :datetime
t.column :updater_id, :integer
t.column :updated_at, :datetime
end
# People are created and updated by Users
create_table :people, :force => true do |t|
t.column :name, :string
t.column :creator_id, :integer
t.column :created_on, :datetime
t.column :updater_id, :integer
t.column :updated_at, :datetime
end
# Posts are created and updated by People
create_table :posts, :force => true do |t|
t.column :title, :string
t.column :creator_id, :integer
t.column :created_on, :datetime
t.column :updater_id, :integer
t.column :updated_at, :datetime
t.column :deleter_id, :integer
t.column :deleted_at, :datetime
end
# Comments are created and updated by People
# and also use non-standard foreign keys.
create_table :comments, :force => true do |t|
t.column :post_id, :integer
t.column :comment, :string
t.column :created_by, :integer
t.column :created_at, :datetime
t.column :updated_by, :integer
t.column :updated_at, :datetime
t.column :deleted_by, :integer
t.column :deleted_at, :datetime
end
# only used to test :stampable args
create_table :foos, :force => true do |t|
t.column :created_by, :integer
t.column :created_at, :datetime
t.column :updated_by, :integer
t.column :updated_at, :datetime
t.column :deleted_by, :integer
t.column :deleted_at, :datetime
end
end | 33.259259 | 48 | 0.628062 |
08abd1c6857cc8ddd8c047bb3823aa30a7a7979c | 560 | cask 'darktable' do
version '2.0.0'
sha256 '1019646522c3fde81ce0de905220a88b506c7cec37afe010af7d458980dd08bd'
# github.com is the official download host per the vendor homepage
url "https://github.com/darktable-org/darktable/releases/download/release-#{version}/darktable-#{version}.dmg"
appcast 'https://github.com/darktable-org/darktable/releases.atom',
:checkpoint => 'db63bd878b86eeabbf524a936fb843661b9778d2a3f17b022a7a83de7cc3b076'
name 'darktable'
homepage 'https://www.darktable.org/'
license :gpl
app 'darktable.app'
end
| 37.333333 | 112 | 0.773214 |
39b911304dab722b77173b8a1da74bb26238a99a | 277 | class InspectionResult < ActiveRecord::Base
belongs_to :user
belongs_to :inspection_schedule
has_one :check
has_one :measurement
has_one :note
has_one :approval
accepts_nested_attributes_for :measurement, :check, :note
include Common
after_commit :dump
end
| 19.785714 | 59 | 0.783394 |
39af36ff77a454511ccf44f8af381f8e707017b2 | 4,305 | # encoding: utf-8
# This file is distributed under New Relic's license terms.
# See https://github.com/newrelic/newrelic-ruby-agent/blob/main/LICENSE for complete details.
module NewRelic
module Agent
module Database
module ObfuscationHelpers
COMPONENTS_REGEX_MAP = {
:single_quotes => /'(?:[^']|'')*?(?:\\'.*|'(?!'))/,
:double_quotes => /"(?:[^"]|"")*?(?:\\".*|"(?!"))/,
:dollar_quotes => /(\$(?!\d)[^$]*?\$).*?(?:\1|$)/,
:uuids => /\{?(?:[0-9a-fA-F]\-*){32}\}?/,
:numeric_literals => /-?\b(?:[0-9]+\.)?[0-9]+([eE][+-]?[0-9]+)?\b/,
:boolean_literals => /\b(?:true|false|null)\b/i,
:hexadecimal_literals => /0x[0-9a-fA-F]+/,
:comments => /(?:#|--).*?(?=\r|\n|$)/i,
:multi_line_comments => /\/\*(?:[^\/]|\/[^*])*?(?:\*\/|\/\*.*)/,
:oracle_quoted_strings => /q'\[.*?(?:\]'|$)|q'\{.*?(?:\}'|$)|q'\<.*?(?:\>'|$)|q'\(.*?(?:\)'|$)/
}
DIALECT_COMPONENTS = {
:fallback => COMPONENTS_REGEX_MAP.keys,
:mysql => [:single_quotes, :double_quotes, :numeric_literals, :boolean_literals,
:hexadecimal_literals, :comments, :multi_line_comments],
:postgres => [:single_quotes, :dollar_quotes, :uuids, :numeric_literals,
:boolean_literals, :comments, :multi_line_comments],
:sqlite => [:single_quotes, :numeric_literals, :boolean_literals, :hexadecimal_literals,
:comments, :multi_line_comments],
:oracle => [:single_quotes, :oracle_quoted_strings, :numeric_literals, :comments,
:multi_line_comments],
:cassandra => [:single_quotes, :uuids, :numeric_literals, :boolean_literals,
:hexadecimal_literals, :comments, :multi_line_comments]
}
# We use these to check whether the query contains any quote characters
# after obfuscation. If so, that's a good indication that the original
# query was malformed, and so our obfuscation can't reliably find
# literals. In such a case, we'll replace the entire query with a
# placeholder.
CLEANUP_REGEX = {
:mysql => /'|"|\/\*|\*\//,
:mysql2 => /'|"|\/\*|\*\//,
:postgres => /'|\/\*|\*\/|\$(?!\?)/,
:sqlite => /'|\/\*|\*\//,
:cassandra => /'|\/\*|\*\//,
:oracle => /'|\/\*|\*\//,
:oracle_enhanced => /'|\/\*|\*\//
}
PLACEHOLDER = '?'.freeze
FAILED_TO_OBFUSCATE_MESSAGE = "Failed to obfuscate SQL query - quote characters remained after obfuscation".freeze
def obfuscate_single_quote_literals(sql)
return sql unless sql =~ COMPONENTS_REGEX_MAP[:single_quotes]
sql.gsub(COMPONENTS_REGEX_MAP[:single_quotes], PLACEHOLDER)
end
def self.generate_regex(dialect)
components = DIALECT_COMPONENTS[dialect]
Regexp.union(components.map { |component| COMPONENTS_REGEX_MAP[component] })
end
MYSQL_COMPONENTS_REGEX = self.generate_regex(:mysql)
POSTGRES_COMPONENTS_REGEX = self.generate_regex(:postgres)
SQLITE_COMPONENTS_REGEX = self.generate_regex(:sqlite)
ORACLE_COMPONENTS_REGEX = self.generate_regex(:oracle)
CASSANDRA_COMPONENTS_REGEX = self.generate_regex(:cassandra)
FALLBACK_REGEX = self.generate_regex(:fallback)
# Obfuscates literal values in +sql+ for the given +adapter+ by replacing
# them with '?' placeholders. If quote characters survive the pass the
# original query is assumed malformed and the whole string is replaced by
# FAILED_TO_OBFUSCATE_MESSAGE.
def obfuscate(sql, adapter)
  regex =
    case adapter
    when :mysql, :mysql2           then MYSQL_COMPONENTS_REGEX
    when :postgres                 then POSTGRES_COMPONENTS_REGEX
    when :sqlite                   then SQLITE_COMPONENTS_REGEX
    when :oracle, :oracle_enhanced then ORACLE_COMPONENTS_REGEX
    when :cassandra                then CASSANDRA_COMPONENTS_REGEX
    else                                FALLBACK_REGEX
    end
  scrubbed = sql.gsub(regex, PLACEHOLDER)
  detect_unmatched_pairs(scrubbed, adapter) ? FAILED_TO_OBFUSCATE_MESSAGE : scrubbed
end
# Returns a MatchData (truthy) when quote/comment characters remain in the
# already-obfuscated string — a sign the original query was malformed.
# Adapters without a dedicated cleanup regex fall back to the MySQL one.
def detect_unmatched_pairs(obfuscated, adapter)
  cleanup = CLEANUP_REGEX[adapter] || CLEANUP_REGEX[:mysql]
  cleanup.match(obfuscated)
end
end
end
end
end
| 42.205882 | 122 | 0.574448 |
6ab6a3e19727803fca7bfcf15153cfd54c71fc82 | 3,114 | # encoding: utf-8
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
module Azure::MediaServices::Mgmt::V2018_06_01_preview
module Models
#
# Describes all the filtering operations, such as de-interlacing, rotation
# etc. that are to be applied to the input media before encoding.
#
class Filters
  # NOTE: this model is AutoRest-generated (see the file header); manual
  # edits will be lost on regeneration.

  include MsRestAzure

  # @return [Deinterlace] The de-interlacing settings.
  attr_accessor :deinterlace

  # @return [Rotation] The rotation, if any, to be applied to the input
  # video, before it is encoded. Default is Auto. Possible values include:
  # 'Auto', 'None', 'Rotate0', 'Rotate90', 'Rotate180', 'Rotate270'
  attr_accessor :rotation

  # @return [Rectangle] The parameters for the rectangular window with
  # which to crop the input video.
  attr_accessor :crop

  # @return [Array<Overlay>] The properties of overlays to be applied to
  # the input video. These could be audio, image or video overlays.
  attr_accessor :overlays

  #
  # Mapper for Filters class as Ruby Hash.
  # This will be used for serialization/deserialization.
  #
  def self.mapper()
    {
      client_side_validation: true,
      required: false,
      serialized_name: 'Filters',
      type: {
        name: 'Composite',
        class_name: 'Filters',
        model_properties: {
          deinterlace: {
            client_side_validation: true,
            required: false,
            serialized_name: 'deinterlace',
            type: {
              name: 'Composite',
              class_name: 'Deinterlace'
            }
          },
          rotation: {
            client_side_validation: true,
            required: false,
            serialized_name: 'rotation',
            type: {
              name: 'Enum',
              module: 'Rotation'
            }
          },
          crop: {
            client_side_validation: true,
            required: false,
            serialized_name: 'crop',
            type: {
              name: 'Composite',
              class_name: 'Rectangle'
            }
          },
          overlays: {
            client_side_validation: true,
            required: false,
            serialized_name: 'overlays',
            type: {
              name: 'Sequence',
              element: {
                client_side_validation: true,
                required: false,
                serialized_name: 'OverlayElementType',
                type: {
                  name: 'Composite',
                  # '@odata.type' selects the concrete Overlay subclass
                  # (audio/image/video) during deserialization.
                  polymorphic_discriminator: '@odata.type',
                  uber_parent: 'Overlay',
                  class_name: 'Overlay'
                }
              }
            }
          }
        }
      }
    }
  end
end
end
end
| 31.454545 | 78 | 0.498073 |
# Small collection of vector / matrix helpers plus common neural-network
# activation functions, all operating on plain Ruby Arrays of numerics.
#
# Error-handling convention (kept from the original implementation): on a
# dimension mismatch the methods print an ERROR line to STDOUT and return
# whatever was accumulated so far (usually an empty Array or 0).
class MathUtils
  # Element-wise sum of two equally sized vectors; returns a new Array.
  def vectors_add(vector1, vector2)
    result = Array.new
    if vector1.size == vector2.size
      (0...vector1.size).each do |i|
        result[i] = vector1[i] + vector2[i]
      end
    else
      puts 'ERROR VECTOR ADD: Vectors size is not equal.'
    end
    result
  end

  # Element-wise difference (vector1 - vector2); returns a new Array.
  def vectors_sub(vector1, vector2)
    result = Array.new
    if vector1.size == vector2.size
      (0...vector1.size).each do |i|
        result[i] = vector1[i] - vector2[i]
      end
    else
      puts 'ERROR VECTOR SUB: Vectors size is not equal.'
    end
    result
  end

  # Element-wise (Hadamard) product; returns a new Array.
  def vectors_mult(vector1, vector2)
    result = Array.new
    if vector1.size == vector2.size
      (0...vector1.size).each do |i|
        result[i] = vector1[i] * vector2[i]
      end
    else
      puts 'ERROR VECTOR MULT: Vectors size is not equal.'
    end
    result
  end

  # Element-wise quotient (vector1 / vector2). Entries where vector2 is
  # zero are skipped (left nil) after printing an error, matching the
  # original behaviour.
  def vectors_div(vector1, vector2)
    result = Array.new
    if vector1.size == vector2.size
      (0...vector1.size).each do |i|
        if !vector2[i].zero?
          result[i] = vector1[i] / vector2[i]
        else
          puts 'ERROR VECTOR DIV: Vector two value equals to zero.'
        end
      end
    else
      puts 'ERROR VECTOR DIV: Vectors size is not equal.'
    end
    result
  end

  # Dot product of the two vectors, each term scaled by cos(alpha); alpha
  # defaults to 0, i.e. a plain dot product. Returns a numeric scalar
  # (0 after printing an error on size mismatch).
  #
  # Bug fix: the previous version indexed into the scalar accumulator
  # (`result[i] += ...`), raising NoMethodError on the first call; the
  # products are now summed into the scalar directly.
  def vectors_dot(vector1, vector2, alpha = 0)
    result = 0
    if vector1.size == vector2.size
      (0...vector1.size).each do |i|
        result += vector1[i] * vector2[i] * Math.cos(alpha)
      end
    else
      puts 'ERROR VECTOR DOT: Vectors size is not equal.'
    end
    result
  end

  # Element-wise sum of two same-shaped matrices (arrays of row arrays).
  def matrices_add(matrix1, matrix2)
    result = Array.new
    if matrix1.size == matrix2.size
      if matrix1.first.size == matrix2.first.size
        (0...matrix1.size).each do |i|
          result[i] = vectors_add(matrix1[i], matrix2[i])
        end
      else
        puts 'ERROR MATRIX ADD: Matrices columns not match.'
      end
    else
      puts 'ERROR MATRIX ADD: Matrices rows not match.'
    end
    result
  end

  # Element-wise difference of two same-shaped matrices.
  def matrices_sub(matrix1, matrix2)
    result = Array.new
    if matrix1.size == matrix2.size
      if matrix1.first.size == matrix2.first.size
        (0...matrix1.size).each do |i|
          result[i] = vectors_sub(matrix1[i], matrix2[i])
        end
      else
        puts 'ERROR MATRIX SUB: Matrices columns not match.'
      end
    else
      puts 'ERROR MATRIX SUB: Matrices rows not match.'
    end
    result
  end

  # Standard matrix product (rows of matrix1 by columns of matrix2).
  # Requires matrix1's column count to equal matrix2's row count.
  def matrices_mult(matrix1, matrix2)
    result = Array.new
    if matrix1.first.size == matrix2.size
      (0...matrix1.size).each do |i|
        result[i] = Array.new
        (0...matrix2.first.size).each do |j|
          sum = 0
          (0...matrix1.first.size).each do |k|
            sum += matrix1[i][k] * matrix2[k][j]
          end
          result[i] << sum
        end
      end
    else
      puts 'ERROR MATRIX MULT: Dimensions mismatch.'
    end
    result
  end

  # --- activation functions and their derivatives ------------------------

  # Identity: f(x) = x (returns the input Array itself).
  def identity_function(vector)
    vector
  end

  # Identity derivative: f'(x) = 1.
  def identity_derivative(vector)
    vector.map { |x| 1.0 }
  end

  # Logistic sigmoid: f(x) = 1 / (1 + e^-x).
  def logistic_function(vector)
    vector.map { |x| 1.0 / (1.0 + Math.exp(-x)) }
  end

  # Logistic derivative: f'(x) = f(x) * (1 - f(x)).
  def logistic_derivative(vector)
    vector.map { |x| (1.0 / (1.0 + Math.exp(-x))) * (1.0 - (1.0 / (1.0 + Math.exp(-x)))) }
  end

  # Hyperbolic tangent.
  def tanh_function(vector)
    vector.map { |x| Math.tanh x }
  end

  # tanh derivative: 1 - tanh(x)^2.
  def tanh_derivative(vector)
    vector.map { |x| 1.0 - (Math.tanh(x))**2 }
  end

  # Arc-tangent activation.
  def arctan_function(vector)
    vector.map { |x| Math.atan(x) }
  end

  # arctan derivative: 1 / (x^2 + 1).
  def arctan_derivative(vector)
    vector.map { |x| 1.0 / (x**2 + 1) }
  end

  # Softsign: x / (1 + |x|).
  def softsign_function(vector)
    vector.map { |x| x / (1.0 + x.abs) }
  end

  # Softsign derivative: 1 / (1 + |x|)^2.
  def softsign_derivative(vector)
    vector.map { |x| 1.0 / (1.0 + x.abs)**2 }
  end

  # Rectified linear unit: max(0, x).
  def relu_function(vector)
    vector.map { |x| x <= 0.0 ? 0.0 : x }
  end

  # ReLU derivative: 0 for x <= 0, 1 otherwise.
  def relu_derivative(vector)
    vector.map { |x| x <= 0.0 ? 0.0 : 1.0 }
  end

  # Softplus: ln(1 + e^x), a smooth approximation of ReLU.
  def softplus_function(vector)
    vector.map { |x| Math.log(1.0 + Math.exp(x)) }
  end

  # Softplus derivative (the logistic sigmoid).
  def softplus_derivative(vector)
    vector.map { |x| 1.0 / (1.0 + Math.exp(-x)) }
  end

  # Sum of squared differences scaled by -1/n.
  #
  # NOTE(review): the -1.0 factor makes this the *negative* mean squared
  # error; kept as-is since callers may depend on the sign — confirm intent.
  # On a size mismatch it prints an error and returns the (empty) Array,
  # also preserved from the original implementation.
  def cost_function(y_predicted, y_assumed)
    result = Array.new
    if y_predicted.size == y_assumed.size
      (0...y_predicted.size).each do |i|
        result[i] = (y_predicted[i] - y_assumed[i])**2
      end
      result = -1.0 * (1.0 / y_assumed.size) * result.inject(0, :+)
    else
      puts 'ERROR COST FUNCTION: y size not match.'
    end
    result
  end
end
| 24.465969 | 91 | 0.55703 |
ed5cf2878b60ff4018c11db59e97b72f74d510ba | 31,522 | #
#--
# Copyright (c) 2006-2008, John Mettraux, [email protected]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#++
#
#
# "made in Japan"
#
# John Mettraux at openwfe.org
#
require 'thread'
require 'monitor'
require 'rufus/otime'
require 'rufus/cronline'
module Rufus
#
# The Scheduler is used by OpenWFEru for registering 'at' and 'cron' jobs.
# 'at' jobs to execute once at a given point in time. 'cron' jobs
# execute a specified intervals.
# The two main methods are thus schedule_at() and schedule().
#
# schedule_at() and schedule() await either a Schedulable instance and
# params (usually an array or nil), either a block, which is more in the
# Ruby way.
#
# == The gem "rufus-scheduler"
#
# This scheduler was previously known as the "openwferu-scheduler" gem.
#
# To ensure that code tapping the previous gem still runs fine with
# "rufus-scheduler", this new gem has 'pointers' for the old class
# names.
#
# require 'rubygems'
# require 'openwfe/util/scheduler'
# s = OpenWFE::Scheduler.new
#
# will still run OK with "rufus-scheduler".
#
# == Examples
#
# require 'rubygems'
# require 'rufus/scheduler'
#
# scheduler = Rufus::Scheduler.start_new
#
# scheduler.schedule_in("3d") do
# regenerate_monthly_report()
# end
# #
# # will call the regenerate_monthly_report method
# # in 3 days from now
#
# scheduler.schedule "0 22 * * 1-5" do
# log.info "activating security system..."
# activate_security_system()
# end
#
# job_id = scheduler.schedule_at "Sun Oct 07 14:24:01 +0900 2009" do
# init_self_destruction_sequence()
# end
#
# scheduler.join # join the scheduler (prevents exiting)
#
#
# an example that uses a Schedulable class :
#
# class Regenerator < Schedulable
# def trigger (frequency)
# self.send(frequency)
# end
# def monthly
# # ...
# end
# def yearly
# # ...
# end
# end
#
# regenerator = Regenerator.new
#
# scheduler.schedule_in("4d", regenerator)
# #
# # will regenerate the report in four days
#
# scheduler.schedule_in(
# "5d",
# { :schedulable => regenerator, :scope => :month })
# #
# # will regenerate the monthly report in 5 days
#
# There is also schedule_every() :
#
# scheduler.schedule_every("1h20m") do
# regenerate_latest_report()
# end
#
# (note : a schedule every isn't triggered immediately, thus this example
# will first trigger 1 hour and 20 minutes after being scheduled)
#
# The scheduler has a "exit_when_no_more_jobs" attribute. When set to
# 'true', the scheduler will exit as soon as there are no more jobs to
# run.
# Use with care though, if you create a scheduler, set this attribute
# to true and start the scheduler, the scheduler will immediately exit.
# This attribute is best used indirectly : the method
# join_until_no_more_jobs() wraps it.
#
# The :scheduler_precision can be set when instantiating the scheduler.
#
# scheduler = Rufus::Scheduler.new(:scheduler_precision => 0.500)
# scheduler.start
# #
# # instatiates a scheduler that checks its jobs twice per second
# # (the default is 4 times per second (0.250))
#
# Note that rufus-scheduler places a constraint on the values for the
# precision : 0.0 < p <= 1.0
# Thus
#
# scheduler.precision = 4.0
#
# or
#
# scheduler = Rufus::Scheduler.new :scheduler_precision => 5.0
#
# will raise an exception.
#
#
# == Tags
#
# Tags can be attached to jobs scheduled :
#
# scheduler.schedule_in "2h", :tags => "backup" do
# init_backup_sequence()
# end
#
# scheduler.schedule "0 24 * * *", :tags => "new_day" do
# do_this_or_that()
# end
#
# jobs = find_jobs 'backup'
# jobs.each { |job| job.unschedule }
#
# Multiple tags may be attached to a single job :
#
# scheduler.schedule_in "2h", :tags => [ "backup", "important" ] do
# init_backup_sequence()
# end
#
# The vanilla case for tags assume they are String instances, but nothing
# prevents you from using anything else. The scheduler has no persistence
# by itself, so no serialization issue.
#
#
# == Cron up to the second
#
# A cron schedule can be set at the second level :
#
# scheduler.schedule "7 * * * * *" do
# puts "it's now the seventh second of the minute"
# end
#
# The rufus scheduler recognizes an optional first column for second
# scheduling. This column can, like for the other columns, specify a
# value ("7"), a list of values ("7,8,9,27") or a range ("7-12").
#
#
# == information passed to schedule blocks
#
# When calling schedule_every(), schedule_in() or schedule_at(), the block
# expects zero or 3 parameters like in
#
# scheduler.schedule_every("1h20m") do |job_id, at, params|
# puts "my job_id is #{job_id}"
# end
#
# For schedule(), zero or two parameters can get passed
#
# scheduler.schedule "7 * * * * *" do |job_id, cron_line, params|
# puts "my job_id is #{job_id}"
# end
#
# In both cases, params corresponds to the params passed to the schedule
# method (:tags, :first_at, :first_in, :dont_reschedule, ...)
#
#
# == Exceptions
#
# The rufus scheduler will output a stacktrace to the STDOUT in
# case of exception. There are two ways to change that behaviour.
#
# # 1 - providing a lwarn method to the scheduler instance :
#
# class << scheduler
# def lwarn (&block)
# puts "oops, something wrong happened : "
# puts block.call
# end
# end
#
# # 2 - overriding the [protected] method log_exception(e) :
#
# class << scheduler
# def log_exception (e)
# puts "something wrong happened : "+e.to_s
# end
# end
#
# == 'Every jobs' and rescheduling
#
# Every jobs can reschedule/unschedule themselves. A reschedule example :
#
# schedule.schedule_every "5h" do |job_id, at, params|
#
# mails = $inbox.fetch_mails
# mails.each { |m| $inbox.mark_as_spam(m) if is_spam(m) }
#
# params[:every] = if mails.size > 100
# "1h" # lots of spam, check every hour
# else
# "5h" # normal schedule, every 5 hours
# end
# end
#
# Unschedule example :
#
# schedule.schedule_every "10s" do |job_id, at, params|
# #
# # polls every 10 seconds until a mail arrives
#
# $mail = $inbox.fetch_last_mail
#
# params[:dont_reschedule] = true if $mail
# end
#
# == 'Every jobs', :first_at and :first_in
#
# Since rufus-scheduler 1.0.2, the schedule_every methods recognizes two
# optional parameters, :first_at and :first_in
#
# scheduler.schedule_every "2d", :first_in => "5h" do
# # schedule something every two days, start in 5 hours...
# end
#
# scheduler.schedule_every "2d", :first_at => "5h" do
# # schedule something every two days, start in 5 hours...
# end
#
# == job.next_time()
#
# Jobs, be they at, every or cron have a next_time() method, which tells
# when the job will be fired next time (for at and in jobs, this is also the
# last time).
#
# For cron jobs, the current implementation is quite brutal. It takes three
# seconds on my 2006 macbook to reach a cron schedule 1 year away.
#
# When is the next friday 13th ?
#
# require 'rubygems'
# require 'rufus/scheduler'
#
# puts Rufus::CronLine.new("* * 13 * fri").next_time
#
#
# == :thread_name option
#
# You can specify the name of the scheduler's thread. Should make
# it easier in some debugging situations.
#
# scheduler.new :thread_name => "the crazy scheduler"
#
#
# == job.trigger_thread
#
# Since rufus-scheduler 1.0.8, you can have access to the thread of
# a job currently being triggered.
#
# job = scheduler.get_job(job_id)
# thread = job.trigger_thread
#
# This new method will return nil if the job is not currently being
# triggered. Not that in case of an every or cron job, this method
# will return the thread of the last triggered instance, thus, in case
# of overlapping executions, you only get the most recent thread.
#
class Scheduler
VERSION = '1.0.11'
#
# By default, the precision is 0.250, with means the scheduler
# will check for jobs to execute 4 times per second.
#
attr_reader :precision
#
# Setting the precision ( 0.0 < p <= 1.0 )
#
#
# Assigns the wake-up precision; only values 0.0 < p <= 1.0 are accepted.
#
def precision= (f)
  unless f > 0.0 && f <= 1.0
    raise "precision must be 0.0 < p <= 1.0"
  end
  @precision = f
end
#--
# Set by default at 0.00045, it's meant to minimize drift
#
#attr_accessor :correction
#++
#
# As its name implies.
#
attr_accessor :stopped
# Sets up the scheduler's internal state. No thread is spawned here --
# call start() for that.
#
# Recognized params:
#   :scheduler_precision -- wake-up interval (0.0 < p <= 1.0, default 0.250)
#   :thread_name         -- name given to the scheduler thread
def initialize (params={})
  super()
  @pending_jobs = []   # 'at'/'every' jobs, kept sorted by trigger time
  @cron_jobs = {}      # job_id => CronJob
  @non_cron_jobs = {}  # job_id => AtJob / EveryJob
  @schedule_queue = Queue.new
  @unschedule_queue = Queue.new
    #
    # sync between the step() method and the [un]schedule
    # methods is done via these queues, no more mutex
  @scheduler_thread = nil
  @precision = 0.250
    # every 250ms, the scheduler wakes up (default value)
  begin
    self.precision = Float(params[:scheduler_precision])
  rescue Exception => e
    # let precision at its default value
    # (a nil or non-numeric param simply keeps 0.250; NOTE(review): the
    # broad `rescue Exception` is intentional-looking but overly wide --
    # StandardError would suffice here)
  end
  @thread_name = params[:thread_name] || "rufus scheduler"
  #@correction = 0.00045
  @exit_when_no_more_jobs = false
  #@dont_reschedule_every = false
  @last_cron_second = -1   # guards cron checks to at most once per second
  @stopped = true          # start() flips this and spawns the loop thread
end
#
# Starts this scheduler (or restart it if it was previously stopped)
#
def start
  @stopped = false
  @scheduler_thread = Thread.new do
    Thread.current[:name] = @thread_name
    if defined?(JRUBY_VERSION)
      require 'java'
      # also name the underlying JVM thread so it shows up in thread dumps
      java.lang.Thread.current_thread.name = @thread_name
    end
    loop do
      break if @stopped
      t0 = Time.now.to_f
      step
      d = Time.now.to_f - t0 # + @correction
      # if step() took longer than a whole precision period, skip the
      # sleep and run the next step immediately
      next if d > @precision
      # otherwise sleep only the remainder of the period to limit drift
      sleep (@precision - d)
    end
  end
end
#
# Instantiates a new Rufus::Scheduler instance, starts it and returns it
#
def self.start_new (params = {})
s = self.new(params)
s.start
s
end
#
# The scheduler is stoppable via sstop()
#
def stop
@stopped = true
end
# (for backward compatibility)
#
alias :sstart :start
# (for backward compatibility)
#
alias :sstop :stop
#
# Joins on the scheduler thread
#
def join
@scheduler_thread.join
end
#
# Like join() but takes care of setting the 'exit_when_no_more_jobs'
# attribute of this scheduler to true before joining.
# Thus the scheduler will exit (and the join terminates) as soon as
# there aren't no more 'at' (or 'every') jobs in the scheduler.
#
# Currently used only in unit tests.
#
def join_until_no_more_jobs
@exit_when_no_more_jobs = true
join
end
#
# Ensures that a duration is a expressed as a Float instance.
#
# duration_to_f("10s")
#
# will yield 10.0
#
def duration_to_f (s)
Rufus.duration_to_f(s)
end
#--
#
# The scheduling methods
#
#++
#
# Schedules a job by specifying at which time it should trigger.
# Returns the a job_id that can be used to unschedule the job.
#
# This method returns a job identifier which can be used to unschedule()
# the job.
#
# If the job is specified in the past, it will be triggered immediately
# but not scheduled.
# To avoid the triggering, the parameter :discard_past may be set to
# true :
#
# jobid = scheduler.schedule_at(yesterday, :discard_past => true) do
# puts "you'll never read this message"
# end
#
# And 'jobid' will hold a nil (not scheduled).
#
#
def schedule_at (at, params={}, &block)
do_schedule_at(
at,
prepare_params(params),
&block)
end
#
# a shortcut for schedule_at
#
alias :at :schedule_at
#
# Schedules a job by stating in how much time it should trigger.
# Returns the a job_id that can be used to unschedule the job.
#
# This method returns a job identifier which can be used to unschedule()
# the job.
#
def schedule_in (duration, params={}, &block)
do_schedule_at(
Time.new.to_f + Rufus.duration_to_f(duration),
prepare_params(params),
&block)
end
#
# a shortcut for schedule_in
#
alias :in :schedule_in
#
# Schedules a job in a loop. After an execution, it will not execute
# before the time specified in 'freq'.
#
# This method returns a job identifier which can be used to unschedule()
# the job.
#
# In case of exception in the job, it will be rescheduled. If you don't
# want the job to be rescheduled, set the parameter :try_again to false.
#
# scheduler.schedule_every "500", :try_again => false do
# do_some_prone_to_error_stuff()
# # won't get rescheduled in case of exception
# end
#
# Since rufus-scheduler 1.0.2, the params :first_at and :first_in are
# accepted.
#
# scheduler.schedule_every "2d", :first_in => "5h" do
# # schedule something every two days, start in 5 hours...
# end
#
# (without setting a :first_in (or :first_at), our example schedule would
# have had been triggered after two days).
#
def schedule_every (freq, params={}, &block)
params = prepare_params params
params[:every] = freq
first_at = params[:first_at]
first_in = params[:first_in]
first_at = if first_at
at_to_f(first_at)
elsif first_in
Time.now.to_f + Rufus.duration_to_f(first_in)
else
Time.now.to_f + Rufus.duration_to_f(freq) # not triggering immediately
end
do_schedule_at(first_at, params, &block)
end
#
# a shortcut for schedule_every
#
alias :every :schedule_every
#
# Schedules a cron job, the 'cron_line' is a string
# following the Unix cron standard (see "man 5 crontab" in your command
# line, or http://www.google.com/search?q=man%205%20crontab).
#
# For example :
#
# scheduler.schedule("5 0 * * *", s)
# # will trigger the schedulable s every day
# # five minutes after midnight
#
# scheduler.schedule("15 14 1 * *", s)
# # will trigger s at 14:15 on the first of every month
#
# scheduler.schedule("0 22 * * 1-5") do
# puts "it's break time..."
# end
# # outputs a message every weekday at 10pm
#
# Returns the job id attributed to this 'cron job', this id can
# be used to unschedule the job.
#
# This method returns a job identifier which can be used to unschedule()
# the job.
#
def schedule (cron_line, params={}, &block)
params = prepare_params(params)
#
# is a job with the same id already scheduled ?
cron_id = params[:cron_id] || params[:job_id]
#@unschedule_queue << cron_id
#
# schedule
b = to_block(params, &block)
job = CronJob.new(self, cron_id, cron_line, params, &b)
@schedule_queue << job
job.job_id
end
#
# an alias for schedule()
#
alias :cron :schedule
#--
#
# The UNscheduling methods
#
#++
#
# Unschedules an 'at' or a 'cron' job identified by the id
# it was given at schedule time.
#
def unschedule (job_id)
@unschedule_queue << job_id
end
#
# Unschedules a cron job
#
# (deprecated : use unschedule(job_id) for all the jobs !)
#
def unschedule_cron_job (job_id)
unschedule(job_id)
end
#--
#
# 'query' methods
#
#++
#
# Returns the job corresponding to job_id, an instance of AtJob
# or CronJob will be returned.
#
def get_job (job_id)
@cron_jobs[job_id] || @non_cron_jobs[job_id]
end
#
# Finds a job (via get_job()) and then returns the wrapped
# schedulable if any.
#
def get_schedulable (job_id)
j = get_job(job_id)
j.respond_to?(:schedulable) ? j.schedulable : nil
end
#
# Returns an array of jobs that have the given tag.
#
#
# Returns all jobs (cron and non-cron); when a tag is given, only the
# jobs carrying that tag.
#
def find_jobs (tag=nil)
  every_job = @cron_jobs.values + @non_cron_jobs.values
  tag ? every_job.select { |job| job.has_tag?(tag) } : every_job
end
#
# Returns all the jobs in the scheduler.
#
def all_jobs
find_jobs()
end
#
# Finds the jobs with the given tag and then returns an array of
# the wrapped Schedulable objects.
# Jobs that haven't a wrapped Schedulable won't be included in the
# result.
#
def find_schedulables (tag)
find_jobs(tag).find_all { |job| job.respond_to?(:schedulable) }
end
#
# Returns the number of currently pending jobs in this scheduler
# ('at' jobs and 'every' jobs).
#
def pending_job_count
@pending_jobs.size
end
#
# Returns the number of cron jobs currently active in this scheduler.
#
def cron_job_count
@cron_jobs.size
end
#
# Returns the current count of 'every' jobs scheduled.
#
def every_job_count
@non_cron_jobs.values.select { |j| j.class == EveryJob }.size
end
#
# Returns the current count of 'at' jobs scheduled (not 'every').
#
def at_job_count
@non_cron_jobs.values.select { |j| j.class == AtJob }.size
end
#
# Returns true if the given string seems to be a cron string.
#
def self.is_cron_string (s)
s.match ".+ .+ .+ .+ .+" # well...
end
private
#
# the unschedule work itself.
#
#
# The unschedule work itself. Cron jobs are simply removed from the
# cron map; at/every jobs are flagged so they won't reschedule and are
# pulled out of the pending list if present.
#
def do_unschedule (job_id)
  job = get_job job_id
  return (@cron_jobs.delete(job_id) != nil) if job.is_a?(CronJob)
  return false unless job # not found
  if job.is_a?(AtJob) # catches AtJob and EveryJob instances
    @non_cron_jobs.delete(job_id)
    job.params[:dont_reschedule] = true # for AtJob as well, no worries
  end
  index = @pending_jobs.index { |j| j.job_id == job_id }
  @pending_jobs.delete_at(index) if index
  true
end
#
# Making sure that params is a Hash.
#
def prepare_params (params)
params.is_a?(Schedulable) ? { :schedulable => params } : params
end
#
# The core method behind schedule_at and schedule_in (and also
# schedule_every). It's protected, don't use it directly.
#
def do_schedule_at (at, params={}, &block)
job = params.delete :job
unless job
jobClass = params[:every] ? EveryJob : AtJob
b = to_block params, &block
job = jobClass.new self, at_to_f(at), params[:job_id], params, &b
end
if jobClass == AtJob && job.at < (Time.new.to_f + @precision)
job.trigger() unless params[:discard_past]
@non_cron_jobs.delete job.job_id # just to be sure
return nil
end
@non_cron_jobs[job.job_id] = job
@schedule_queue << job
job.job_id
end
#
# Ensures an 'at' instance is translated to a float
# (to be compared with the float coming from time.to_f)
#
def at_to_f (at)
at = Rufus::to_ruby_time(at) if at.kind_of?(String)
at = Rufus::to_gm_time(at) if at.kind_of?(DateTime)
at = at.to_f if at.kind_of?(Time)
raise "cannot schedule at : #{at.inspect}" unless at.is_a?(Float)
at
end
#
# Returns a block. If a block is passed, will return it, else,
# if a :schedulable is set in the params, will return a block
# wrapping a call to it.
#
def to_block (params, &block)
return block if block
schedulable = params.delete(:schedulable)
return nil unless schedulable
l = lambda do
schedulable.trigger(params)
end
class << l
attr_accessor :schedulable
end
l.schedulable = schedulable
l
end
#
# Pushes an 'at' job into the pending job list
#
#
# Inserts an 'at' job into the pending list, keeping the list sorted by
# trigger time. A job with the same job_id replaces the previous one.
#
def push_pending_job (job)
  previous = @pending_jobs.find { |j| j.job_id == job.job_id }
  @pending_jobs.delete(previous) if previous
    # override previous job with same id
  if @pending_jobs.empty? || job.at >= @pending_jobs.last.at
    @pending_jobs << job
    return
  end
  slot = @pending_jobs.index { |j| job.at <= j.at }
  @pending_jobs.insert(slot, job)
end
#
# This is the method called each time the scheduler wakes up
# (by default 4 times per second). It's meant to quickly
# determine if there are jobs to trigger else to get back to sleep.
# 'cron' jobs get executed if necessary then 'at' jobs.
#
def step
step_unschedule
# unschedules any job in the unschedule queue before
# they have a chance to get triggered.
step_trigger
# triggers eligible jobs
step_schedule
# schedule new jobs
# done.
end
#
# unschedules jobs in the unschedule_queue
#
def step_unschedule
loop do
break if @unschedule_queue.empty?
do_unschedule(@unschedule_queue.pop)
end
end
#
# adds every job waiting in the @schedule_queue to
# either @pending_jobs or @cron_jobs.
#
def step_schedule
loop do
break if @schedule_queue.empty?
j = @schedule_queue.pop
if j.is_a?(CronJob)
@cron_jobs[j.job_id] = j
else # it's an 'at' job
push_pending_job j
end
end
end
#
# triggers every eligible pending (at or every) jobs, then every eligible
# cron jobs.
#
def step_trigger
now = Time.now
if @exit_when_no_more_jobs && @pending_jobs.size < 1
@stopped = true
return
end
# TODO : eventually consider running cron / pending
# job triggering in two different threads
#
# but well... there's the synchronization issue...
#
# cron jobs
if now.sec != @last_cron_second
@last_cron_second = now.sec
@cron_jobs.each do |cron_id, cron_job|
#trigger(cron_job) if cron_job.matches?(now, @precision)
cron_job.trigger if cron_job.matches?(now)
end
end
#
# pending jobs
now = now.to_f
#
# that's what at jobs do understand
loop do
break if @pending_jobs.length < 1
job = @pending_jobs[0]
break if job.at > now
#if job.at <= now
#
# obviously
job.trigger
@pending_jobs.delete_at 0
end
end
#
# If an error occurs in the job, it well get caught and an error
# message will be displayed to STDOUT.
# If this scheduler provides a lwarn(message) method, it will
# be used insted.
#
# Of course, one can override this method.
#
def log_exception (e)
message =
"trigger() caught exception\n" +
e.to_s + "\n" +
e.backtrace.join("\n")
if self.respond_to?(:lwarn)
lwarn { message }
else
puts message
end
end
end
#
# This module adds a trigger method to any class that includes it.
# The default implementation feature here triggers an exception.
#
module Schedulable

  # Called by the scheduler when the job fires; +params+ is the hash the
  # job was scheduled with. Including classes must override this.
  def trigger (params)
    raise "trigger() implementation is missing"
  end

  # Hook for rescheduling logic; must be overridden if used.
  # (Fixed typo in the raised message: "implentation" -> "implementation".)
  def reschedule (scheduler)
    raise "reschedule() implementation is missing"
  end
end
protected
JOB_ID_LOCK = Monitor.new
#
# would it be better to use a Mutex instead of a full-blown
# Monitor ?
#
# The parent class for scheduled jobs.
#
class Job
@@last_given_id = 0
#
# as a scheduler is fully transient, no need to
# have persistent ids, a simple counter is sufficient
#
# The identifier for the job
#
attr_accessor :job_id
#
# An array of tags
#
attr_accessor :tags
#
# The block to execute at trigger time
#
attr_accessor :block
#
# A reference to the scheduler
#
attr_reader :scheduler
#
# Keeping a copy of the initialization params of the job.
#
attr_reader :params
#
# if the job is currently executing, this field points to
# the 'trigger thread'
#
attr_reader :trigger_thread
def initialize (scheduler, job_id, params, &block)
@scheduler = scheduler
@block = block
if job_id
@job_id = job_id
else
JOB_ID_LOCK.synchronize do
@job_id = @@last_given_id
@@last_given_id = @job_id + 1
end
end
@params = params
#@tags = Array(tags).collect { |tag| tag.to_s }
# making sure we have an array of String tags
@tags = Array(params[:tags])
# any tag is OK
end
#
# Returns true if this job sports the given tag
#
def has_tag? (tag)
@tags.include?(tag)
end
#
# Removes (cancels) this job from its scheduler.
#
def unschedule
@scheduler.unschedule(@job_id)
end
#
# Triggers the job (in a dedicated thread).
#
def trigger
Thread.new do
@trigger_thread = Thread.current
# keeping track of the thread
begin
do_trigger
rescue Exception => e
@scheduler.send(:log_exception, e)
end
#@trigger_thread = nil if @trigger_thread = Thread.current
@trigger_thread = nil
# overlapping executions, what to do ?
end
end
end
#
# An 'at' job.
#
class AtJob < Job
#
# The float representation (Time.to_f) of the time at which
# the job should be triggered.
#
attr_accessor :at
def initialize (scheduler, at, at_id, params, &block)
super(scheduler, at_id, params, &block)
@at = at
end
#
# Returns the Time instance at which this job is scheduled.
#
def schedule_info
Time.at(@at)
end
#
# next_time is last_time (except for EveryJob instances). Returns
# a Time instance.
#
def next_time
schedule_info
end
protected
#
# Triggers the job (calls the block)
#
def do_trigger
@block.call @job_id, @at
@scheduler.instance_variable_get(:@non_cron_jobs).delete @job_id
end
end
#
# An 'every' job is simply an extension of an 'at' job.
#
class EveryJob < AtJob
#
# Returns the frequency string used to schedule this EveryJob,
# like for example "3d" or "1M10d3h".
#
def schedule_info
@params[:every]
end
protected
#
# triggers the job, then reschedules it if necessary
#
def do_trigger
hit_exception = false
begin
@block.call @job_id, @at, @params
rescue Exception => e
@scheduler.send(:log_exception, e)
hit_exception = true
end
if \
@scheduler.instance_variable_get(:@exit_when_no_more_jobs) or
(@params[:dont_reschedule] == true) or
(hit_exception and @params[:try_again] == false)
@scheduler.instance_variable_get(:@non_cron_jobs).delete(job_id)
# maybe it'd be better to wipe that reference from here anyway...
return
end
#
# ok, reschedule ...
params[:job] = self
@at = @at + Rufus.duration_to_f(params[:every])
@scheduler.send(:do_schedule_at, @at, params)
end
end
#
# A cron job.
#
class CronJob < Job

  #
  # The CronLine instance describing when this job has to fire.
  #
  attr_accessor :cron_line

  # Accepts the cron schedule either as a raw string ("5 0 * * *") or as
  # an already-built CronLine; anything else raises.
  def initialize (scheduler, cron_id, line, params, &block)
    super(scheduler, cron_id, params, &block)
    @cron_line = case line
      when CronLine then line
      when String then CronLine.new(line)
      else
        raise "Cannot initialize a CronJob with a param of class #{line.class}"
    end
  end

  #
  # Called by the scheduler (once per new second) to decide whether this
  # cron job has to fire now.
  #
  def matches? (time)
    @cron_line.matches?(time)
  end

  #
  # Returns the original cron tab string used to schedule this Job,
  # like for example "60/3 * * * Sun".
  #
  def schedule_info
    @cron_line.original
  end

  #
  # Returns a Time instance : the next time this cron job is supposed
  # to "fire". 'from' is the starting point for the computation and
  # defaults to now.
  #
  def next_time (from=Time.now)
    @cron_line.next_time(from)
  end

  protected

  # Invokes the scheduled block with the job id, the cron line and the
  # original schedule params.
  def do_trigger
    @block.call @job_id, @cron_line, @params
  end
end
end
| 23.700752 | 79 | 0.591587 |
39e913752f931786e48ff1368f2f121ae2ba7aaf | 198 | require 'spec_helper'
# Smoke specs generated by `bundler gem` for the CakeWalk gem.
describe CakeWalk do
it 'has a version number' do
expect(CakeWalk::VERSION).not_to be nil
end
# NOTE(review): this is the generator's placeholder example and it
# fails on purpose (false == true) -- replace it with a real spec of
# the gem's behavior.
it 'does something useful' do
expect(false).to eq(true)
end
end
| 16.5 | 43 | 0.707071 |
621a1a48aa234a63b030d34c0d78db77fd350b90 | 496 | # Be sure to restart your server when you modify this file.
# Your secret key for verifying the integrity of signed cookies.
# If you change this key, all old signed cookies will become invalid!
# Make sure the secret is at least 30 characters and all random,
# no regular words or you'll be exposed to dictionary attacks.
# NOTE(review): this token belongs to the Dummy app used by the test
# suite, so committing it is harmless here -- never check a production
# secret_token into version control (load it from ENV instead).
Dummy::Application.config.secret_token = '980d46a174e0d083486d1f762d859ce62fc419dbaf5a9aff1597411adc0df44046f3bd14864b419b2840621d124fe8c4a2a66ed44091f846c96362ad3ffbbce5'
| 62 | 171 | 0.832661 |
# Homebrew formula for Apktool: ships the released jar in libexec and
# exposes it through a generated `apktool` wrapper script on PATH.
class Apktool < Formula
  desc "Tool for reverse engineering 3rd party, closed, binary Android apps"
  homepage "https://github.com/iBotPeaches/Apktool"
  url "https://github.com/iBotPeaches/Apktool/releases/download/v2.4.1/apktool_2.4.1.jar"
  sha256 "bdeb66211d1dc1c71f138003ce35f6d0cd19af6f8de7ffbdd5b118d02d825a52"

  # Pure-Java jar release: nothing to compile, so no bottle is built.
  bottle :unneeded

  # Small sample APK consumed by the test block below.
  resource "sample.apk" do
    url "https://github.com/downloads/stephanenicolas/RoboDemo/robodemo-sample-1.0.1.apk"
    sha256 "bf3ec04631339538c8edb97ebbd5262c3962c5873a2df9022385156c775eb81f"
  end

  def install
    libexec.install "apktool_#{version}.jar"
    bin.write_jar_script libexec/"apktool_#{version}.jar", "apktool"
  end

  test do
    # Round-trip check: decode the sample APK, then rebuild it.
    resource("sample.apk").stage do
      system bin/"apktool", "d", "robodemo-sample-1.0.1.apk"
      system bin/"apktool", "b", "robodemo-sample-1.0.1"
    end
  end
end
| 33.038462 | 89 | 0.743888 |
# Manages the current user's notes on tracks.
class NotesController < ApplicationController
  before_action :require_user!

  # POST /notes -- creates a note on a track for the current user.
  def create
    note = current_user.notes.new(note_params)
    # Surface validation messages only when the save actually failed,
    # instead of always writing an (often empty) array into the flash.
    flash[:errors] = note.errors.full_messages unless note.save
    redirect_to track_url(note.track_id)
  end

  # DELETE /notes/:id -- removes one of the current user's notes.
  # Scoping the lookup through current_user.notes prevents deleting
  # another user's note.
  def destroy
    note = current_user.notes.find(params[:id])
    # BUG FIX: the record was looked up but never destroyed before.
    note.destroy
    redirect_to track_url(note.track_id)
  end

  private

  # Strong parameters: only :content and :track_id may be mass-assigned.
  def note_params
    params.require(:note).permit(:content, :track_id)
  end
end
| 20.454545 | 53 | 0.733333 |
bbf7dcc2de6a1c49027454a29ca4ac8da86feccb | 3,559 | require_relative '../../spec_helper_min'
require 'models/user_table_shared_examples'
# Specs for the ActiveRecord Carto::UserTable model: shared model
# examples, column typing, canonical visualization creation, privacy
# defaults, sharing visibility and resilience of the dependent
# visualization lookups.
describe Carto::UserTable do
include UniqueNamesHelper
let(:user) { create(:carto_user) }
# NOTE(review): calling the `user` let-helper from inside before(:all)
# is unsupported by recent RSpec (lets are per-example) -- confirm the
# pinned RSpec version tolerates this before upgrading.
before(:all) do
@user = user
@carto_user = user
@user_table = Carto::UserTable.new
@user_table.user = user
@user_table.name = unique_name('user_table')
@user_table.save
# The dependent visualization models are in the UserTable class for the AR model
@dependent_test_object = @user_table
end
it_behaves_like 'user table models' do
def build_user_table(attrs = {})
ut = Carto::UserTable.new
ut.assign_attributes(attrs, without_protection: true)
ut
end
end
describe 'table_id column' do
# Postgres OIDs can exceed a signed 32-bit int; the column must accept that.
it 'supports values larger than 2^31-1' do
column = Carto::UserTable.columns.find{|c| c.name=='table_id'}
expect { column.type_cast_for_database(2164557046) }.to_not raise_error
end
end
describe 'canonical visualization' do
it 'contains 1 data layer and creates a named map template if default basemap supports labels on top' do
Carto::LayerFactory.build_default_base_layer(user).supports_labels_layer?.should be_true
# FIXME: passes in local but not in CI
# Carto::NamedMaps::Api.any_instance.expects(:create).once
table = user.tables.create!
expect(table.reload.visualization.data_layers.count).to eq(1)
end
it 'contains 1 data layer and creates a named map template if default basemap does not support labels on top' do
# A google_maps_key switches the default basemap to GMaps (no labels layer).
user.update_attribute(:google_maps_key, 'wadus')
Carto::LayerFactory.build_default_base_layer(user).supports_labels_layer?.should be_false
# FIXME: passes in local but not in CI
# Carto::NamedMaps::Api.any_instance.expects(:create).once
table = user.tables.create!
expect(table.reload.visualization.data_layers.count).to eq(1)
end
end
describe '#default_privacy' do
it 'sets privacy to nil by default' do
expect(Carto::UserTable.new.privacy).to be_nil
end
it 'lets caller specify privacy' do
[UserTable::PRIVACY_PRIVATE, UserTable::PRIVACY_LINK, UserTable::PRIVACY_PUBLIC].each do |privacy|
expect(Carto::UserTable.new(privacy: privacy).privacy).to eq privacy
end
end
end
describe '#readable_by?' do
include_context 'organization with users helper'
it 'returns true for shared tables' do
@table = create_table(privacy: UserTable::PRIVACY_PRIVATE, name: "a_table_name", user_id: @org_user_1.id)
user_table = Carto::UserTable.find(@table.id)
share_table_with_user(@table, @org_user_2)
user_table.readable_by?(@carto_org_user_2).should be_true
end
end
describe('#affected_visualizations') do
before(:each) do
# We recreate an inconsistent state where a layer has no visualization
@user_table.stubs(:layers).returns([Carto::Layer.new])
end
describe('#fully_dependent_visualizations') do
it 'resists layers without visualizations' do
expect { @user_table.fully_dependent_visualizations }.to_not raise_error
end
end
describe('#accessible_dependent_derived_maps') do
it 'resists layers without visualizations' do
expect { @user_table.accessible_dependent_derived_maps }.to_not raise_error
end
end
describe('#partially_dependent_visualizations') do
it 'resists layers without visualizations' do
expect { @user_table.partially_dependent_visualizations }.to_not raise_error
end
end
end
end
| 32.354545 | 116 | 0.716493 |
28ee33497d6a051054dbb746f19d997c8297c09e | 2,506 | module Mhc
module PropertyValue
class Range < Base
include Comparable
ITEM_SEPARATOR = "-"
attr_reader :first, :last
def initialize(item_class, prefix = nil, first = nil, last = nil)
@item_class, @prefix = item_class, prefix
@first, @last = first, last
end
# our Range acceps these 3 forms:
# (1) A-B : first, last = A, B
# (2) A : first, last = A, A
# (3) A- : first, last = A, nil
# (4) -B : first, last = nil, B
#
# nil means range is open (infinite).
#
def parse(string)
@first, @last = nil, nil
first, last = string.split(ITEM_SEPARATOR, 2)
last = first if last.nil? # single "A" means "A-A"
@first = @item_class.parse(first) unless first.to_s == ""
@last = @item_class.parse(last, @first) unless last.to_s == ""
return self.class.new(@item_class, @prefix, @first, @last)
end
def to_a
array = []
i = first
while i <= last
array << i
i = i.succ
end
return array
end
def each
i = first
while i <= last
yield(i)
i = i.succ
end
end
def narrow(from, to)
from = @first if from.nil? or (@first and from < @first)
to = @last if to.nil? or (@last and to > @last)
self.class.new(@item_class, @prefix, from, to)
end
def <=>(o)
o = o.first if o.respond_to?(:first)
safe_comp(self.first, o)
end
def infinit?
return @first.nil? || @last.nil?
end
def blank?
@first.nil? && @last.nil?
end
def to_mhc_string
first = @first.nil? ? "" : @first.to_mhc_string
last = @last.nil? ? "" : @last.to_mhc_string
if first == last
return @prefix.to_s + first
else
return @prefix.to_s + [first, last].join(ITEM_SEPARATOR)
end
end
alias_method :to_s, :to_mhc_string
private
def cover?(item)
return false if @first && item < @first
return false if @last && item > @last
return true
end
def safe_comp(a, o)
# nil is minimum
return (a <=> o) if a and o
return -1 if !a and o
return 1 if a and !o
return 0 if !a and !o
end
end # class Range
end # module PropertyValue
end # module Mhc
| 24.811881 | 73 | 0.501995 |
26eb28bb99ac971c28817abdcdc23d996bcad4ef | 606 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require "google/apis/jobs_v3"
| 37.875 | 74 | 0.762376 |
e9f7d4646c16869adb08f39cde0bd430095ac2b4 | 527 | # frozen_string_literal: true
# Routing for the app: a root test page plus a versioned JSON API under
# /api/v1. Route order matters in Rails, so entries are documented in
# place rather than reordered.
Rails.application.routes.draw do
# For details on the DSL available within this file, see https://guides.rubyonrails.org/routing.html
root "test#index"
namespace "api" do
namespace "v1" do
post "image_annotate", to: "image_annotate#show"
resources :users do
# Per-user items; controller lives under controllers/api/v1/users/.
resources :items, module: :users
collection do
# NOTE(review): POST /api/v1/users/signup duplicates the standard
# POST /api/v1/users (users#create) that `resources :users` already
# provides -- consider consolidating.
post :signup, to: "users#create"
post :login, to: "users#login"
get :current_user
end
end
end
end
end
| 25.095238 | 102 | 0.641366 |
91e5381c9667f21cec6a3f59a76c0a4906e50c83 | 1,034 | # -*- encoding: utf-8 -*-
require File.expand_path('../lib/reditor/version', __FILE__)
Gem::Specification.new do |gem|
gem.authors = ['hibariya']
gem.email = ['[email protected]']
gem.description = %q{Open a ruby library with $EDITOR. Reditor supports rubygems, bundler, and stdlib (pure ruby).}
gem.summary = %q{Open a ruby library with $EDITOR.}
gem.homepage = 'https://github.com/hibariya/reditor'
gem.licenses = ['MIT']
gem.files = `git ls-files`.split($\)
gem.executables = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) }
gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
gem.name = 'reditor'
gem.require_paths = ['lib']
gem.version = Reditor::VERSION
gem.add_runtime_dependency 'hotwater', '~> 0.1.2'
gem.add_runtime_dependency 'pepin', '~> 0.1.1'
gem.add_runtime_dependency 'thor', '~> 0.19.1'
gem.add_development_dependency 'rake', '~> 10.4.2'
gem.add_development_dependency 'rspec', '~> 3.3.0'
end
| 39.769231 | 119 | 0.634429 |
01b123a552ee8f54547fca4cccb107dda0542e38 | 1,427 | require File.dirname(__FILE__) + '/../../spec_helper'
require File.dirname(__FILE__) + '/fixtures/classes'
# RubySpec-style examples for Kernel Math.tan: return type, known
# values, NaN for infinite input, coercion failures (version-gated via
# the ruby_version_is guard from the spec harness) and the private
# instance-method form mixed in through IncludesMath.
describe "Math.tan" do
it "returns a float" do
Math.tan(1.35).class.should == Float
end
it "returns the tangent of the argument" do
Math.tan(0.0).should == 0.0
Math.tan(-0.0).should == -0.0
Math.tan(4.22).should be_close(1.86406937682395, TOLERANCE)
Math.tan(-9.65).should be_close(-0.229109052606441, TOLERANCE)
end
# NOTE(review): "Infinitty" is a typo in the example description; it is
# a runtime string shown in spec output, so it is left untouched here.
it "returns NaN if called with +-Infinitty" do
Math.tan(1.0/0.0).nan?.should == true
Math.tan(1.0/-0.0).nan?.should == true
end
ruby_version_is ""..."1.9" do
it "raises an ArgumentError if the argument cannot be coerced with Float()" do
lambda { Math.tan("test") }.should raise_error(ArgumentError)
end
end
ruby_version_is "1.9" do
it "raises a TypeError if the argument cannot be coerced with Float()" do
lambda { Math.tan("test") }.should raise_error(TypeError)
end
end
it "raises a TypeError if the argument is nil" do
lambda { Math.tan(nil) }.should raise_error(TypeError)
end
it "accepts any argument that can be coerced with Float()" do
Math.tan(MathSpecs::Float.new).should be_close(1.5574077246549, TOLERANCE)
end
end
# Math#tan (the mixed-in, private instance-method form).
describe "Math#tan" do
it "is accessible as a private instance method" do
IncludesMath.new.send(:tan, 1.0).should be_close(1.5574077246549, TOLERANCE)
end
end
| 30.361702 | 82 | 0.683252 |
1ab2a17b260b96159899502cd12a9f9628bb81a5 | 1,890 | # -*- encoding : utf-8 -*-
module TextExtractor
module Formats
module Pdf
extend ActiveSupport::Concern
def extract_text_from_pdf(original_file_path)
parsed_text = extract_as_text_from_pdf(original_file_path)
if parsed_text.length < TextExtractor.configuration.min_text_length
# png_pathes = extract_images(original_file_path)
output_folder = temp_folder_for_parsed
png_pathes = extract_to_ppm_from_pdf(original_file_path, output_folder)
parsed_text = extract_text_from_image(png_pathes)
::FileUtils.rm_rf(output_folder)
end
parsed_text
end
private
def extract_as_text_from_pdf(original_file_path)
run_shell(%{pdftotext -enc UTF-8 #{ to_shell(original_file_path) } #{ to_shell(text_file_path) }})
extract_text_from_txt(text_file_path)
end
def extract_to_ppm_from_pdf(original_file_path, output_folder)
run_shell(%{pdftoppm -r 300 -gray #{ to_shell(original_file_path) } #{ File.join(output_folder, 'temp_file') }})
::Dir["#{ output_folder }/*"]
end
def self.formats
{
'.pdf' => 'pdf'
}
end
def self.is_pdf_file?(file_path)
File.open(file_path, 'rb',&:readline) =~ /\A\%PDF-\d+(\.\d+)?/
end
=begin
def get_count_pages(_file_path)
pdfinfo = run_shell(%{pdfinfo #{_file_path}})
pdfinfo.out =~ /Pages:\s+(\d+)/i
$1.to_i
end
def get_page_size(_file_path)
pdfinfo = run_shell(%{pdfinfo #{_file_path}})
pdfinfo.out =~ /Page size:\s+(\d+)\sx\s(\d+)/i
[$1.to_i, $2.to_i]
end
def extract_images(_file_path)
output_file = temp_folder_for_parsed
run_shell(%{pdfimages #{ _file_path } #{ output_file } })
::Dir["#{output_file}*"]
end
=end
end
end
end
| 28.636364 | 120 | 0.627513 |
2812901f0414f3d8142077e010f76cc30c3ce488 | 11,469 | require 'spec_helper'
require 'ddtrace'
require 'ddtrace/propagation/http_propagator'
# Specs for Datadog::HTTPPropagator: injecting trace context into
# outbound HTTP headers (#inject!) and rebuilding a Context from
# rack-style env headers (#extract). Three wire formats are covered:
# Datadog's x-datadog-* headers, B3 multi-header and B3 single-header.
RSpec.describe Datadog::HTTPPropagator do
around do |example|
# Reset before and after each example; don't allow global state to linger.
Datadog.configuration.reset_options!
example.run
Datadog.configuration.reset_options!
end
let(:tracer) { get_test_tracer }
describe '#inject!' do
let(:context) { nil }
let(:env) { { 'something' => 'alien' } }
before(:each) { described_class.inject!(context, env) }
context 'with default settings' do
# A nil context must leave the carrier untouched.
context 'given a nil context' do
it { expect(env).to eq('something' => 'alien') }
end
context 'given a context and env' do
context 'without any explicit sampling priority or origin' do
let(:context) { Datadog::Context.new(trace_id: 1000, span_id: 2000) }
it do
expect(env).to eq('something' => 'alien',
'x-datadog-trace-id' => '1000',
'x-datadog-parent-id' => '2000')
end
end
context 'with a sampling priority' do
context 'of 0' do
let(:context) { Datadog::Context.new(trace_id: 1000, span_id: 2000, sampling_priority: 0) }
it do
expect(env).to eq('something' => 'alien',
'x-datadog-sampling-priority' => '0',
'x-datadog-trace-id' => '1000',
'x-datadog-parent-id' => '2000')
end
end
# A nil priority must not emit the sampling-priority header at all.
context 'as nil' do
let(:context) { Datadog::Context.new(trace_id: 1000, span_id: 2000, sampling_priority: nil) }
it do
expect(env).to eq('something' => 'alien',
'x-datadog-trace-id' => '1000',
'x-datadog-parent-id' => '2000')
end
end
end
context 'with an origin' do
context 'of "synthetics"' do
let(:context) { Datadog::Context.new(trace_id: 1000, span_id: 2000, origin: 'synthetics') }
it do
expect(env).to eq('something' => 'alien',
'x-datadog-origin' => 'synthetics',
'x-datadog-trace-id' => '1000',
'x-datadog-parent-id' => '2000')
end
end
context 'as nil' do
let(:context) { Datadog::Context.new(trace_id: 1000, span_id: 2000, origin: nil) }
it do
expect(env).to eq('something' => 'alien',
'x-datadog-trace-id' => '1000',
'x-datadog-parent-id' => '2000')
end
end
end
end
end
end
# Extraction builds a Context from HTTP_* rack headers. When several
# formats are present they must agree on trace/span ids; mismatched ids
# yield an empty context (see the 'with mismatched values' contexts).
describe '#extract' do
subject(:context) { described_class.extract(env) }
context 'given a blank env' do
let(:env) { {} }
it do
expect(context.trace_id).to be nil
expect(context.span_id).to be nil
expect(context.sampling_priority).to be nil
expect(context.origin).to be nil
end
end
context 'given an env containing' do
context 'datadog trace id and parent id' do
let(:env) do
{
'HTTP_X_DATADOG_TRACE_ID' => '123',
'HTTP_X_DATADOG_PARENT_ID' => '456'
}
end
it do
expect(context.trace_id).to eq(123)
expect(context.span_id).to eq(456)
expect(context.sampling_priority).to be_nil
expect(context.origin).to be_nil
end
context 'and sampling priority' do
let(:env) do
{
'HTTP_X_DATADOG_TRACE_ID' => '7',
'HTTP_X_DATADOG_PARENT_ID' => '8',
'HTTP_X_DATADOG_SAMPLING_PRIORITY' => '0'
}
end
it do
expect(context.trace_id).to eq(7)
expect(context.span_id).to eq(8)
expect(context.sampling_priority).to eq(0)
expect(context.origin).to be_nil
end
context 'and origin' do
let(:env) do
{
'HTTP_X_DATADOG_TRACE_ID' => '7',
'HTTP_X_DATADOG_PARENT_ID' => '8',
'HTTP_X_DATADOG_SAMPLING_PRIORITY' => '0',
'HTTP_X_DATADOG_ORIGIN' => 'synthetics'
}
end
it do
expect(context.trace_id).to eq(7)
expect(context.span_id).to eq(8)
expect(context.sampling_priority).to be(0)
expect(context.origin).to eq('synthetics')
end
end
end
context 'and origin' do
let(:env) do
{
'HTTP_X_DATADOG_TRACE_ID' => '7',
'HTTP_X_DATADOG_PARENT_ID' => '8',
'HTTP_X_DATADOG_ORIGIN' => 'synthetics'
}
end
it do
expect(context.trace_id).to eq(7)
expect(context.span_id).to eq(8)
expect(context.sampling_priority).to be_nil
expect(context.origin).to eq('synthetics')
end
end
end
# B3 multi-header ids are hex-encoded; 0x00ef01 == 61185, 0x011ef0 == 73456.
context 'B3 trace id and parent id' do
let(:env) do
{
'HTTP_X_B3_TRACEID' => '00ef01',
'HTTP_X_B3_SPANID' => '011ef0'
}
end
it do
expect(context.trace_id).to eq(61185)
expect(context.span_id).to eq(73456)
expect(context.sampling_priority).to be_nil
end
context 'and sampling priority' do
let(:env) do
{
'HTTP_X_B3_TRACEID' => '00ef01',
'HTTP_X_B3_SPANID' => '011ef0',
'HTTP_X_B3_SAMPLED' => '0'
}
end
it do
expect(context.trace_id).to eq(61185)
expect(context.span_id).to eq(73456)
expect(context.sampling_priority).to eq(0)
end
end
end
context 'B3 single trace id and parent id' do
let(:env) do
{
'HTTP_B3' => '00ef01-011ef0'
}
end
it do
expect(context.trace_id).to eq(61185)
expect(context.span_id).to eq(73456)
expect(context.sampling_priority).to be_nil
end
context 'and sampling priority' do
let(:env) do
{
'HTTP_B3' => '00ef01-011ef0-0'
}
end
it do
expect(context.trace_id).to eq(61185)
expect(context.span_id).to eq(73456)
expect(context.sampling_priority).to eq(0)
end
end
end
context 'datadog, and b3 header' do
let(:env) do
{
'HTTP_X_DATADOG_TRACE_ID' => '61185',
'HTTP_X_DATADOG_PARENT_ID' => '73456',
'HTTP_X_B3_TRACEID' => '00ef01',
'HTTP_X_B3_SPANID' => '011ef0'
}
end
it do
expect(context.trace_id).to eq(61185)
expect(context.span_id).to eq(73456)
expect(context.sampling_priority).to be_nil
end
context 'and sampling priority' do
let(:env) do
{
'HTTP_X_DATADOG_TRACE_ID' => '61185',
'HTTP_X_DATADOG_PARENT_ID' => '73456',
'HTTP_X_DATADOG_SAMPLING_PRIORITY' => '1',
'HTTP_X_B3_TRACEID' => '00ef01',
'HTTP_X_B3_SPANID' => '011ef0',
'HTTP_X_B3_SAMPLED' => '1'
}
end
it do
expect(context.trace_id).to eq(61185)
expect(context.span_id).to eq(73456)
expect(context.sampling_priority).to be(1)
end
end
context 'with mismatched values' do
let(:env) do
{
'HTTP_X_DATADOG_TRACE_ID' => '7',
'HTTP_X_DATADOG_PARENT_ID' => '8',
'HTTP_X_B3_TRACEID' => '00ef01',
'HTTP_X_B3_SPANID' => '011ef0'
}
end
it do
expect(context.trace_id).to be_nil
expect(context.span_id).to be_nil
expect(context.sampling_priority).to be_nil
end
end
end
context 'datadog, b3, and b3 single header' do
let(:env) do
{
'HTTP_X_DATADOG_TRACE_ID' => '61185',
'HTTP_X_DATADOG_PARENT_ID' => '73456',
'HTTP_X_B3_TRACEID' => '00ef01',
'HTTP_X_B3_SPANID' => '011ef0',
'HTTP_B3' => '00ef01-011ef0'
}
end
it do
expect(context.trace_id).to eq(61185)
expect(context.span_id).to eq(73456)
expect(context.sampling_priority).to be_nil
end
context 'and sampling priority' do
let(:env) do
{
'HTTP_X_DATADOG_TRACE_ID' => '61185',
'HTTP_X_DATADOG_PARENT_ID' => '73456',
'HTTP_X_DATADOG_SAMPLING_PRIORITY' => '1',
'HTTP_X_B3_TRACEID' => '00ef01',
'HTTP_X_B3_SPANID' => '011ef0',
'HTTP_X_B3_SAMPLED' => '1',
'HTTP_B3' => '00ef01-011ef0-1'
}
end
it do
expect(context.trace_id).to eq(61185)
expect(context.span_id).to eq(73456)
expect(context.sampling_priority).to be(1)
end
end
context 'with mismatched values' do
let(:env) do
# DEV: We only need 1 to be mismatched
{
'HTTP_X_DATADOG_TRACE_ID' => '7',
'HTTP_X_DATADOG_PARENT_ID' => '8',
'HTTP_X_B3_TRACEID' => '00ef01',
'HTTP_X_B3_SPANID' => '011ef0',
'HTTP_B3' => '00ef01-011ef0'
}
end
it do
expect(context.trace_id).to be_nil
expect(context.span_id).to be_nil
expect(context.sampling_priority).to be_nil
end
end
end
context 'datadog, and b3 single header' do
let(:env) do
{
'HTTP_X_DATADOG_TRACE_ID' => '61185',
'HTTP_X_DATADOG_PARENT_ID' => '73456',
'HTTP_B3' => '00ef01-011ef0'
}
end
it do
expect(context.trace_id).to eq(61185)
expect(context.span_id).to eq(73456)
expect(context.sampling_priority).to be_nil
end
context 'and sampling priority' do
let(:env) do
{
'HTTP_X_DATADOG_TRACE_ID' => '61185',
'HTTP_X_DATADOG_PARENT_ID' => '73456',
'HTTP_X_DATADOG_SAMPLING_PRIORITY' => '1',
'HTTP_B3' => '00ef01-011ef0-1'
}
end
it do
expect(context.trace_id).to eq(61185)
expect(context.span_id).to eq(73456)
expect(context.sampling_priority).to be(1)
end
end
context 'with mismatched values' do
let(:env) do
{
'HTTP_X_DATADOG_TRACE_ID' => '7',
'HTTP_X_DATADOG_PARENT_ID' => '8',
'HTTP_B3' => '00ef01-011ef0'
}
end
it do
expect(context.trace_id).to be_nil
expect(context.span_id).to be_nil
expect(context.sampling_priority).to be_nil
end
end
end
end
end
end
| 29.407692 | 105 | 0.499259 |
085c84394233c9e5927b41fe8ccd1f46329c65c1 | 2,419 | module Spree
module Api
class PaymentsController < Spree::Api::BaseController
before_filter :find_order
before_filter :find_payment, only: [:update, :show, :authorize, :purchase, :capture, :void, :credit]
def index
@payments = @order.payments.ransack(params[:q]).result.page(params[:page]).per(params[:per_page])
respond_with(@payments)
end
def new
@payment_methods = Spree::PaymentMethod.available
respond_with(@payment_method)
end
def create
@payment = @order.payments.build(payment_params)
if @payment.save
respond_with(@payment, status: 201, default_template: :show)
else
invalid_resource!(@payment)
end
end
def update
authorize! params[:action], @payment
if ! @payment.pending?
render 'update_forbidden', status: 403
elsif @payment.update_attributes(payment_params)
respond_with(@payment, default_template: :show)
else
invalid_resource!(@payment)
end
end
def show
respond_with(@payment)
end
def authorize
perform_payment_action(:authorize)
end
def capture
perform_payment_action(:capture)
end
def purchase
perform_payment_action(:purchase)
end
def void
perform_payment_action(:void_transaction)
end
def credit
if params[:amount].to_f > @payment.credit_allowed
render 'credit_over_limit', status: 422
else
perform_payment_action(:credit, params[:amount])
end
end
private
def find_order
@order = Spree::Order.find_by(number: order_id)
authorize! :read, @order, order_token
end
def find_payment
@payment = @order.payments.find(params[:id])
end
def perform_payment_action(action, *args)
authorize! action, Payment
begin
@payment.send("#{action}!", *args)
respond_with(@payment, :default_template => :show)
rescue Spree::Core::GatewayError => e
@error = e.message
render 'spree/api/errors/gateway_error', status: 422
end
end
def payment_params
params.require(:payment).permit(permitted_payment_attributes)
end
end
end
end
| 25.463158 | 106 | 0.600661 |
# An uploaded file owned by a user; the stored data is handled by
# CarrierWave through FileUploader.
class Content < ActiveRecord::Base
  mount_uploader :file, FileUploader

  belongs_to :owner, class_name: 'User'

  # Coarse classification of the uploaded file.
  module Kind
    Text   = :text
    Image  = :image
    Binary = :binary
  end

  # Returns one of Kind::Text / Kind::Image / Kind::Binary, preferring
  # the MIME type reported by the uploader and falling back to the
  # filename extension. Memoised per instance.
  def kind
    @kind ||=
      if file.content_type.present? # idiomatic ActiveSupport check (was `not ... blank?`)
        kind_from_content_type(file.content_type)
      else
        kind_from_filename(file.filename)
      end
  end

  private

  # Maps a MIME type string to a Kind; anything that is neither
  # image/* nor text/* counts as binary.
  def kind_from_content_type(str)
    case str
    when /image/
      Kind::Image
    when /text/
      Kind::Text
    else
      Kind::Binary
    end
  end

  # Maps a filename to a Kind by its extension.
  def kind_from_filename(filename)
    case filename
    when /\.(?:jpg|png|gif)\z/
      Kind::Image
    when /\.(?:txt)\z/
      Kind::Text
    else
      Kind::Binary
    end
  end
end
| 16.636364 | 49 | 0.606557 |
081718d2b6e8f2e817e81ecc0e31a16927bc67ae | 4,857 | # encoding: UTF-8
# require 'squib'
require_relative '../../lib/squib'
# Sample deck demonstrating Squib's text options: font strings,
# alignment, hints, ink extents, ellipsization, rotation, justification,
# embedded icons, stroke/fill and Pango markup.
data = { 'name' => ['Thief', 'Grifter', 'Mastermind'],
'level' => [1, 2, 3] }
longtext = "This is left-justified text, with newlines.\nWhat do you know about tweetle beetles? well... When tweetle beetles fight, it's called a tweetle beetle battle. And when they battle in a puddle, it's a tweetle beetle puddle battle. AND when tweetle beetles battle with paddles in a puddle, they call it a tweetle beetle puddle paddle battle. AND... When beetles battle beetles in a puddle paddle battle and the beetle battle puddle is a puddle in a bottle... ..they call this a tweetle beetle bottle puddle paddle battle muddle."
Squib::Deck.new(width: 825, height: 1125, cards: 3) do
background color: :white
rect x: 15, y: 15, width: 795, height: 1095, x_radius: 50, y_radius: 50
rect x: 30, y: 30, width: 128, height: 128, x_radius: 25, y_radius: 25
# Arrays are rendered over each card
text str: data['name'], x: 250, y: 55, font: 'Arial weight=900 18'
text str: data['level'], x: 65, y: 40, font: 'Arial 24', color: :burnt_orange
text str: 'Font strings are expressive!', x:65, y: 200,
font: 'Impact bold italic 12'
text str: 'Font strings are expressive!', x:65, y: 300,
font: 'Arial,Verdana weight=900 style=oblique 12'
text str: 'Font string sizes can be overridden per card.', x: 65, y: 350,
font: 'Impact 12', font_size: [5, 7, 8]
text str: 'This text has fixed width, fixed height, center-aligned, middle-valigned, and has a red hint',
hint: :red,
x: 65, y: 400,
width: 300, height: 125,
align: :center, valign: 'MIDDLE', # these can be specified with case-insenstive strings too
font: 'Serif 5'
extents = text str: 'Ink extent return value',
x: 65, y: 550,
font: 'Sans Bold', font_size: [5, 7, 8]
margin = 10
# Extents come back as an array of hashes, which can get split out like this
ws = extents.inject([]) { |arr, ext| arr << ext[:width] + 10; arr }
hs = extents.inject([]) { |arr, ext| arr << ext[:height] + 10; arr }
# Draw a rounded box around each card's rendered text, padded by margin.
rect x: 65 - margin / 2, y: 550 - margin / 2,
width: ws, height: hs,
radius: 10, stroke_color: :black
# If width & height are defined and the text will overflow the box, we can ellipsize.
text str: "Ellipsization!\nThe ultimate question of life, the universe, and everything to life and everything is 42",
hint: :green, font: 'Arial 7',
x: 450, y: 400,
width: 280, height: 180,
ellipsize: true
# Text hints are guides for showing you how your text boxes are laid out exactly
hint text: :cyan
set font: 'Serif 7' # Impacts all future text calls (unless they specify differently)
text str: 'Text hints & fonts are globally togglable!', x: 65, y: 625
set font: :default # back to Squib-wide default
hint text: :off
text str: 'See? No hint here.',
x: 565, y: 625,
font: 'Arial 7'
# Text can be rotated, in radians, about the upper-left corner of the text box.
text str: 'Rotated',
x: 565, y: 675, angle: 0.2,
font: 'Arial 6', hint: :red
# Text can be justified, and have newlines
text str: longtext, font: 'Arial 5',
x: 65, y: 700,
width: '1.5in', height: inches(1),
justify: true, spacing: -6
# Here's how you embed images into text.
# Pass a block to the method call and use the given context
embed_text = 'Embedded icons! Take 1 :tool: and gain 2:health:. If Level 2, take 2 :tool:'
text(str: embed_text, font: 'Sans 6',
x: '1.8in', y: '2.5in', width: '0.85in',
align: :left, ellipsize: false) do |embed|
embed.svg key: ':tool:', width: 28, height: 28, file: 'spanner.svg'
embed.svg key: ':health:', width: 28, height: 28, file: 'glass-heart.svg'
end
text str: 'Fill n <span fgcolor="#ff0000">stroke</span>',
color: :green, stroke_width: 2.0, stroke_color: :blue,
x: '1.8in', y: '2.9in', width: '0.85in', font: 'Sans Bold 9', markup: true
text str: 'Stroke n <span fgcolor="#ff0000">fill</span>',
color: :green, stroke_width: 2.0, stroke_color: :blue, stroke_strategy: :stroke_first,
x: '1.8in', y: '3.0in', width: '0.85in', font: 'Sans Bold 9', markup: true
text str: 'Dotted',
color: :white, stroke_width: 2.0, dash: '4 2', stroke_color: :black,
x: '1.8in', y: '3.1in', width: '0.85in', font: 'Sans Bold 9', markup: true
#
text str: "<b>Markup</b> is <i>quite</i> <s>'easy'</s> <span fgcolor=\"\#ff0000\">awesome</span>. Can't beat those \"smart\" 'quotes', now with 10--20% more en-dashes --- and em-dashes --- with explicit ellipses too...",
markup: true,
x: 50, y: 1000,
width: 750, height: 100,
valign: :bottom,
font: 'Serif 6', hint: :cyan
save prefix: 'text_options_', format: :png
end
| 46.701923 | 538 | 0.634754 |
ff1ae34e141d11395416a37c9bcecaeb4198d6eb | 3,929 | __END__
require "spec_helper"
require 'css_sanitize.rb'
# TODO: this file isn't actually used in the system. Either use it or get rid of it
# TODO: would like to use some kind of generic, databaseless, ActiveRecord::Base derived class here, rather than User.
# some ideas for this: http://stackoverflow.com/questions/315850/rails-model-without-database
class MyModel < User
include CssSanitize
end
describe CssSanitize do
before do
@sanitizable_model = MyModel.new
end
it "disallows evil css" do
bad_strings = [
"div.foo { width: 500px; behavior: url(http://foo.com); height: 200px; }",
".test { color: red; background-image: url('javascript:alert'); border: 1px solid brown; }",
"div.foo { width: 500px; -moz-binding: foo; height: 200px; }",
# no @import for you
"\@import url(javascript:alert('Your cookie:'+document.cookie));",
# no behavior either
"behaviour:expression(function(element){alert('xss');}(this));'>",
# case-sensitivity test
'-Moz-binding: url("http://www.example.comtest.xml");',
# \uxxrl unicode
"background:\75rl('javascript:alert(\"\\75rl\")');",
"background:url(javascript:alert('html &#x75;'))",
"b\nackground: url(javascript:alert('line-broken background '))",
"background:url(javascript:alert('&#xff55;rl(full-width u)'))",
"background:url(javascript:alert(&#117;rl'))",
"background:url(javascript:alert('&#x75;rl'))",
"background:\75rl('javascript:alert(\"\\75rl\")')",
# \\d gets parsed out on ffx and ie
"background:url("javascri\\dpt:alert('injected js goes here')")",
# http://rt.livejournal.org/Ticket/Display.html?id=436
'-\4d oz-binding: url("http://localhost/test.xml#foo");',
# css comments are ignored sometimes
"xss:expr/*XSS*/ession(alert('XSS'));",
# html comments? fail
"background:url(java<!-- -->script:alert('XSS'));",
# weird comments
'color: e/* * / */xpression("r" + "e" + "d");',
# weird comments to really test that regex
'color: e/*/**/xpression("r" + "e" + "d");',
# we're not using a parser, but nonetheless ... if we were..
<<-STR
p {
dummy: '//'; background:url(javascript:alert('XSS'));
}
STR
]
bad_strings.each do |string|
@sanitizable_model.custom_css = string
@sanitizable_model.custom_css.should == "Error: invalid/disallowed characters in CSS"
end
end
it "allows good css" do
good_strings = [
".test { color: red; border: 1px solid brown; }",
"h1 { background: url(http://foobar.com/meh.jpg)}",
"div.foo { width: 500px; height: 200px; }",
"GI b gkljfl kj { { { ********" # gibberish, but should work.
]
good_strings.each do |string|
@sanitizable_model.custom_css = string
@sanitizable_model.custom_css.should == string
end
end
it "does not strip real comments" do
text = <<STR
a.foo { bar: x }
/* Group: header */
a.bar { x: poo }
STR
@sanitizable_model.custom_css = text
@sanitizable_model.custom_css.should == text
end
it "does strip suspicious comments" do
text = <<-STR
a.foo { ba/* hack */r: x }
/* Group: header */
a.bar { x: poo }
STR
@sanitizable_model.custom_css = text
@sanitizable_model.custom_css.should == "Error: invalid/disallowed characters in CSS"
@sanitizable_model.custom_css = "Foo /*/**/ Bar"
@sanitizable_model.custom_css.should == "Error: invalid/disallowed characters in CSS"
end
it "doesn't allow bad css" do
@sanitizable_model.custom_css = <<STR
test{ width: expression(alert("sux 2 be u")); }
a:link { color: red }
STR
@sanitizable_model.custom_css.should == "Error: invalid/disallowed characters in CSS"
end
end
| 31.685484 | 118 | 0.620514 |
# Jbuilder partial for a direct message: serialises its metadata plus
# the ids of every participant other than the current user.

# Idiom: build the id list with reject/map instead of manual each/push.
participant_ids = direct_message.participants
  .reject { |participant| participant.id == current_user.id }
  .map(&:id)

json.id direct_message.id
json.createdAt direct_message.created_at
json.updatedAt direct_message.updated_at

json.participantIds do
  json.array! participant_ids
end
5db4a907c390eb19b325d168f275ec3b34feca75 | 1,347 | #
# Copyright 2015, Noah Kantrowitz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Halite
  # (see Converter.write)
  module Converter
    autoload :Chef, 'halite/converter/chef'
    autoload :Libraries, 'halite/converter/libraries'
    autoload :Metadata, 'halite/converter/metadata'
    autoload :Misc, 'halite/converter/misc'
    # Convert a cookbook gem to a normal Chef cookbook by running each
    # sub-converter, in order, against the same destination directory.
    #
    # @since 1.0.0
    # @param gem_data [Halite::Gem] Gem to convert.
    # @param output_path [String] Output path.
    # @return [void]
    # @example
    #   Halite::Converter.write(Halite::Gem.new(gemspec), 'dest')
    def self.write(gem_data, output_path)
      [Chef, Libraries, Metadata, Misc].each do |converter|
        converter.write(gem_data, output_path)
      end
    end
  end
end
| 32.071429 | 74 | 0.715664 |
3995243b7032c610fc3c4ee11674517e22433797 | 776 | require 'lims-laboratory-app/organization/location'
require 'models/actions/spec_helper'
require 'models/actions/action_examples'
# Shared RSpec examples for "create" actions that attach a Location to the
# new resource. Including specs must define `subject` (the action) plus
# `uuid`, `name`, `address` and `internal` let-bindings to compare against.
shared_examples_for "creating a resource with a location" do |resource_class|
  it_behaves_like "an action"
  it "creates a resource with a location" do
    # The action must persist its work through the persistence session.
    Lims::Core::Persistence::Session.any_instance.should_receive(:save_all)
    result = subject.call
    result.should be_a(Hash)
    result.delete(:uuid).should == uuid
    # After removing :uuid, the single remaining key is the model name.
    model_name = result.keys.first
    result[model_name].should be_a(resource_class)
    result[model_name].should respond_to(:location)
    result[model_name].location.name.should == name
    result[model_name].location.address.should == address
    result[model_name].location.internal.should == internal
  end
end
| 36.952381 | 77 | 0.764175 |
1a94fb834adc2f126892ac90a929972e19f34755 | 19,047 | # This file should contain all the record creation needed to seed the database with its default values.
# The data can then be loaded with the rails db:seed command (or created alongside the database with db:setup).
#
# Examples:
#
# movies = Movie.create([{ name: 'Star Wars' }, { name: 'Lord of the Rings' }])
# Character.create(name: 'Luke', movie: movies.first)
if Rails.env.development? || Rails.env.uat?
require 'active_fedora/cleaner'
require "open-uri"
require 'faker'
# For the main community/collections
THINGS = ['cat', 'dog', 'unicorn', 'hamburger', 'librarian'].freeze
# For padding community/collection lists for pagination (need at least 26, a couple uppercase to confirm sort)
EXTRA_THINGS = ['Library', 'DONAIR', 'magpie', 'toque', 'sombrero', 'yeti', 'mimosa', 'ukulele', 'tourtière',
'falafel', 'calculator', 'papusa'].freeze
puts 'Starting seeding of dev database...'
# start fresh
[Announcement, ActiveStorage::Blob, ActiveStorage::Attachment, JupiterCore::AttachmentShim,
Identity, User, Type, Language, Institution].each(&:destroy_all)
ActiveFedora::Cleaner.clean!
# Seed an admin user
admin = User.create(name: 'Jane Admin', email: '[email protected]', admin: true)
admin.identities.create(provider: 'developer', uid: '[email protected]')
# Seed a non-admin user
non_admin = User.create(name: 'Bill Non-admin', email: '[email protected]', admin: false)
non_admin.identities.create(provider: 'developer', uid: '[email protected]')
# Seed an suspended admin user
bad_admin = User.create(name: 'Joe Bad-admin', email: '[email protected]', admin: true, suspended: true)
bad_admin.identities.create(provider: 'developer', uid: '[email protected]')
# Seed an suspended regular user
bad_user = User.create(name: 'Jill Bad-user', email: '[email protected]', admin: false, suspended: true)
bad_user.identities.create(provider: 'developer', uid: '[email protected]')
# A bunch of non-identity users for to manipulate in the admin interface
100.times do
name = Faker::TvShows::GameOfThrones.unique.character
User.create(name: name, email: "#{name.gsub(/ +/, '.').downcase}@example.edu", admin: false)
end
# Lets pick 10 prolific creators, 10 contributors
creators = 10.times.map { "#{Faker::Creature::Cat.unique.name} #{Faker::Creature::Cat.unique.breed.gsub(/[ ,]+/, '-')}" }
contributors = 10.times.map { Faker::FunnyName.unique.name_with_initial }
institutions = [CONTROLLED_VOCABULARIES[:institution].uofa, CONTROLLED_VOCABULARIES[:institution].st_stephens]
THINGS.each_with_index do |thing, idx|
if idx % 2 == 0
title = "The department of #{thing.capitalize}"
else
title = "Special reports about #{thing.pluralize}"
end
community = Community.new_locked_ldp_object(
owner: admin.id,
title: title,
description: Faker::Lorem.sentence(word_count: 20, supplemental: false, random_words_to_add: 0).chop
).unlock_and_fetch_ldp_object(&:save!)
# Attach logos, if possible
filename = File.expand_path(Rails.root + "tmp/#{thing}.png")
unless File.exist?(filename)
unless ENV['SKIP_DOWNLOAD_COMMUNITY_LOGOS'].present?
set = (thing == 'cat') ? 'set4' : 'set1'
url = Faker::Avatar.image(thing, "100x100", "png", set)
File.open(filename, 'wb') do |fo|
fo.write open(url).read
end
end
end
if File.exist?(filename)
community.logo.attach(io: File.open(filename), filename: "#{thing}.png", content_type: "image/png")
end
item_collection = Collection.new_locked_ldp_object(
owner: admin.id,
title: "The annals of '#{thing.capitalize} International'",
community_id: community.id,
description: Faker::Lorem.sentence(word_count: 40, supplemental: false, random_words_to_add: 0).chop
).unlock_and_fetch_ldp_object(&:save!)
thesis_collection = Collection.new_locked_ldp_object(
owner: admin.id,
title: "Theses about #{thing.pluralize}",
community_id: community.id,
description: Faker::Lorem.sentence(word_count: 40, supplemental: false, random_words_to_add: 0).chop
).unlock_and_fetch_ldp_object(&:save!)
# Items
20.times do |i|
seed = rand(10)
seed2 = rand(10)
base_attributes = {
owner: admin.id,
visibility: JupiterCore::VISIBILITY_PUBLIC,
subject: [thing.capitalize],
doi: "doi:bogus-#{Time.current.utc.iso8601(3)}"
}
# Add an occasional verbose description
description = if i % 10 == 5
Faker::Lorem.sentence(word_count: 100, supplemental: false, random_words_to_add: 0).chop
else
Faker::Lorem.sentence(word_count: 20, supplemental: false, random_words_to_add: 0).chop
end
# Probabilistically about 70% English, 20% French, 10% Ukrainian
languages = if seed % 10 > 2
[CONTROLLED_VOCABULARIES[:language].english]
elsif seed % 10 > 0
[CONTROLLED_VOCABULARIES[:language].french]
else
[CONTROLLED_VOCABULARIES[:language].ukrainian]
end
licence_right = {}
item_attributes = base_attributes.merge({
title: "The effects of #{Faker::Beer.name} on #{thing.pluralize}",
created: rand(20_000).days.ago.to_s,
creators: [creators[seed]],
contributors: [contributors[seed2]],
description: description,
languages: languages,
})
# Add the occasional double-author work
item_attributes[:creators] << creators[(seed + 5) % 10] if i % 7 == 3
if seed % 10 < 6
item_attributes[:license] = CONTROLLED_VOCABULARIES[:license].attribution_4_0_international
elsif seed % 10 < 7
item_attributes[:license] = CONTROLLED_VOCABULARIES[:license].public_domain_mark_1_0
elsif seed % 10 < 8
item_attributes[:license] = CONTROLLED_VOCABULARIES[:old_license].attribution_3_0_international
else
item_attributes[:rights] = 'Share my stuff with everybody'
end
if idx % 3 == 0
item_attributes[:item_type] = CONTROLLED_VOCABULARIES[:item_type].article
item_attributes[:publication_status] = [CONTROLLED_VOCABULARIES[:publication_status].published]
elsif idx % 3 == 1
item_attributes[:item_type] = CONTROLLED_VOCABULARIES[:item_type].article
item_attributes[:publication_status] = [CONTROLLED_VOCABULARIES[:publication_status].draft,
CONTROLLED_VOCABULARIES[:publication_status].submitted]
else
item_attributes[:item_type] = CONTROLLED_VOCABULARIES[:item_type].report
end
# Every once in a while, create a mondo-item with full, rich metadata to help view-related work
if i == 8
item_attributes[:title] = item_attributes[:title].gsub(/^The/, 'The complete')
# Throw in a second language occasionally
item_attributes[:languages] << CONTROLLED_VOCABULARIES[:language].other
# Why 3 and 7 below? Neither number shares a divisor with 10, ensuring a unique set
item_attributes[:creators] += 4.times.map { |j| creators[(seed + 3 * j) % 10] }
item_attributes[:contributors] += 3.times.map { |j| contributors[(seed2 + 7 * j) % 10] }
item_attributes[:subject] += ['Mondo']
item_attributes[:spatial_subjects] = ['Vegreville']
item_attributes[:temporal_subjects] = ['1980s']
item_attributes[:alternative_title] = "A full, holistic, #{thing}-tastic approach"
item_attributes[:related_link] = "http://www.example.com/#{thing}"
item_attributes[:is_version_of] = ["The CDROM titled '#{thing.pluralize.capitalize}!'",
'The original laserdisc series from Orange-on-a-Blue-Background studios']
item_attributes[:source] = "Chapter 5 of '#{thing.pluralize.capitalize} and what they drink'"
end
item = Item.new_locked_ldp_object(item_attributes).unlock_and_fetch_ldp_object do |uo|
if i == 8
uo.add_to_path(community.id, item_collection.id)
uo.add_to_path(community.id, thesis_collection.id)
uo.save!
else
uo.add_to_path(community.id, item_collection.id)
uo.save!
end
end
if i == 8
# Attach two files to the mondo-item
File.open(Rails.root + 'app/assets/images/theses.jpg', 'r') do |file1|
File.open(Rails.root + 'test/fixtures/files/image-sample.jpeg', 'r') do |file2|
# Bit of a hack to fake a long file name ...
def file2.original_filename
'wefksdkhvkasdkfjhwekkjahsdkjkajvbkejfkwejfjkdvkhdkfhw&ükefkhoiekldkfhkdfjhiwuegfugksjdcjbsjkdbw.jpeg'
end
item.add_and_ingest_files([file1, file2])
end
end
end
item.set_thumbnail(item.files.first) if item.files.first.present?
field = Faker::Job.field
level = ["Master's", 'Doctorate'][i % 2]
thesis_attributes = base_attributes.merge({
title: "Thesis about the effects of #{Faker::Beer.name} on #{thing.pluralize}",
graduation_date: "#{rand(20_000).days.ago.year}#{['-06','-11',''][i % 3]}",
dissertant: creators[seed],
abstract: description,
language: languages.first,
specialization: field,
departments: ["Deparment of #{field}"],
supervisors: ["#{contributors[seed]} (#{field})"],
committee_members: ["#{contributors[seed2]} (#{field})"],
rights: 'Share my stuff with everybody',
thesis_level: level,
degree: "#{level} of #{field}",
institution: institutions[(i / 10) % 2]
})
# Every once in a while, create a mondo-thesis with full, rich metadata to help view-related work
if i == 8
thesis_attributes[:title] = thesis_attributes[:title].gsub(/^Thesis/, 'An über-thesis')
thesis_attributes[:subject] += ['Mondo']
thesis_attributes[:alternative_title] = "A full, holistic, #{thing}-tastic approach"
thesis_attributes[:is_version_of] = ["The CDROM titled '#{thing.pluralize.capitalize}!'",
'The original laserdisc series from Orange-on-a-Blue-Background studios']
department2 = 'Department of Everything'
thesis_attributes[:departments] += [department2]
thesis_attributes[:supervisors] += ["#{contributors[(seed + 3 * seed2) % 10]} (#{department2})"]
thesis_attributes[:committee_members] += ["#{contributors[(seed + 7 * seed2) % 10]} (#{department2})"]
end
thesis = Thesis.new_locked_ldp_object(thesis_attributes).unlock_and_fetch_ldp_object do |uo|
if i == 8
uo.add_to_path(community.id, item_collection.id)
uo.add_to_path(community.id, thesis_collection.id)
uo.save!
else
uo.add_to_path(community.id, thesis_collection.id)
uo.save!
end
end
if i == 8
# To test PCDM/list_source ordering, attach three files to the mondo-thesis!
File.open(Rails.root + 'app/assets/images/theses.jpg', 'r') do |file1|
File.open(Rails.root + 'test/fixtures/files/image-sample.jpeg', 'r') do |file2|
File.open(Rails.root + 'app/assets/images/era-logo.png', 'r') do |file3|
thesis.add_and_ingest_files([file1, file2, file3])
end
end
end
end
thesis.set_thumbnail(thesis.files.first) if thesis.files.first.present?
end
# Add a private item
Item.new_locked_ldp_object(
owner: admin.id,
creators: [creators[rand(10)]],
visibility: JupiterCore::VISIBILITY_PRIVATE,
created: rand(20_000).days.ago.to_s,
title: "Private #{thing.pluralize}, public lives: a survey of social media trends",
description: Faker::Lorem.sentence(word_count: 20, supplemental: false, random_words_to_add: 0).chop,
languages: [CONTROLLED_VOCABULARIES[:language].english],
license: CONTROLLED_VOCABULARIES[:license].attribution_4_0_international,
item_type: CONTROLLED_VOCABULARIES[:item_type].chapter,
subject: [thing.capitalize, 'Privacy'],
doi: "doi:bogus-#{Time.current.utc.iso8601(3)}"
).unlock_and_fetch_ldp_object do |uo|
uo.add_to_path(community.id, item_collection.id)
uo.save!
end
# Add a CCID protected item
Item.new_locked_ldp_object(
owner: admin.id,
creators: [creators[rand(10)]],
visibility: JupiterCore::VISIBILITY_AUTHENTICATED,
created: rand(20_000).days.ago.to_s,
title: "Everything You Need To Know About: University of Alberta and #{thing.pluralize}!",
description: Faker::Lorem.sentence(word_count: 20, supplemental: false, random_words_to_add: 0).chop,
languages: [CONTROLLED_VOCABULARIES[:language].english],
license: CONTROLLED_VOCABULARIES[:license].attribution_4_0_international,
item_type: CONTROLLED_VOCABULARIES[:item_type].report,
subject: [thing.capitalize, 'CCID'],
doi: "doi:bogus-#{Time.current.utc.iso8601(3)}"
).unlock_and_fetch_ldp_object do |uo|
uo.add_to_path(community.id, item_collection.id)
uo.save!
end
# Add a currently embargoed item
Item.new_locked_ldp_object(
owner: admin.id,
creators: [creators[rand(10)]],
visibility: Item::VISIBILITY_EMBARGO,
created: rand(20_000).days.ago.to_s,
title: "Embargo and #{Faker::Address.country}: were the #{thing.pluralize} left behind?",
description: Faker::Lorem.sentence(word_count: 20, supplemental: false, random_words_to_add: 0).chop,
languages: [CONTROLLED_VOCABULARIES[:language].english],
license: CONTROLLED_VOCABULARIES[:license].attribution_4_0_international,
item_type: CONTROLLED_VOCABULARIES[:item_type].conference_workshop_presentation,
subject: [thing.capitalize, 'Embargoes'],
doi: "doi:bogus-#{Time.current.utc.iso8601(3)}"
).unlock_and_fetch_ldp_object do |uo|
uo.add_to_path(community.id, item_collection.id)
uo.embargo_end_date = 20.years.from_now.to_date
uo.visibility_after_embargo = CONTROLLED_VOCABULARIES[:visibility].public
uo.save!
end
# Add a formerly embargoed item
Item.new_locked_ldp_object(
owner: admin.id,
creators: [creators[rand(10)]],
visibility: Item::VISIBILITY_EMBARGO,
created: rand(20_000).days.ago.to_s,
title: "Former embargo of #{Faker::Address.country}: the day the #{thing.pluralize} were free",
description: Faker::Lorem.sentence(word_count: 20, supplemental: false, random_words_to_add: 0).chop,
languages: [CONTROLLED_VOCABULARIES[:language].english],
license: CONTROLLED_VOCABULARIES[:license].attribution_4_0_international,
item_type: CONTROLLED_VOCABULARIES[:item_type].dataset,
subject: [thing.capitalize, 'Freedom'],
doi: "doi:bogus-#{Time.current.utc.iso8601(3)}"
).unlock_and_fetch_ldp_object do |uo|
uo.add_to_path(community.id, item_collection.id)
uo.embargo_end_date = 2.days.ago.to_date
uo.visibility_after_embargo = CONTROLLED_VOCABULARIES[:visibility].public
uo.save!
end
# Add an item owned by non-admin
Item.new_locked_ldp_object(
owner: non_admin.id,
creators: [creators[rand(10)]],
visibility: JupiterCore::VISIBILITY_PUBLIC,
created: rand(20_000).days.ago.to_s,
title: "Impact of non-admin users on #{thing.pluralize}",
description: Faker::Lorem.sentence(word_count: 20, supplemental: false, random_words_to_add: 0).chop,
languages: [CONTROLLED_VOCABULARIES[:language].english],
license: CONTROLLED_VOCABULARIES[:license].attribution_4_0_international,
item_type: CONTROLLED_VOCABULARIES[:item_type].learning_object,
subject: [thing.capitalize, 'Equality'],
# Add a temporal subject
temporal_subjects: ['The 1950s'],
doi: "doi:bogus-#{Time.current.utc.iso8601(3)}"
).unlock_and_fetch_ldp_object do |uo|
uo.add_to_path(community.id, item_collection.id)
uo.save!
end
# Want one multi-collection item per community
Item.new_locked_ldp_object(
owner: admin.id,
creators: [creators[rand(10)]],
visibility: JupiterCore::VISIBILITY_PUBLIC,
created: rand(20_000).days.ago.to_s,
title: "Multi-collection random images of #{thing.pluralize}",
description: Faker::Lorem.sentence(word_count: 20, supplemental: false, random_words_to_add: 0).chop,
# No linguistic content
languages: [CONTROLLED_VOCABULARIES[:language].no_linguistic_content],
license: CONTROLLED_VOCABULARIES[:license].attribution_4_0_international,
item_type: CONTROLLED_VOCABULARIES[:item_type].image,
subject: [thing.capitalize, 'Randomness', 'Pictures'],
# Add a spacial subject
spatial_subjects: ['Onoway'],
doi: "doi:bogus-#{Time.current.utc.iso8601(3)}"
).unlock_and_fetch_ldp_object do |uo|
uo.add_to_path(community.id, item_collection.id)
uo.add_to_path(community.id, thesis_collection.id)
uo.save!
end
end
# Pad with empty communities for pagination (starts with Z for sort order)
EXTRA_THINGS.each do |thing|
Community.new_locked_ldp_object(
owner: admin.id,
title: "Zoo#{thing}ology Institute of North-Eastern Upper Alberta (and Saskatchewan)",
description: Faker::Lorem.sentence(word_count: 20, supplemental: false, random_words_to_add: 0).chop
).unlock_and_fetch_ldp_object(&:save!)
end
# One community with a lot of empty restricted collections
community = Community.new_locked_ldp_object(
owner: admin.id,
title: "The Everything Department",
description: Faker::Lorem.sentence(word_count: 20, supplemental: false, random_words_to_add: 0).chop
).unlock_and_fetch_ldp_object(&:save!)
EXTRA_THINGS.each do |thing|
collection = Collection.new_locked_ldp_object(
owner: admin.id,
title: "Articles about the relationship between #{thing.pluralize} and non-#{thing.pluralize}",
community_id: community.id,
restricted: true,
description: "A restricted collection"
).unlock_and_fetch_ldp_object(&:save!)
end
end
# Types
[:book, :book_chapter, :conference_workshop_poster,
:conference_workshop_presentation, :dataset,
:image, :journal_article_draft, :journal_article_published,
:learning_object, :report, :research_material, :review].each do |type_name|
Type.where(name: type_name).first_or_create
end
# Languages
[:english, :french, :spanish, :chinese, :german,
:italian, :russian, :ukrainian, :japanese,
:no_linguistic_content, :other].each do |language_name|
Language.where(name: language_name).first_or_create
end
# Institutions
[:uofa, :st_stephens].each do |institution_name|
Institution.where(name: institution_name).first_or_create
end
puts 'Database seeded successfully!'
| 45.35 | 123 | 0.676852 |
1cefe681909a2712b08a50f50f758fd4235c497c | 3,810 | module Pod
module Generator
module XCConfig
# Generates the private xcconfigs for the pod targets.
#
# The xcconfig file for a Pod target merges the pod target
# configuration values with the default configuration values
# required by CocoaPods.
#
class PodXCConfig
# @return [Target] the target represented by this xcconfig.
#
attr_reader :target
# @return [Boolean] whether this xcconfig is for a test target.
#
attr_reader :test_xcconfig
alias test_xcconfig? test_xcconfig
# Initialize a new instance
#
# @param [Target] target @see #target
#
# @param [Boolean] test_xcconfig
# whether this is an xcconfig for a test native target.
#
def initialize(target, test_xcconfig = false)
@target = target
@test_xcconfig = test_xcconfig
end
# Generates and saves the xcconfig to the given path.
#
# @param [Pathname] path
# the path where the xcconfig should be stored.
#
# @return [Xcodeproj::Config]
#
def save_as(path)
result = generate
result.save_as(path)
result
end
# Generates the xcconfig.
#
# @return [Xcodeproj::Config]
#
def generate
config = {
'FRAMEWORK_SEARCH_PATHS' => '$(inherited) ',
'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) COCOAPODS=1',
'HEADER_SEARCH_PATHS' => '$(inherited) ' + XCConfigHelper.quote(target.header_search_paths(test_xcconfig?)),
'LIBRARY_SEARCH_PATHS' => '$(inherited) ',
'OTHER_CFLAGS' => '$(inherited) ',
'OTHER_LDFLAGS' => XCConfigHelper.default_ld_flags(target, test_xcconfig?),
'OTHER_SWIFT_FLAGS' => '$(inherited) ',
'PODS_ROOT' => '${SRCROOT}',
'PODS_TARGET_SRCROOT' => target.pod_target_srcroot,
'PRODUCT_BUNDLE_IDENTIFIER' => 'org.cocoapods.${PRODUCT_NAME:rfc1034identifier}',
'SKIP_INSTALL' => 'YES',
'SWIFT_ACTIVE_COMPILATION_CONDITIONS' => '$(inherited) ',
'SWIFT_INCLUDE_PATHS' => '$(inherited) ',
}
xcconfig = Xcodeproj::Config.new(config)
XCConfigHelper.add_settings_for_file_accessors_of_target(nil, target, xcconfig, true, test_xcconfig?)
target.file_accessors.each do |file_accessor|
xcconfig.merge!(file_accessor.spec_consumer.pod_target_xcconfig) if test_xcconfig? == file_accessor.spec.test_specification?
end
XCConfigHelper.add_target_specific_settings(target, xcconfig)
recursive_dependent_targets = target.recursive_dependent_targets
xcconfig.merge! XCConfigHelper.search_paths_for_dependent_targets(target, recursive_dependent_targets, test_xcconfig?)
XCConfigHelper.generate_vendored_build_settings(target, recursive_dependent_targets, xcconfig, false, test_xcconfig?)
if test_xcconfig?
test_dependent_targets = [target, *target.recursive_test_dependent_targets].uniq
xcconfig.merge! XCConfigHelper.search_paths_for_dependent_targets(target, test_dependent_targets - recursive_dependent_targets, test_xcconfig?)
XCConfigHelper.generate_vendored_build_settings(nil, target.all_dependent_targets, xcconfig, true, test_xcconfig?)
XCConfigHelper.generate_other_ld_flags(nil, target.all_dependent_targets, xcconfig)
XCConfigHelper.generate_ld_runpath_search_paths(target, false, true, xcconfig)
end
xcconfig
end
#-----------------------------------------------------------------------#
end
end
end
end
| 41.868132 | 155 | 0.630184 |
e9711fcd727e910cf106365ad2830c750c186cd8 | 437 | cask 'qlimagesize' do
version '2.6.1'
sha256 '466c18539653056ccf7eb09eb6c68689fd9a8280a3c2ade9f2d457de53504821'
url "https://github.com/Nyx0uf/qlImageSize/releases/download/#{version}/qlImageSize.qlgenerator.zip"
appcast 'https://github.com/Nyx0uf/qlimagesize/releases.atom'
name 'qlImageSize'
homepage 'https://github.com/Nyx0uf/qlImageSize'
depends_on macos: '>= :high_sierra'
qlplugin 'qlImageSize.qlgenerator'
end
| 31.214286 | 102 | 0.782609 |
1dde656843f63569d846aead54950dc140a90b22 | 2,174 | Rails.application.configure do
  # Settings specified here will take precedence over those in config/application.rb.
  # In the development environment your application's code is reloaded on
  # every request. This slows down response time but is perfect for development
  # since you don't have to restart the web server when you make code changes.
  config.cache_classes = false
  # Do not eager load code on boot.
  config.eager_load = false
  # Show full error reports.
  config.consider_all_requests_local = true
  # Enable/disable caching. By default caching is disabled.
  # Run rails dev:cache to toggle caching.
  # (The rake task simply creates/removes the tmp/caching-dev.txt marker file
  # checked below.)
  if Rails.root.join('tmp', 'caching-dev.txt').exist?
    config.action_controller.perform_caching = true
    config.cache_store = :memory_store
    config.public_file_server.headers = {
      'Cache-Control' => "public, max-age=#{2.days.to_i}"
    }
  else
    config.action_controller.perform_caching = false
    config.cache_store = :null_store
  end
  # Store uploaded files on the local file system (see config/storage.yml for options)
  config.active_storage.service = :local
  # Don't care if the mailer can't send.
  config.action_mailer.raise_delivery_errors = false
  config.action_mailer.perform_caching = false
  # Print deprecation notices to the Rails logger.
  config.active_support.deprecation = :log
  # Raise an error on page load if there are pending migrations.
  config.active_record.migration_error = :page_load
  # Highlight code that triggered database queries in logs.
  config.active_record.verbose_query_logs = true
  # Debug mode disables concatenation and preprocessing of assets.
  # This option may cause significant delays in view rendering with a large
  # number of complex assets.
  config.assets.debug = true
  # Suppress logger output for asset requests.
  config.assets.quiet = true
  # Raises error for missing translations
  # config.action_view.raise_on_missing_translations = true
  # Use an evented file watcher to asynchronously detect changes in source code,
  # routes, locales, etc. This feature depends on the listen gem.
  config.file_watcher = ActiveSupport::EventedFileUpdateChecker
| 35.639344 | 86 | 0.76219 |
f82c13e15d44c7ffe42f8e74ed73e4a55ca269e6 | 3,177 | require_relative 'base_decorator'
require 'pact_broker/api/decorators/webhook_request_template_decorator'
require 'pact_broker/api/decorators/timestamps'
require 'pact_broker/webhooks/webhook_request_template'
require 'pact_broker/webhooks/webhook_event'
require 'pact_broker/api/decorators/basic_pacticipant_decorator'
require_relative 'pact_pacticipant_decorator'
require_relative 'pacticipant_decorator'
module PactBroker
module Api
module Decorators
class WebhookDecorator < BaseDecorator
class WebhookEventDecorator < BaseDecorator
property :name
end
property :description, getter: lambda { |context| context[:represented].display_description }
property :consumer, :class => PactBroker::Domain::Pacticipant do
property :name
end
property :provider, :class => PactBroker::Domain::Pacticipant do
property :name
end
property :enabled, default: true
property :request, :class => PactBroker::Webhooks::WebhookRequestTemplate, extend: WebhookRequestTemplateDecorator
collection :events, :class => PactBroker::Webhooks::WebhookEvent, extend: WebhookEventDecorator
include Timestamps
link :self do | options |
{
title: represented.display_description,
href: webhook_url(represented.uuid, options[:base_url])
}
end
link :'pb:execute' do | options |
{
title: "Test the execution of the webhook with the latest matching pact or verification by sending a POST request to this URL",
href: webhook_execution_url(represented, options[:base_url])
}
end
link :'pb:consumer' do | options |
if represented.consumer
{
title: "Consumer",
name: represented.consumer.name,
href: pacticipant_url(options.fetch(:base_url), represented.consumer)
}
end
end
link :'pb:provider' do | options |
if represented.provider
{
title: "Provider",
name: represented.provider.name,
href: pacticipant_url(options.fetch(:base_url), represented.provider)
}
end
end
link :'pb:pact-webhooks' do | options |
if represented.consumer && represented.provider
{
title: "All webhooks for consumer #{represented.consumer.name} and provider #{represented.provider.name}",
href: webhooks_for_consumer_and_provider_url(represented.consumer, represented.provider, options[:base_url])
}
end
end
link :'pb:webhooks' do | options |
{
title: "All webhooks",
href: webhooks_url(options[:base_url])
}
end
def from_json represented
super.tap do | webhook |
if webhook.events == nil
webhook.events = [PactBroker::Webhooks::WebhookEvent.new(name: PactBroker::Webhooks::WebhookEvent::DEFAULT_EVENT_NAME)]
end
end
end
end
end
end
end
| 32.418367 | 139 | 0.626377 |
f8e21a89f1c251da5e1b7f10558c34e6ddaba8ea | 1,115 | # frozen_string_literal: true
##
# This file is part of WhatWeb and may be subject to
# redistribution and commercial restrictions. Please see the WhatWeb
# web site for more information on licensing and terms of use.
# http://www.morningstarsecurity.com/research/whatweb
##
# Version 0.2 #
# Updated regex matches and version detection
##
# Fingerprint plugin for the BlognPlus blogging system: matches the default
# "Powered by" footer link and extracts the version from the meta generator.
WhatWeb::Plugin.define "BlognPlus" do
  @author = "Brendan Coles <bcoles at gmail dot com>" # 2010-06-15
  @version = "0.2"
  @description = "BlognPlus"
  @website = "http://www.blogn.org/"
  # 106 results for "powered by BlognPlus" @ 2010-06-15
  # Dorks #
  @dorks = [
    '"powered by BlognPlus"'
  ]
  @matches = [
    # Default powered by text
    { regexp: /Powered by[\s]*<a href="http:\/\/www.blogn.org[^>]*>BlognPlus/i },
    # Version detection # Meta generator
    { version: /<meta name="generator"[^>]*content="BlognPlus ([0-9\.]+)/ },
  ]
end
# <META name="copyright" content="Copyright 2000-2006 CMScontrol-GervaWeb">
# <META NAME='copyright' CONTENT='Copyright © Gerva Srl'>
| 28.589744 | 81 | 0.684305 |
1a7d7ee784dd1ccd536d66feef5e7b0f41e2329c | 561 | require 'fog/vmfusion/core'
module Fog
module Compute
class Vmfusion < Fog::Service
model_path 'fog/vmfusion/models/compute'
model :server
collection :servers
class Mock
def initialize(options={})
Fog::Mock.not_implemented
end
end
class Real
def initialize(options={})
begin
require 'fission'
rescue LoadError => e
retry if require('rubygems')
raise e.message
end
end
end
end
end
end
| 16.5 | 46 | 0.540107 |
2158536e6c3867c7661282687ca2f5402a51d31d | 2,329 | ActiveAdmin.register AdminUser do
# See permitted parameters documentation:
# https://github.com/activeadmin/activeadmin/blob/master/docs/2-resource-customization.md#setting-up-strong-parameters
#
permit_params :email, :password, :password_confirmation
menu priority: 6, label: "管理员"
actions :all, except: [:destroy]
filter :email
filter :current_sign_in_at
filter :last_sign_in_at
filter :current_sign_in_ip
filter :last_sign_in_ip
filter :created_at
index do
selectable_column
column('#', :id, sortable: false) do |user|
link_to_if(current_active_admin_user.id == user.id, user.id, admin_admin_user_path(user)) do
"#{user.id}"
end
end
column(:email, sortable: false) do |user|
link_to_if(current_active_admin_user.id == user.id, user.email, admin_admin_user_path(user)) do
user.email
end
end
column(:current_sign_in_at) do |user|
user.current_sign_in_at.nil? ? "" : user.current_sign_in_at.strftime("%Y-%m-%d %H:%M:%S")
end
column :current_sign_in_ip, sortable: false
column(:last_sign_in_at) do |user|
user.current_sign_in_at.nil? ? "" : user.last_sign_in_at.strftime("%Y-%m-%d %H:%M:%S")
end
column :last_sign_in_ip, sortable: false
column :sign_in_count
actions defaults: false do |user|
item "编辑", edit_admin_admin_user_path(user), class: "action-division" if current_active_admin_user.id == user.id
end
end
show do
attributes_table do
row :id
row :email
row :sign_in_count
row(:current_sign_in_at) do |user|
user.current_sign_in_at.nil? ? "" : user.current_sign_in_at.strftime("%Y-%m-%d %H:%M:%S")
end
row :current_sign_in_ip
row(:last_sign_in_at) do |user|
user.last_sign_in_at.nil? ? "" : user.last_sign_in_at.strftime("%Y-%m-%d %H:%M:%S")
end
row :last_sign_in_ip
end
end
form do |f|
f.inputs "管理员信息" do
f.input :email
f.input :password
f.input :password_confirmation
end
f.actions if resource.new_record? || (!resource.nil? && current_active_admin_user.id == resource.id)
end
csv do
column :email
column :sign_in_count
column :current_sign_in_at
column :current_sign_in_ip
column :last_sign_in_at
column :last_sign_in_ip
end
end | 29.481013 | 118 | 0.680979 |
399db4ac2593acf5888ff11fb5d023ae756c5353 | 2,444 | # frozen_string_literal: true
require 'spec_helper'
require Rails.root.join('db', 'post_migrate', '20190527194900_schedule_calculate_wiki_sizes.rb')
# Spec for the post-deploy migration that backfills nil wiki_size values on
# project_statistics via a delayed background migration.
describe ScheduleCalculateWikiSizes, :migration, :sidekiq do
  let(:migration_class) { Gitlab::BackgroundMigration::CalculateWikiSizes }
  let(:migration_name) { migration_class.to_s.demodulize }
  let(:namespaces) { table(:namespaces) }
  let(:projects) { table(:projects) }
  let(:project_statistics) { table(:project_statistics) }
  context 'when missing wiki sizes exist' do
    before do
      # One populated row (id 1) and two rows with nil wiki_size (ids 2, 3)
      # that the migration should pick up.
      namespaces.create!(id: 1, name: 'wiki-migration', path: 'wiki-migration')
      projects.create!(id: 1, name: 'wiki-project-1', path: 'wiki-project-1', namespace_id: 1)
      projects.create!(id: 2, name: 'wiki-project-2', path: 'wiki-project-2', namespace_id: 1)
      projects.create!(id: 3, name: 'wiki-project-3', path: 'wiki-project-3', namespace_id: 1)
      project_statistics.create!(id: 1, project_id: 1, namespace_id: 1, wiki_size: 1000)
      project_statistics.create!(id: 2, project_id: 2, namespace_id: 1, wiki_size: nil)
      project_statistics.create!(id: 3, project_id: 3, namespace_id: 1, wiki_size: nil)
    end
    it 'schedules a background migration' do
      Sidekiq::Testing.fake! do
        Timecop.freeze do
          migrate!
          # One job covering the id range 2..3, delayed by 5 minutes.
          expect(migration_name).to be_scheduled_delayed_migration(5.minutes, 2, 3)
          expect(BackgroundMigrationWorker.jobs.size).to eq 1
        end
      end
    end
    it 'calculates missing wiki sizes', :sidekiq_might_not_need_inline do
      expect(project_statistics.find_by(id: 2).wiki_size).to be_nil
      expect(project_statistics.find_by(id: 3).wiki_size).to be_nil
      migrate!
      expect(project_statistics.find_by(id: 2).wiki_size).not_to be_nil
      expect(project_statistics.find_by(id: 3).wiki_size).not_to be_nil
    end
  end
  context 'when missing wiki sizes do not exist' do
    before do
      namespaces.create!(id: 1, name: 'wiki-migration', path: 'wiki-migration')
      projects.create!(id: 1, name: 'wiki-project-1', path: 'wiki-project-1', namespace_id: 1)
      project_statistics.create!(id: 1, project_id: 1, namespace_id: 1, wiki_size: 1000)
    end
    it 'does not schedule a background migration' do
      Sidekiq::Testing.fake! do
        Timecop.freeze do
          migrate!
          expect(BackgroundMigrationWorker.jobs.size).to eq 0
        end
      end
    end
  end
end
| 37.6 | 96 | 0.693535 |
3929880e7660845eded78c8685cd714ed38e46dd | 3,645 | require 'optparse'
require 'simplecli'
class String
def indent number_of_spaces = 1
self.gsub(/^(.*)$/, (' ' * number_of_spaces) + '\1')
end
end
class RackBox::Bin
include SimpleCLI
def initialize *args
@default_command = :request
super
end
def usage *args
puts <<doco
rackbox == %{ For hitting your Rack applications }
Usage:
rackbox command [options]
Examples:
rackbox info # prints app info
rackbox commands # list commands
rackbox get / # request / path
rackbox post -d '<XML>' / # POST data to / path
rackbox / # GET /
rackbox --data '<XML>' # POST data to / path
rackbox /dogs --xml # get /dogs, accepting xml
Further help:
rackbox commands # list all available commands
rackbox help <COMMAND> # show help for COMMAND
rackbox help # show this help message
doco
end
%w( get post put delete ).each do |http_method|
define_method(http_method) do |*args|
request *( ['-m', http_method] + args )
end
define_method("#{http_method}_help") do
<<doco
Usage: #{ script_name } #{ http_method } '/some/path'
Summary:
Run a #{ http_method.upcase } request against a Rack app
end
doco
end
end
def request_help
<<doco
Usage: #{ script_name } request '/some/path'
Options:
-m, --method The HTTP method to use, default: get
-d, --data Data to that you can PUT/POST, eg. -d '<XML>'
-s, --show What to show, eg. -s body,headers,status or
call multiple times, eg. -s body -s headers
-h, --header Add header to request, eg. -h accept=text/plain
-j, --json Sets 'Accept' header to 'application/json'
-x, --xml Sets 'Accept' header to 'application/xml'
Summary:
Run a request against a Rack app
end
doco
end
def request *args
options = {
:show => [],
:headers => {}
}
opts = OptionParser.new do |opts|
opts.on('-j', '--json'){ options[:headers]['Accept'] = 'application/json' }
opts.on('-x', '--xml'){ options[:headers]['Accept'] = 'application/xml' }
opts.on('-m','--method [m]'){|m| options[:method] = m }
opts.on('-d','--data [d]'){|d| options[:data] = d }
opts.on('-s','--show [s]'){|s| options[:show] += s.split(',') }
opts.on('-h','--header [h]'){|h|
name,value = h.split('=')
options[:headers][name] = value
}
end
opts.parse! args
rackbox_options = { }
rackbox_options[:method] = options[:method] if options[:method]
rackbox_options[:data] = options[:data] if options[:data]
options[:headers].each do |name, value|
rackbox_options[name] = value
end
url = args.pop
response = RackBox.request url, rackbox_options
options[:show] = %w( body headers status ) if options[:show].empty?
if options[:show].include? 'body'
body_text = ''
response.body.each {|str| body_text << str }
end
output = "Response:\n"
output << " Status: #{ response.status }\n" if options[:show].include? 'status'
output << " Headers: \n#{ response.headers.to_yaml.strip.indent(4) }\n" if options[:show].include? 'headers'
output << " Body: \n#{ body_text.indent(4) }\n" if options[:show].include? 'body'
puts output
end
def info_help
<<doco
Usage: #{ script_name } info
Summary:
Display information about the current Rack application
end
doco
end
def info
puts "RackBox.app => #{ RackBox.app.inspect }"
end
end
| 27.406015 | 113 | 0.582167 |
18c943a645e3a59d51db521ad20b28b5cebccea9 | 1,278 | require 'uri'
module OpenAPIParser::Findable
# @param [String] reference
# @return [OpenAPIParser::Findable]
def find_object(reference)
return self if object_reference == reference
remote_reference = !reference.start_with?('#')
return find_remote_object(reference) if remote_reference
return nil unless reference.start_with?(object_reference)
@find_object_cache = {} unless defined? @find_object_cache
if (obj = @find_object_cache[reference])
return obj
end
if (child = _openapi_all_child_objects[reference])
@find_object_cache[reference] = child
return child
end
_openapi_all_child_objects.values.each do |c|
if (obj = c.find_object(reference))
@find_object_cache[reference] = obj
return obj
end
end
nil
end
def purge_object_cache
@purged = false unless defined? @purged
return if @purged
@find_object_cache = {}
@purged = true
_openapi_all_child_objects.values.each(&:purge_object_cache)
end
private
def find_remote_object(reference)
reference_uri = URI(reference)
fragment = reference_uri.fragment
reference_uri.fragment = nil
root.load_another_schema(reference_uri)&.find_object("##{fragment}")
end
end
| 24.576923 | 74 | 0.701878 |
ab44e8fdc6961e552572dcebe2387e785d1cc2be | 5,313 | # Copyright (c) 2009-2012 VMware, Inc.
module Bosh::Agent
module Message
class Apply < Base
def self.long_running?; true end
def self.process(args)
self.new(args).apply
end
def initialize(args)
@platform = Bosh::Agent::Config.platform
if args.size < 1
raise ArgumentError, "not enough arguments"
end
@new_spec = args.first
unless @new_spec.is_a?(Hash)
raise ArgumentError, "invalid spec, Hash expected, " +
"#{@new_spec.class} given"
end
# Note: new spec needs to be updated before a plan is
# created which binds to this new spec
#
# Collect network state from the infrastructure
# - Loop through each network
# - Get network settings for each network
if @new_spec["networks"]
@new_spec["networks"].each do |network, properties|
infrastructure = Bosh::Agent::Config.infrastructure
network_settings =
infrastructure.get_network_settings(network, properties)
logger.debug("current network settings from VM: #{network_settings.inspect}")
logger.debug("new network settings to be applied: #{properties.inspect}")
if network_settings
# This merge is messing with the DNS server list!
# It will overwrite the custom resolver which
@new_spec["networks"][network].merge!(network_settings)
logger.debug("merged network settings: #{@new_spec["networks"].inspect}")
end
end
end
@old_spec = Bosh::Agent::Config.state.to_hash
@old_plan = Bosh::Agent::ApplyPlan::Plan.new(@old_spec)
@new_plan = Bosh::Agent::ApplyPlan::Plan.new(@new_spec)
%w(bosh jobs packages monit).each do |dir|
FileUtils.mkdir_p(File.join(base_dir, dir))
end
end
def apply
logger.info("Applying: #{@new_spec.inspect}")
if !@old_plan.deployment.empty? &&
@old_plan.deployment != @new_plan.deployment
raise Bosh::Agent::MessageHandlerError,
"attempt to apply #{@new_plan.deployment} " +
"to #{old_plan.deployment}"
end
# FIXME: tests
# if @state["configuration_hash"] == @new_spec["configuration_hash"]
# return @state
# end
if @new_plan.configured?
begin
delete_job_monit_files
apply_job
apply_packages
configure_job
reload_monit
@platform.update_logging(@new_spec)
rescue Exception => e
raise Bosh::Agent::MessageHandlerError,
"#{e.message}: #{e.backtrace}"
end
end
# FIXME: assumption right now: if apply succeeds state should be
# identical with apply spec
Bosh::Agent::Config.state.write(@new_spec)
@new_spec
rescue Bosh::Agent::StateError => e
raise Bosh::Agent::MessageHandlerError, e
end
private
def delete_job_monit_files
dir = File.join(base_dir, "monit", "job")
logger.info("Removing job-specific monit files: #{dir}")
# Remove all symlink targets
Dir.glob(File.join(dir, "*")).each do |f|
if File.symlink?(f)
logger.info("Removing monit symlink target file: " +
"#{File.readlink(f)}")
FileUtils.rm_rf(File.readlink(f))
end
end
FileUtils.rm_rf(dir)
end
def apply_job
if @new_plan.has_jobs?
@new_plan.install_jobs
else
logger.info("No job")
end
end
def apply_packages
if @new_plan.has_packages?
@new_plan.install_packages
else
logger.info("No packages")
end
cleanup_packages
end
def configure_job
if @new_plan.has_jobs?
@new_plan.configure_jobs
end
end
# We GC packages - leaving the package union of old spec and new spec
def cleanup_packages
delete_old_packages
delete_old_symlinks
end
def delete_old_packages
files_to_keep = Set.new
(@old_plan.packages + @new_plan.packages).each do |package|
files_to_keep << package.install_path
end
glob = File.join(base_dir, "data", "packages", "*", "*")
Dir[glob].each do |path|
unless files_to_keep.include?(path)
logger.info("Removing old package version: #{path}")
FileUtils.rm_rf(path)
end
end
end
def delete_old_symlinks
files_to_keep = Set.new
(@old_plan.packages + @new_plan.packages).each do |package|
files_to_keep << package.link_path
end
glob = File.join(base_dir, "packages", "*")
Dir[glob].each do |path|
unless files_to_keep.include?(path)
logger.info("Removing old package link: #{path}")
FileUtils.rm_rf(path)
end
end
end
def reload_monit
if Bosh::Agent::Config.configure
Bosh::Agent::Monit.reload
end
end
end
end
end
| 28.411765 | 89 | 0.571617 |
f8d4aa78a3d3a095b601b0a17438db8b924693fa | 933 | cask 'lando' do
version '3.0.0-rc.13'
sha256 'd97805e556e086fe0b1f9f8083f95ed918a2ac92bcff4792a047d0ab52fe875d'
# github.com/lando/lando was verified as official when first introduced to the cask
url "https://github.com/lando/lando/releases/download/v#{version}/lando-v#{version}.dmg"
appcast 'https://github.com/lando/lando/releases.atom'
name 'Lando'
homepage 'https://docs.devwithlando.io/'
depends_on cask: 'docker'
pkg 'LandoInstaller.pkg',
choices: [
{
'choiceIdentifier' => 'choiceDocker',
'choiceAttribute' => 'selected',
'attributeSetting' => 0,
},
{
'choiceIdentifier' => 'choiceLando',
'choiceAttribute' => 'selected',
'attributeSetting' => 1,
},
]
uninstall pkgutil: 'io.lando.pkg.lando'
end
| 32.172414 | 90 | 0.566988 |
3871c628fe46b5015f4548e0b34142f038940cb0 | 5,002 | # -*- mode: ruby; ruby-indent-level: 4 -*- vim: sw=4
#
# Classes required by the full core typeset
#
module YAML
#
# Default private type
#
class PrivateType
def self.tag_subclasses?; false; end
verbose, $VERBOSE = $VERBOSE, nil
def initialize( type, val )
@type_id = type; @value = val
@value.taguri = "x-private:#{ @type_id }"
end
def to_yaml( opts = {} )
@value.to_yaml( opts )
end
ensure
$VERBOSE = verbose
end
#
# Default domain type
#
class DomainType
def self.tag_subclasses?; false; end
verbose, $VERBOSE = $VERBOSE, nil
def initialize( domain, type, val )
@domain = domain; @type_id = type; @value = val
@value.taguri = "tag:#{ @domain }:#{ @type_id }"
end
def to_yaml( opts = {} )
@value.to_yaml( opts )
end
ensure
$VERBOSE = verbose
end
#
# Unresolved objects
#
class Object
def self.tag_subclasses?; false; end
def to_yaml( opts = {} )
YAML::quick_emit( self, opts ) do |out|
out.map( "tag:ruby.yaml.org,2002:object:#{ @class }", to_yaml_style ) do |map|
@ivars.each do |k,v|
map.add( k, v )
end
end
end
end
end
#
# YAML Hash class to support comments and defaults
#
class SpecialHash < ::Hash
attr_accessor :default
def inspect
self.default.to_s
end
def to_s
self.default.to_s
end
def update( h )
if YAML::SpecialHash === h
@default = h.default if h.default
end
super( h )
end
def to_yaml( opts = {} )
opts[:DefaultKey] = self.default
super( opts )
end
end
#
# Builtin collection: !omap
#
class Omap < ::Array
yaml_as "tag:yaml.org,2002:omap"
def yaml_initialize( tag, val )
if Array === val
val.each do |v|
if Hash === v
concat( v.to_a ) # Convert the map to a sequence
else
raise YAML::Error, "Invalid !omap entry: " + val.inspect
end
end
else
raise YAML::Error, "Invalid !omap: " + val.inspect
end
self
end
def self.[]( *vals )
o = Omap.new
0.step( vals.length - 1, 2 ) do |i|
o[vals[i]] = vals[i+1]
end
o
end
def []( k )
self.assoc( k ).to_a[1]
end
def []=( k, *rest )
val, set = rest.reverse
if ( tmp = self.assoc( k ) ) and not set
tmp[1] = val
else
self << [ k, val ]
end
val
end
def has_key?( k )
self.assoc( k ) ? true : false
end
def is_complex_yaml?
true
end
def to_yaml( opts = {} )
YAML::quick_emit( self, opts ) do |out|
out.seq( taguri, to_yaml_style ) do |seq|
self.each do |v|
seq.add( Hash[ *v ] )
end
end
end
end
end
#
# Builtin collection: !pairs
#
class Pairs < ::Array
yaml_as "tag:yaml.org,2002:pairs"
def yaml_initialize( tag, val )
if Array === val
val.each do |v|
if Hash === v
concat( v.to_a ) # Convert the map to a sequence
else
raise YAML::Error, "Invalid !pairs entry: " + val.inspect
end
end
else
raise YAML::Error, "Invalid !pairs: " + val.inspect
end
self
end
def self.[]( *vals )
p = Pairs.new
0.step( vals.length - 1, 2 ) { |i|
p[vals[i]] = vals[i+1]
}
p
end
def []( k )
self.assoc( k ).to_a
end
def []=( k, val )
self << [ k, val ]
val
end
def has_key?( k )
self.assoc( k ) ? true : false
end
def is_complex_yaml?
true
end
def to_yaml( opts = {} )
YAML::quick_emit( self, opts ) do |out|
out.seq( taguri, to_yaml_style ) do |seq|
self.each do |v|
seq.add( Hash[ *v ] )
end
end
end
end
end
#
# Builtin collection: !set
#
class Set < ::Hash
yaml_as "tag:yaml.org,2002:set"
end
end
| 25.917098 | 94 | 0.419032 |
f893f7444e108036bfdb94da0f824bab2fc7e859 | 1,024 | Rails.application.routes.draw do
root 'needs#index'
resources :contacts, only: [:index, :show, :edit, :update, :new, :create] do
resources :needs, only: [:new, :create]
resources :assessments, only: [:new, :create]
# collection do
# get 'call-list'
# end
get 'triage', to: 'triage#edit', as: 'edit_triage'
put 'triage', to: 'triage#update', as: 'triage'
end
# get '/contacts/:id/needs', to: 'contacts#needs'
resources :needs, only: [:index, :show, :edit, :update, :destroy] do
resources :notes
end
resources :users, only: [:index, :new, :create, :edit, :update, :destroy]
post 'role', to: 'users#set_role', as: 'set_role'
passwordless_for :users, at: '/', as: :auth
patch '/assign_multiple' => 'needs#assign_multiple'
post '/needs/restore_need' => 'needs#restore_need'
post '/needs/restore_note' => 'needs#restore_note'
get '/deleted_needs' => 'needs#deleted_needs', as: 'deleted_needs'
get '/deleted_notes' => 'needs#deleted_notes', as: 'deleted_notes'
end
| 35.310345 | 78 | 0.655273 |
6abf4e09a16dd61e90a577b1463ce15b97503290 | 138 | source 'https://rubygems.org'
gemspec path: '..'
gem 'activemodel', '~> 6.0.0'
gem 'activerecord', '~> 6.0.0'
gem 'sqlite3', '~> 1.4.1'
| 17.25 | 30 | 0.586957 |
91c5c93e9a1bdb3ec02e691249bfe380b4c8a151 | 381 | # frozen_string_literal: true
module Types
class MutationType < Types::BaseObject
field :update_task, mutation: Mutations::UpdateTask
field :create_task, mutation: Mutations::CreateTask
field :create_like, mutation: Mutations::CreateLike
field :create_comment, mutation: Mutations::CreateComment
field :create_link, mutation: Mutations::CreateLink
end
end
| 31.75 | 61 | 0.774278 |
614f7d8a7f6aa41d7f8224f28c10b7a480f624ca | 682 | require 'spec_helper'
RSpec.describe Openapi2ruby::Generator do
let(:generator) { Openapi2ruby::Generator.new(schema) }
let(:schema) { Openapi2ruby::Parser.parse(schema_path).schemas.first }
let(:schema_path) { 'spec/fixtures/files/petstore.yaml' }
let(:output_path) { 'spec/tmp' }
describe '#generate' do
let(:generated_file) { "#{output_path}/pet_serializer.rb" }
let(:file_fixture) { 'spec/fixtures/files/pet_serializer.rb' }
before { generator.generate(output_path, nil) }
it 'generates serializer class' do
expect(File.exist?(generated_file)).to be true
expect(FileUtils.cmp(generated_file, file_fixture)).to be true
end
end
end
| 32.47619 | 72 | 0.715543 |
87285df8e76f815d01b51a6da8e1404595d2c9c1 | 6,882 | # Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
module Azure::Compute::Mgmt::V2019_12_01
module Models
#
# Specifies information about the operating system disk used by the virtual
# machine. <br><br> For more information about disks, see [About disks and
# VHDs for Azure virtual
# machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
#
class OSDisk
include MsRestAzure
# @return [OperatingSystemTypes] This property allows you to specify the
# type of the OS that is included in the disk if creating a VM from
# user-image or a specialized VHD. <br><br> Possible values are: <br><br>
# **Windows** <br><br> **Linux**. Possible values include: 'Windows',
# 'Linux'
attr_accessor :os_type
# @return [DiskEncryptionSettings] Specifies the encryption settings for
# the OS Disk. <br><br> Minimum api-version: 2015-06-15
attr_accessor :encryption_settings
# @return [String] The disk name.
attr_accessor :name
# @return [VirtualHardDisk] The virtual hard disk.
attr_accessor :vhd
# @return [VirtualHardDisk] The source user image virtual hard disk. The
# virtual hard disk will be copied before being attached to the virtual
# machine. If SourceImage is provided, the destination virtual hard drive
# must not exist.
attr_accessor :image
# @return [CachingTypes] Specifies the caching requirements. <br><br>
# Possible values are: <br><br> **None** <br><br> **ReadOnly** <br><br>
# **ReadWrite** <br><br> Default: **None** for Standard storage.
# **ReadOnly** for Premium storage. Possible values include: 'None',
# 'ReadOnly', 'ReadWrite'
attr_accessor :caching
# @return [Boolean] Specifies whether writeAccelerator should be enabled
# or disabled on the disk.
attr_accessor :write_accelerator_enabled
# @return [DiffDiskSettings] Specifies the ephemeral Disk Settings for
# the operating system disk used by the virtual machine.
attr_accessor :diff_disk_settings
# @return [DiskCreateOptionTypes] Specifies how the virtual machine
# should be created.<br><br> Possible values are:<br><br> **Attach**
# \u2013 This value is used when you are using a specialized disk to
# create the virtual machine.<br><br> **FromImage** \u2013 This value is
# used when you are using an image to create the virtual machine. If you
# are using a platform image, you also use the imageReference element
# described above. If you are using a marketplace image, you also use
# the plan element previously described. Possible values include:
# 'FromImage', 'Empty', 'Attach'
attr_accessor :create_option
# @return [Integer] Specifies the size of an empty data disk in
# gigabytes. This element can be used to overwrite the size of the disk
# in a virtual machine image. <br><br> This value cannot be larger than
# 1023 GB
attr_accessor :disk_size_gb
# @return [ManagedDiskParameters] The managed disk parameters.
attr_accessor :managed_disk
#
# Mapper for OSDisk class as Ruby Hash.
# This will be used for serialization/deserialization.
#
def self.mapper()
{
client_side_validation: true,
required: false,
serialized_name: 'OSDisk',
type: {
name: 'Composite',
class_name: 'OSDisk',
model_properties: {
os_type: {
client_side_validation: true,
required: false,
serialized_name: 'osType',
type: {
name: 'Enum',
module: 'OperatingSystemTypes'
}
},
encryption_settings: {
client_side_validation: true,
required: false,
serialized_name: 'encryptionSettings',
type: {
name: 'Composite',
class_name: 'DiskEncryptionSettings'
}
},
name: {
client_side_validation: true,
required: false,
serialized_name: 'name',
type: {
name: 'String'
}
},
vhd: {
client_side_validation: true,
required: false,
serialized_name: 'vhd',
type: {
name: 'Composite',
class_name: 'VirtualHardDisk'
}
},
image: {
client_side_validation: true,
required: false,
serialized_name: 'image',
type: {
name: 'Composite',
class_name: 'VirtualHardDisk'
}
},
caching: {
client_side_validation: true,
required: false,
serialized_name: 'caching',
type: {
name: 'Enum',
module: 'CachingTypes'
}
},
write_accelerator_enabled: {
client_side_validation: true,
required: false,
serialized_name: 'writeAcceleratorEnabled',
type: {
name: 'Boolean'
}
},
diff_disk_settings: {
client_side_validation: true,
required: false,
serialized_name: 'diffDiskSettings',
type: {
name: 'Composite',
class_name: 'DiffDiskSettings'
}
},
create_option: {
client_side_validation: true,
required: true,
serialized_name: 'createOption',
type: {
name: 'String'
}
},
disk_size_gb: {
client_side_validation: true,
required: false,
serialized_name: 'diskSizeGB',
type: {
name: 'Number'
}
},
managed_disk: {
client_side_validation: true,
required: false,
serialized_name: 'managedDisk',
type: {
name: 'Composite',
class_name: 'ManagedDiskParameters'
}
}
}
}
}
end
end
end
end
| 36.031414 | 162 | 0.540105 |
797f834cb4f9476219aae2dfb30beea1747d23f1 | 29,091 | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'google/apis/core/base_service'
require 'google/apis/core/json_representation'
require 'google/apis/core/hashable'
require 'google/apis/errors'
module Google
module Apis
module RedisV1beta1
# Google Cloud Memorystore for Redis API
#
# Creates and manages Redis instances on the Google Cloud Platform.
#
# @example
# require 'google/apis/redis_v1beta1'
#
# Redis = Google::Apis::RedisV1beta1 # Alias the module
# service = Redis::CloudRedisService.new
#
# @see https://cloud.google.com/memorystore/docs/redis/
class CloudRedisService < Google::Apis::Core::BaseService
# @return [String]
# API key. Your API key identifies your project and provides you with API access,
# quota, and reports. Required unless you provide an OAuth 2.0 token.
attr_accessor :key
# @return [String]
# Available to use for quota purposes for server-side applications. Can be any
# arbitrary string assigned to a user, but should not exceed 40 characters.
attr_accessor :quota_user
def initialize
super('https://redis.googleapis.com/', '')
@batch_path = 'batch'
end
# Gets information about a location.
# @param [String] name
# Resource name for the location.
# @param [String] fields
# Selector specifying which fields to include in a partial response.
# @param [String] quota_user
# Available to use for quota purposes for server-side applications. Can be any
# arbitrary string assigned to a user, but should not exceed 40 characters.
# @param [Google::Apis::RequestOptions] options
# Request-specific options
#
# @yield [result, err] Result & error if block supplied
# @yieldparam result [Google::Apis::RedisV1beta1::Location] parsed result object
# @yieldparam err [StandardError] error object if request failed
#
# @return [Google::Apis::RedisV1beta1::Location]
#
# @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried
# @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification
# @raise [Google::Apis::AuthorizationError] Authorization is required
def get_project_location(name, fields: nil, quota_user: nil, options: nil, &block)
command = make_simple_command(:get, 'v1beta1/{+name}', options)
command.response_representation = Google::Apis::RedisV1beta1::Location::Representation
command.response_class = Google::Apis::RedisV1beta1::Location
command.params['name'] = name unless name.nil?
command.query['fields'] = fields unless fields.nil?
command.query['quotaUser'] = quota_user unless quota_user.nil?
execute_or_queue_command(command, &block)
end
# Lists information about the supported locations for this service.
# @param [String] name
# The resource that owns the locations collection, if applicable.
# @param [String] filter
# The standard list filter.
# @param [Fixnum] page_size
# The standard list page size.
# @param [String] page_token
# The standard list page token.
# @param [String] fields
# Selector specifying which fields to include in a partial response.
# @param [String] quota_user
# Available to use for quota purposes for server-side applications. Can be any
# arbitrary string assigned to a user, but should not exceed 40 characters.
# @param [Google::Apis::RequestOptions] options
# Request-specific options
#
# @yield [result, err] Result & error if block supplied
# @yieldparam result [Google::Apis::RedisV1beta1::ListLocationsResponse] parsed result object
# @yieldparam err [StandardError] error object if request failed
#
# @return [Google::Apis::RedisV1beta1::ListLocationsResponse]
#
# @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried
# @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification
# @raise [Google::Apis::AuthorizationError] Authorization is required
def list_project_locations(name, filter: nil, page_size: nil, page_token: nil, fields: nil, quota_user: nil, options: nil, &block)
command = make_simple_command(:get, 'v1beta1/{+name}/locations', options)
command.response_representation = Google::Apis::RedisV1beta1::ListLocationsResponse::Representation
command.response_class = Google::Apis::RedisV1beta1::ListLocationsResponse
command.params['name'] = name unless name.nil?
command.query['filter'] = filter unless filter.nil?
command.query['pageSize'] = page_size unless page_size.nil?
command.query['pageToken'] = page_token unless page_token.nil?
command.query['fields'] = fields unless fields.nil?
command.query['quotaUser'] = quota_user unless quota_user.nil?
execute_or_queue_command(command, &block)
end
# Creates a Redis instance based on the specified tier and memory size.
# By default, the instance is accessible from the project's
# [default network](/compute/docs/networks-and-firewalls#networks).
# The creation is executed asynchronously and callers may check the returned
# operation to track its progress. Once the operation is completed the Redis
# instance will be fully functional. Completed longrunning.Operation will
# contain the new instance object in the response field.
# The returned operation is automatically deleted after a few hours, so there
# is no need to call DeleteOperation.
# @param [String] parent
# Required. The resource name of the instance location using the form:
# `projects/`project_id`/locations/`location_id``
# where `location_id` refers to a GCP region
# @param [Google::Apis::RedisV1beta1::Instance] instance_object
# @param [String] instance_id
# Required. The logical name of the Redis instance in the customer project
# with the following restrictions:
# * Must contain only lowercase letters, numbers, and hyphens.
# * Must start with a letter.
# * Must be between 1-40 characters.
# * Must end with a number or a letter.
# * Must be unique within the customer project / location
# @param [String] fields
# Selector specifying which fields to include in a partial response.
# @param [String] quota_user
# Available to use for quota purposes for server-side applications. Can be any
# arbitrary string assigned to a user, but should not exceed 40 characters.
# @param [Google::Apis::RequestOptions] options
# Request-specific options
#
# @yield [result, err] Result & error if block supplied
# @yieldparam result [Google::Apis::RedisV1beta1::Operation] parsed result object
# @yieldparam err [StandardError] error object if request failed
#
# @return [Google::Apis::RedisV1beta1::Operation]
#
# @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried
# @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification
# @raise [Google::Apis::AuthorizationError] Authorization is required
def create_project_location_instance(parent, instance_object = nil, instance_id: nil, fields: nil, quota_user: nil, options: nil, &block)
command = make_simple_command(:post, 'v1beta1/{+parent}/instances', options)
command.request_representation = Google::Apis::RedisV1beta1::Instance::Representation
command.request_object = instance_object
command.response_representation = Google::Apis::RedisV1beta1::Operation::Representation
command.response_class = Google::Apis::RedisV1beta1::Operation
command.params['parent'] = parent unless parent.nil?
command.query['instanceId'] = instance_id unless instance_id.nil?
command.query['fields'] = fields unless fields.nil?
command.query['quotaUser'] = quota_user unless quota_user.nil?
execute_or_queue_command(command, &block)
end
# Deletes a specific Redis instance. Instance stops serving and data is
# deleted.
# @param [String] name
# Required. Redis instance resource name using the form:
# `projects/`project_id`/locations/`location_id`/instances/`instance_id``
# where `location_id` refers to a GCP region
# @param [String] fields
# Selector specifying which fields to include in a partial response.
# @param [String] quota_user
# Available to use for quota purposes for server-side applications. Can be any
# arbitrary string assigned to a user, but should not exceed 40 characters.
# @param [Google::Apis::RequestOptions] options
# Request-specific options
#
# @yield [result, err] Result & error if block supplied
# @yieldparam result [Google::Apis::RedisV1beta1::Operation] parsed result object
# @yieldparam err [StandardError] error object if request failed
#
# @return [Google::Apis::RedisV1beta1::Operation]
#
# @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried
# @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification
# @raise [Google::Apis::AuthorizationError] Authorization is required
def delete_project_location_instance(name, fields: nil, quota_user: nil, options: nil, &block)
command = make_simple_command(:delete, 'v1beta1/{+name}', options)
command.response_representation = Google::Apis::RedisV1beta1::Operation::Representation
command.response_class = Google::Apis::RedisV1beta1::Operation
command.params['name'] = name unless name.nil?
command.query['fields'] = fields unless fields.nil?
command.query['quotaUser'] = quota_user unless quota_user.nil?
execute_or_queue_command(command, &block)
end
# Gets the details of a specific Redis instance.
# @param [String] name
# Required. Redis instance resource name using the form:
# `projects/`project_id`/locations/`location_id`/instances/`instance_id``
# where `location_id` refers to a GCP region
# @param [String] fields
# Selector specifying which fields to include in a partial response.
# @param [String] quota_user
# Available to use for quota purposes for server-side applications. Can be any
# arbitrary string assigned to a user, but should not exceed 40 characters.
# @param [Google::Apis::RequestOptions] options
# Request-specific options
#
# @yield [result, err] Result & error if block supplied
# @yieldparam result [Google::Apis::RedisV1beta1::Instance] parsed result object
# @yieldparam err [StandardError] error object if request failed
#
# @return [Google::Apis::RedisV1beta1::Instance]
#
# @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried
# @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification
# @raise [Google::Apis::AuthorizationError] Authorization is required
def get_project_location_instance(name, fields: nil, quota_user: nil, options: nil, &block)
command = make_simple_command(:get, 'v1beta1/{+name}', options)
command.response_representation = Google::Apis::RedisV1beta1::Instance::Representation
command.response_class = Google::Apis::RedisV1beta1::Instance
command.params['name'] = name unless name.nil?
command.query['fields'] = fields unless fields.nil?
command.query['quotaUser'] = quota_user unless quota_user.nil?
execute_or_queue_command(command, &block)
end
# Lists all Redis instances owned by a project in either the specified
# location (region) or all locations.
# The location should have the following format:
# * `projects/`project_id`/locations/`location_id``
# If `location_id` is specified as `-` (wildcard), then all regions
# available to the project are queried, and the results are aggregated.
# @param [String] parent
# Required. The resource name of the instance location using the form:
# `projects/`project_id`/locations/`location_id``
# where `location_id` refers to a GCP region
# @param [Fixnum] page_size
# The maximum number of items to return.
# If not specified, a default value of 1000 will be used by the service.
# Regardless of the page_size value, the response may include a partial list
# and a caller should only rely on response's
# next_page_token
# to determine if there are more instances left to be queried.
# @param [String] page_token
# The next_page_token value returned from a previous List request,
# if any.
# @param [String] fields
# Selector specifying which fields to include in a partial response.
# @param [String] quota_user
# Available to use for quota purposes for server-side applications. Can be any
# arbitrary string assigned to a user, but should not exceed 40 characters.
# @param [Google::Apis::RequestOptions] options
# Request-specific options
#
# @yield [result, err] Result & error if block supplied
# @yieldparam result [Google::Apis::RedisV1beta1::ListInstancesResponse] parsed result object
# @yieldparam err [StandardError] error object if request failed
#
# @return [Google::Apis::RedisV1beta1::ListInstancesResponse]
#
# @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried
# @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification
# @raise [Google::Apis::AuthorizationError] Authorization is required
def list_project_location_instances(parent, page_size: nil, page_token: nil, fields: nil, quota_user: nil, options: nil, &block)
  # Build the GET request against the aggregated-or-regional instances collection.
  cmd = make_simple_command(:get, 'v1beta1/{+parent}/instances', options)
  cmd.response_representation = Google::Apis::RedisV1beta1::ListInstancesResponse::Representation
  cmd.response_class = Google::Apis::RedisV1beta1::ListInstancesResponse
  cmd.params['parent'] = parent unless parent.nil?
  # Only set query parameters the caller actually supplied.
  { 'pageSize' => page_size, 'pageToken' => page_token,
    'fields' => fields, 'quotaUser' => quota_user }.each do |param, value|
    cmd.query[param] = value unless value.nil?
  end
  execute_or_queue_command(cmd, &block)
end
# Updates the metadata and configuration of a specific Redis instance.
# Completed longrunning.Operation will contain the new instance object
# in the response field. The returned operation is automatically deleted
# after a few hours, so there is no need to call DeleteOperation.
# @param [String] name
# Required. Unique name of the resource in this scope including project and
# location using the form:
# `projects/`project_id`/locations/`location_id`/instances/`instance_id``
# Note: Redis instances are managed and addressed at regional level so
# location_id here refers to a GCP region; however, users may choose which
# specific zone (or collection of zones for cross-zone instances) an instance
# should be provisioned in. Refer to [location_id] and
# [alternative_location_id] fields for more details.
# @param [Google::Apis::RedisV1beta1::Instance] instance_object
# @param [String] update_mask
# Required. Mask of fields to update. At least one path must be supplied in
# this field. The elements of the repeated paths field may only include these
# fields from Instance:
# * `displayName`
# * `labels`
# * `memorySizeGb`
# * `redisConfig`
# @param [String] fields
# Selector specifying which fields to include in a partial response.
# @param [String] quota_user
# Available to use for quota purposes for server-side applications. Can be any
# arbitrary string assigned to a user, but should not exceed 40 characters.
# @param [Google::Apis::RequestOptions] options
# Request-specific options
#
# @yield [result, err] Result & error if block supplied
# @yieldparam result [Google::Apis::RedisV1beta1::Operation] parsed result object
# @yieldparam err [StandardError] error object if request failed
#
# @return [Google::Apis::RedisV1beta1::Operation]
#
# @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried
# @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification
# @raise [Google::Apis::AuthorizationError] Authorization is required
def patch_project_location_instance(name, instance_object = nil, update_mask: nil, fields: nil, quota_user: nil, options: nil, &block)
  # PATCH the instance resource; the response is a long-running Operation.
  cmd = make_simple_command(:patch, 'v1beta1/{+name}', options)
  cmd.request_representation = Google::Apis::RedisV1beta1::Instance::Representation
  cmd.request_object = instance_object
  cmd.response_representation = Google::Apis::RedisV1beta1::Operation::Representation
  cmd.response_class = Google::Apis::RedisV1beta1::Operation
  cmd.params['name'] = name unless name.nil?
  # Only set query parameters the caller actually supplied.
  { 'updateMask' => update_mask, 'fields' => fields, 'quotaUser' => quota_user }.each do |param, value|
    cmd.query[param] = value unless value.nil?
  end
  execute_or_queue_command(cmd, &block)
end
# Starts asynchronous cancellation on a long-running operation. The server
# makes a best effort to cancel the operation, but success is not
# guaranteed. If the server doesn't support this method, it returns
# `google.rpc.Code.UNIMPLEMENTED`. Clients can use
# Operations.GetOperation or
# other methods to check whether the cancellation succeeded or whether the
# operation completed despite cancellation. On successful cancellation,
# the operation is not deleted; instead, it becomes an operation with
# an Operation.error value with a google.rpc.Status.code of 1,
# corresponding to `Code.CANCELLED`.
# @param [String] name
# The name of the operation resource to be cancelled.
# @param [String] fields
# Selector specifying which fields to include in a partial response.
# @param [String] quota_user
# Available to use for quota purposes for server-side applications. Can be any
# arbitrary string assigned to a user, but should not exceed 40 characters.
# @param [Google::Apis::RequestOptions] options
# Request-specific options
#
# @yield [result, err] Result & error if block supplied
# @yieldparam result [Google::Apis::RedisV1beta1::Empty] parsed result object
# @yieldparam err [StandardError] error object if request failed
#
# @return [Google::Apis::RedisV1beta1::Empty]
#
# @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried
# @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification
# @raise [Google::Apis::AuthorizationError] Authorization is required
def cancel_project_location_operation(name, fields: nil, quota_user: nil, options: nil, &block)
  # POST to the :cancel custom verb; success yields an Empty body.
  cmd = make_simple_command(:post, 'v1beta1/{+name}:cancel', options)
  cmd.response_representation = Google::Apis::RedisV1beta1::Empty::Representation
  cmd.response_class = Google::Apis::RedisV1beta1::Empty
  cmd.params['name'] = name unless name.nil?
  { 'fields' => fields, 'quotaUser' => quota_user }.each do |param, value|
    cmd.query[param] = value unless value.nil?
  end
  execute_or_queue_command(cmd, &block)
end
# Deletes a long-running operation. This method indicates that the client is
# no longer interested in the operation result. It does not cancel the
# operation. If the server doesn't support this method, it returns
# `google.rpc.Code.UNIMPLEMENTED`.
# @param [String] name
# The name of the operation resource to be deleted.
# @param [String] fields
# Selector specifying which fields to include in a partial response.
# @param [String] quota_user
# Available to use for quota purposes for server-side applications. Can be any
# arbitrary string assigned to a user, but should not exceed 40 characters.
# @param [Google::Apis::RequestOptions] options
# Request-specific options
#
# @yield [result, err] Result & error if block supplied
# @yieldparam result [Google::Apis::RedisV1beta1::Empty] parsed result object
# @yieldparam err [StandardError] error object if request failed
#
# @return [Google::Apis::RedisV1beta1::Empty]
#
# @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried
# @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification
# @raise [Google::Apis::AuthorizationError] Authorization is required
def delete_project_location_operation(name, fields: nil, quota_user: nil, options: nil, &block)
  # DELETE the operation record itself (does not cancel the underlying work).
  cmd = make_simple_command(:delete, 'v1beta1/{+name}', options)
  cmd.response_representation = Google::Apis::RedisV1beta1::Empty::Representation
  cmd.response_class = Google::Apis::RedisV1beta1::Empty
  cmd.params['name'] = name unless name.nil?
  { 'fields' => fields, 'quotaUser' => quota_user }.each do |param, value|
    cmd.query[param] = value unless value.nil?
  end
  execute_or_queue_command(cmd, &block)
end
# Gets the latest state of a long-running operation. Clients can use this
# method to poll the operation result at intervals as recommended by the API
# service.
# @param [String] name
# The name of the operation resource.
# @param [String] fields
# Selector specifying which fields to include in a partial response.
# @param [String] quota_user
# Available to use for quota purposes for server-side applications. Can be any
# arbitrary string assigned to a user, but should not exceed 40 characters.
# @param [Google::Apis::RequestOptions] options
# Request-specific options
#
# @yield [result, err] Result & error if block supplied
# @yieldparam result [Google::Apis::RedisV1beta1::Operation] parsed result object
# @yieldparam err [StandardError] error object if request failed
#
# @return [Google::Apis::RedisV1beta1::Operation]
#
# @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried
# @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification
# @raise [Google::Apis::AuthorizationError] Authorization is required
def get_project_location_operation(name, fields: nil, quota_user: nil, options: nil, &block)
  # GET the current state of a long-running operation for polling.
  cmd = make_simple_command(:get, 'v1beta1/{+name}', options)
  cmd.response_representation = Google::Apis::RedisV1beta1::Operation::Representation
  cmd.response_class = Google::Apis::RedisV1beta1::Operation
  cmd.params['name'] = name unless name.nil?
  { 'fields' => fields, 'quotaUser' => quota_user }.each do |param, value|
    cmd.query[param] = value unless value.nil?
  end
  execute_or_queue_command(cmd, &block)
end
# Lists operations that match the specified filter in the request. If the
# server doesn't support this method, it returns `UNIMPLEMENTED`.
# NOTE: the `name` binding allows API services to override the binding
# to use different resource name schemes, such as `users/*/operations`. To
# override the binding, API services can add a binding such as
# `"/v1/`name=users/*`/operations"` to their service configuration.
# For backwards compatibility, the default name includes the operations
# collection id, however overriding users must ensure the name binding
# is the parent resource, without the operations collection id.
# @param [String] name
# The name of the operation's parent resource.
# @param [String] filter
# The standard list filter.
# @param [Fixnum] page_size
# The standard list page size.
# @param [String] page_token
# The standard list page token.
# @param [String] fields
# Selector specifying which fields to include in a partial response.
# @param [String] quota_user
# Available to use for quota purposes for server-side applications. Can be any
# arbitrary string assigned to a user, but should not exceed 40 characters.
# @param [Google::Apis::RequestOptions] options
# Request-specific options
#
# @yield [result, err] Result & error if block supplied
# @yieldparam result [Google::Apis::RedisV1beta1::ListOperationsResponse] parsed result object
# @yieldparam err [StandardError] error object if request failed
#
# @return [Google::Apis::RedisV1beta1::ListOperationsResponse]
#
# @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried
# @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification
# @raise [Google::Apis::AuthorizationError] Authorization is required
def list_project_location_operations(name, filter: nil, page_size: nil, page_token: nil, fields: nil, quota_user: nil, options: nil, &block)
  # GET the operations collection under the parent resource, with the
  # standard list filter/pagination parameters.
  cmd = make_simple_command(:get, 'v1beta1/{+name}/operations', options)
  cmd.response_representation = Google::Apis::RedisV1beta1::ListOperationsResponse::Representation
  cmd.response_class = Google::Apis::RedisV1beta1::ListOperationsResponse
  cmd.params['name'] = name unless name.nil?
  { 'filter' => filter, 'pageSize' => page_size, 'pageToken' => page_token,
    'fields' => fields, 'quotaUser' => quota_user }.each do |param, value|
    cmd.query[param] = value unless value.nil?
  end
  execute_or_queue_command(cmd, &block)
end
protected
# Applies service-wide defaults (API key and quota user, when configured)
# to every outgoing command's query string.
def apply_command_defaults(command)
  { 'key' => key, 'quotaUser' => quota_user }.each do |param, value|
    command.query[param] = value unless value.nil?
  end
end
end
end
end
end
| 58.415663 | 148 | 0.668282 |
28db5c5bd99790a9864ecc3215bf267a3fdc1902 | 4,320 | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Deployments::AutoRollbackService, :clean_gitlab_redis_rate_limiting do
let_it_be(:maintainer) { create(:user) }
let_it_be(:project, refind: true) { create(:project, :repository) }
let_it_be(:environment, refind: true) { create(:environment, project: project) }
let_it_be(:commits) { project.repository.commits('master', limit: 2) }
let(:service) { described_class.new(project, nil) }
before_all do
project.add_maintainer(maintainer)
project.update!(auto_rollback_enabled: true)
end
shared_examples_for 'rollback failure' do
it 'returns an error' do
expect(subject[:status]).to eq(:error)
expect(subject[:message]).to eq(message)
end
end
describe '#execute' do
subject { service.execute(environment) }
before do
stub_licensed_features(auto_rollback: true)
commits.reverse_each { |commit| create_deployment(commit.id) }
end
it 'successfully roll back a deployment' do
expect { subject }.to change { Deployment.count }.by(1)
expect(subject[:status]).to eq(:success)
expect(subject[:deployment].sha).to eq(commits[1].id)
end
context 'when auto_rollback checkbox is disabled on the project' do
before do
environment.project.auto_rollback_enabled = false
end
it_behaves_like 'rollback failure' do
let(:message) { 'Auto Rollback is not enabled on the project.' }
end
end
context 'when project does not have an sufficient license' do
before do
stub_licensed_features(auto_rollback: false)
end
it_behaves_like 'rollback failure' do
let(:message) { 'Auto Rollback is not enabled on the project.' }
end
end
context 'when there are running deployments ' do
before do
create(:deployment, :running, environment: environment)
end
it_behaves_like 'rollback failure' do
let(:message) { 'There are running deployments on the environment.' }
end
end
context 'when auto rollback was triggered recently' do
before do
allow(::Gitlab::ApplicationRateLimiter).to receive(:throttled?) { true }
end
it_behaves_like 'rollback failure' do
let(:message) { 'Auto Rollback was recentlly trigged for the environment. It will be re-activated after a minute.' }
end
end
context 'when there are no deployments on the environment' do
before do
environment.deployments.fast_destroy_all
end
it_behaves_like 'rollback failure' do
let(:message) { 'Failed to find a rollback target.' }
end
end
context 'when there are no deployed commits in the repository' do
before do
environment.last_deployment.update!(sha: 'not-exist')
end
it_behaves_like 'rollback failure' do
let(:message) { 'Failed to find a rollback target.' }
end
end
context "when rollback target's deployable is not available" do
before do
environment.all_deployments.first.deployable.destroy!
end
it_behaves_like 'rollback failure' do
let(:message) { 'Failed to find a rollback target.' }
end
end
context "when rollback target's deployable is not retryable" do
before do
environment.all_deployments.first.deployable.degenerate!
end
it_behaves_like 'rollback failure' do
let(:message) { 'Failed to find a rollback target.' }
end
end
context "when the user who performed deployments is no longer a project member" do
let(:external_user) { create(:user) }
before do
environment.all_deployments.first.deployable.update!(user: external_user)
end
it 'raises an error' do
expect { subject }.to raise_error(Gitlab::Access::AccessDeniedError)
end
end
def create_deployment(commit_id)
attributes = { project: project, ref: 'master', user: maintainer }
pipeline = create(:ci_pipeline, :success, sha: commit_id, **attributes)
build = create(:ci_build, :success, pipeline: pipeline, environment: environment.name, **attributes)
create(:deployment, :success, environment: environment, deployable: build, sha: commit_id, **attributes)
end
end
end
| 30.857143 | 124 | 0.677546 |
e2d64f491f19680a534057411c2705e568e5bc6d | 4,250 | # This file is auto-generated from the current state of the database. Instead of editing this file,
# please use the migrations feature of Active Record to incrementally modify your database, and
# then regenerate this schema definition.
#
# Note that this schema.rb definition is the authoritative source for your database schema. If you need
# to create the application database on another system, you should be using db:schema:load, not running
# all the migrations from scratch. The latter is a flawed and unsustainable approach (the more migrations
# you'll amass, the slower it'll run and the greater likelihood for issues).
#
# It's strongly recommended to check this file into your version control system.
ActiveRecord::Schema.define(:version => 20090929164633) do
create_table "config", :force => true do |t|
t.string "key", :limit => 40, :default => "", :null => false
t.string "value", :default => ""
end
add_index "config", ["key"], :name => "key", :unique => true
create_table "extension_meta", :force => true do |t|
t.string "name"
t.integer "schema_version", :default => 0
t.boolean "enabled", :default => true
end
create_table "layouts", :force => true do |t|
t.string "name", :limit => 100
t.text "content"
t.datetime "created_at"
t.datetime "updated_at"
t.integer "created_by_id"
t.integer "updated_by_id"
t.string "content_type", :limit => 40
t.integer "lock_version", :default => 0
end
create_table "page_parts", :force => true do |t|
t.string "name", :limit => 100
t.string "filter_id", :limit => 25
t.text "content"
t.integer "page_id"
end
add_index "page_parts", ["page_id", "name"], :name => "parts_by_page"
create_table "pages", :force => true do |t|
t.string "title"
t.string "slug", :limit => 100
t.string "breadcrumb", :limit => 160
t.string "class_name", :limit => 25
t.integer "status_id", :default => 1, :null => false
t.integer "parent_id"
t.integer "layout_id"
t.datetime "created_at"
t.datetime "updated_at"
t.datetime "published_at"
t.integer "created_by_id"
t.integer "updated_by_id"
t.boolean "virtual", :default => false, :null => false
t.integer "lock_version", :default => 0
t.string "description"
t.string "keywords"
end
add_index "pages", ["class_name"], :name => "pages_class_name"
add_index "pages", ["parent_id"], :name => "pages_parent_id"
add_index "pages", ["slug", "parent_id"], :name => "pages_child_slug"
add_index "pages", ["virtual", "status_id"], :name => "pages_published"
create_table "sessions", :force => true do |t|
t.string "session_id"
t.text "data"
t.datetime "updated_at"
end
add_index "sessions", ["session_id"], :name => "index_sessions_on_session_id"
add_index "sessions", ["updated_at"], :name => "index_sessions_on_updated_at"
create_table "snippets", :force => true do |t|
t.string "name", :limit => 100, :default => "", :null => false
t.string "filter_id", :limit => 25
t.text "content"
t.datetime "created_at"
t.datetime "updated_at"
t.integer "created_by_id"
t.integer "updated_by_id"
t.integer "lock_version", :default => 0
end
add_index "snippets", ["name"], :name => "name", :unique => true
create_table "users", :force => true do |t|
t.string "name", :limit => 100
t.string "email"
t.string "login", :limit => 40, :default => "", :null => false
t.string "password", :limit => 40
t.datetime "created_at"
t.datetime "updated_at"
t.integer "created_by_id"
t.integer "updated_by_id"
t.boolean "admin", :default => false, :null => false
t.boolean "designer", :default => false, :null => false
t.text "notes"
t.integer "lock_version", :default => 0
t.string "salt"
t.string "session_token"
end
add_index "users", ["login"], :name => "login", :unique => true
end
| 37.610619 | 105 | 0.608471 |
875f924770c0ee840c4b243d1bbd798ed04a46c2 | 2,941 | {
name: 'UTEP',
num_matrices: '3',
notes: 'Matrices from Lenka Dubcova, Jakub Cerveny, and Pavel Solin
Univ. of Texas at El Paso. From a PDE solver. The fill-in when working on
a leading submatrix can be higher than for the whole matrix.
Subsets of interest are the first k rows and columns where:
matrix Dubcova1 k = 12033
matrix Dubcova2 k = 48641
matrix Dubcova3 k = 81153
Details below:
1 Dubcova1 ==========================================================
---------------- complete matrix:
name: \'UTEP/Dubcova1\'
title: \'Univ. Texas at El Paso, from a PDE solver\'
id: 1847
A: [16129x16129 double]
date: \'2007\'
author: \'L. Dubcova, J. Cerveny, and P. Solin\'
ed: \'T. Davis\'
kind: \'2D/3D problem\'
UMFPACK resid 1.23002e-10, tot time 0.374201 with AMD
nnz L: 576080 (with AMD), ordering time: 0.035546
nnz L: 608040 with METIS, ordering time: 0.264172
UMFPACK resid 1.22938e-10, tot time 0.625973 with METIS
---------------- small subset:
UMFPACK resid 1.20162e-10, tot time 0.406472 with AMD
nnz L: 814164 (with AMD), ordering time: 0.024683
nnz L: 571918 with METIS, ordering time: 0.169995
UMFPACK resid 1.19592e-10, tot time 0.458673 with METIS
2 Dubcova2 ==========================================================
---------------- complete matrix:
name: \'UTEP/Dubcova2\'
title: \'Univ. Texas at El Paso, from a PDE solver\'
id: 1848
A: [65025x65025 double]
date: \'2007\'
author: \'L. Dubcova, J. Cerveny, and P. Solin\'
ed: \'T. Davis\'
kind: \'2D/3D problem\'
UMFPACK resid 1.91486e-09, tot time 1.991 with AMD
nnz L: 2956869 (with AMD), ordering time: 0.168376
nnz L: 3018738 with METIS, ordering time: 1.42383
UMFPACK resid 1.9355e-09, tot time 3.30308 with METIS
---------------- small subset:
UMFPACK resid 1.89023e-09, tot time 2.54658 with AMD
nnz L: 4357099 (with AMD), ordering time: 0.121351
nnz L: 2835807 with METIS, ordering time: 0.92858
UMFPACK resid 1.87189e-09, tot time 2.48607 with METIS
3 Dubcova3 ==========================================================
---------------- complete matrix:
name: \'UTEP/Dubcova3\'
title: \'Univ. Texas at El Paso, from a PDE solver\'
id: 1849
A: [146689x146689 double]
date: \'2007\'
author: \'L. Dubcova, J. Cerveny, and P. Solin\'
ed: \'T. Davis\'
kind: \'2D/3D problem\'
Zeros: [146689x146689 double]
UMFPACK resid 2.04934e-09, tot time 5.38897 with AMD
nnz L: 7252162 (with AMD), ordering time: 0.331024
nnz L: 7539100 with METIS, ordering time: 2.49977
UMFPACK resid 2.05082e-09, tot time 7.39527 with METIS
---------------- small subset:
UMFPACK resid 2.04213e-09, tot time 5.87802 with AMD
nnz L: 10085274 (with AMD), ordering time: 0.195067
nnz L: 6536588 with METIS, ordering time: 1.11139
UMFPACK resid 2.03453e-09, tot time 4.6961 with METIS
',
}
| 31.287234 | 76 | 0.609997 |
01aa3d11d80ad1b116c95a6e3056490db5ee9430 | 1,349 | describe 'Search Result Block', type: :feature, js: true do
let(:exhibit) { FactoryBot.create(:exhibit) }
let(:exhibit_curator) { FactoryBot.create(:exhibit_curator, exhibit: exhibit) }
# Bang-lets so the page and search exist before the editor page is visited.
let!(:feature_page) { FactoryBot.create(:feature_page, exhibit: exhibit) }
let!(:alt_search) { FactoryBot.create(:search, title: 'Alt. Search', exhibit: exhibit) }
before do
  login_as exhibit_curator
  # Publish every search so it is selectable as a browse category.
  exhibit.searches.each { |x| x.update published: true }
  visit spotlight.edit_exhibit_feature_page_path(exhibit, feature_page)
  add_widget 'search_results'
end
pending 'allows a curator to select from existing browse categories' do
  pending('Prefetched autocomplete does not work the same way as solr-backed autocompletes')
  fill_in_typeahead_field with: 'All Exhibit Items'
  check 'Gallery'
  check 'Slideshow'
  save_page
  expect(page).not_to have_content 'per page'
  expect(page).not_to have_content 'Sort by'
  # The two view types checked above should be rendered, and the
  # unchecked one (list) should be absent.
  within('.view-type-group') do
    expect(page).not_to have_css('.view-type-list')
    expect(page).to have_css('.view-type-gallery')
    expect(page).to have_css('.view-type-slideshow')
  end
  # Documents should exist
  expect(page).to have_css('.documents .document')
end
| 32.902439 | 94 | 0.716827 |
91d53530980627692cd13590f53ca9f3ad4e464d | 270 | class CreateBarbers < ActiveRecord::Migration[6.0]
# Creates the barbers table and seeds the initial records.
def change
  create_table :barbers do |t|
    t.text :name

    t.timestamps
  end

  # Seeding must not use bare model calls inside `change`: the migration
  # recorder cannot reverse `Barber.create`, so `db:rollback` would raise
  # ActiveRecord::IrreversibleMigration. Wrapping the seeds in `reversible`
  # keeps the migration reversible (the rows disappear with the dropped
  # table on rollback).
  reversible do |dir|
    dir.up do
      # Pick up the freshly created columns before inserting.
      Barber.reset_column_information
      ['Jessie Pinkman', 'Walter White', 'Gus Fring'].each do |name|
        Barber.create!(name: name)
      end
    end
  end
end
end
| 18 | 50 | 0.67037 |
210ea75d378d2982e109629fd1cc0a854aa476b1 | 253 | module TZInfo
module Definitions
module Etc
# Fixed-offset zone Etc/GMT+10. Note the POSIX sign convention used by
# tzdata: the name says "+10" but the base UTC offset is -36000 seconds
# (i.e. 10 hours behind UTC).
module GMT__p__10
  include TimezoneDefinition

  timezone 'Etc/GMT+10' do |tz|
    tz.offset :o0, -36000, 0, :'GMT+10'
  end
end
end
end
end
| 16.866667 | 45 | 0.533597 |
39d06066a14c2e43c8ca6a25781138f2ce0d1c6a | 2,088 | class Okteta < Formula
desc "KDE hex editor for viewing and editing the raw data of files"
homepage "https://www.kde.org"
url "https://download.kde.org/stable/okteta/0.26.2/src/okteta-0.26.2.tar.xz"
sha256 "bb8d819af1d2d7ebb286542918985afcd1937f0ec6172180ff5b1acc5383684c"
head "git://anongit.kde.org/okteta.git"
depends_on "cmake" => :build
depends_on "KDE-mac/kde/kf5-extra-cmake-modules" => :build
depends_on "KDE-mac/kde/kf5-kdoctools" => :build
depends_on "ninja" => :build
depends_on "shared-mime-info" => :build
depends_on "hicolor-icon-theme"
depends_on "KDE-mac/kde/kf5-breeze-icons"
depends_on "KDE-mac/kde/kf5-kcmutils"
depends_on "KDE-mac/kde/kf5-knewstuff"
depends_on "KDE-mac/kde/kf5-kparts"
depends_on "qca"
# Configure, build and install okteta with CMake + Ninja, then record the
# Qt plugin search path in the installed app bundle.
def install
  cmake_args = std_cmake_args + %W[
    -DBUILD_TESTING=OFF
    -DKDE_INSTALL_QMLDIR=lib/qt5/qml
    -DKDE_INSTALL_PLUGINDIR=lib/qt5/plugins
    -DKDE_INSTALL_QTPLUGINDIR=lib/qt5/plugins
    -DCMAKE_INSTALL_BUNDLEDIR=#{bin}
    -DUPDATE_MIME_DATABASE_EXECUTABLE=OFF
  ]

  mkdir "build" do
    system "cmake", "-G", "Ninja", "..", *cmake_args
    system "ninja"
    system "ninja", "install"
    prefix.install "install_manifest.txt"
  end

  # Write QT_PLUGIN_PATH into the bundle's Info.plist so the launched app
  # can locate Qt plugins.
  qt_plugin_dir = `#{Formula["qt"].bin}/qtpaths --plugin-dir`.chomp
  system "/usr/libexec/PlistBuddy",
         "-c", "Add :LSEnvironment:QT_PLUGIN_PATH string \"#{qt_plugin_dir}\:#{HOMEBREW_PREFIX}/lib/qt5/plugins\"",
         "#{bin}/okteta.app/Contents/Info.plist"
end
# Refresh the shared MIME database and symlink the breeze icon bundle into
# okteta's share directory as icontheme.rcc.
def post_install
  system HOMEBREW_PREFIX/"bin/update-mime-database", HOMEBREW_PREFIX/"share/mime"
  okteta_share = HOMEBREW_PREFIX/"share/okteta"
  mkdir_p okteta_share
  ln_sf HOMEBREW_PREFIX/"share/icons/breeze/breeze-icons.rcc", okteta_share/"icontheme.rcc"
end
# Post-install message: points the user at the kde-mac tap's helper script
# that completes the remaining manual setup steps.
def caveats; <<~EOS
  You need to take some manual steps in order to make this formula work:
  "$(brew --repo kde-mac/kde)/tools/do-caveats.sh"
EOS
end
# Smoke test: run the bundled binary with --help and confirm the usage
# output mentions the flag.
test do
  assert `"#{bin}/okteta.app/Contents/MacOS/okteta" --help | grep -- --help` =~ /--help/
end
end
| 35.389831 | 109 | 0.693008 |
f82ee3772f162be26dd0d7e5a2067a520a445d3e | 1,036 | #
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
# Puppet parser function hdp_java_home: returns the JAVA_HOME lookup for the
# requested word size ("32"/"64"), defaulting to 64-bit when "size" is unset.
module Puppet::Parser::Functions
  newfunction(:hdp_java_home, :type => :rvalue) do
    bits = lookupvar("size")
    bits = "64" if bits.nil? || bits == :undefined
    lookupvar("::hdp::params::java#{bits}_home")
  end
end
| 34.533333 | 62 | 0.734556 |
ac04418cfc568fef5956466f6e70665db44b4d1c | 9,228 | =begin
#MailSlurp API
#MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://www.mailslurp.com/docs/) - [Examples](https://github.com/mailslurp/examples) repository
The version of the OpenAPI document: 6.5.2
Generated by: https://openapi-generator.tech
OpenAPI Generator version: 4.3.1
=end
require 'date'
module MailSlurpClient
# Paginated webhook entity. Page index starts at zero. Projection results may omit larger entity fields. For fetching a full entity use the projection ID with individual method calls.
class PageWebhookProjection
attr_accessor :content
attr_accessor :empty
attr_accessor :first
attr_accessor :last
attr_accessor :number
attr_accessor :number_of_elements
attr_accessor :pageable
attr_accessor :size
attr_accessor :sort
attr_accessor :total_elements
attr_accessor :total_pages
# Attribute mapping from ruby-style variable name to JSON key.
# Attribute mapping from ruby-style variable name to JSON key.
def self.attribute_map
  {
    content: :content,
    empty: :empty,
    first: :first,
    last: :last,
    number: :number,
    number_of_elements: :numberOfElements,
    pageable: :pageable,
    size: :size,
    sort: :sort,
    total_elements: :totalElements,
    total_pages: :totalPages
  }
end
# Attribute type mapping.
# Attribute type mapping consumed by the generated deserializer.
def self.openapi_types
  {
    content: :'Array<WebhookProjection>',
    empty: :Boolean,
    first: :Boolean,
    last: :Boolean,
    number: :Integer,
    number_of_elements: :Integer,
    pageable: :Pageable,
    size: :Integer,
    sort: :Sort,
    total_elements: :Integer,
    total_pages: :Integer
  }
end
# List of attributes with nullable: true
# Names of attributes that may legitimately be nil; none for this model.
def self.openapi_nullable
  Set.new
end
# Initializes the object
# @param [Hash] attributes Model attributes in the form of hash
# Initializes the object from a hash of attributes. Rejects non-hash input
# and any key that is not declared in attribute_map.
# @param [Hash] attributes Model attributes in the form of hash
def initialize(attributes = {})
  unless attributes.is_a?(Hash)
    fail ArgumentError, "The input argument (attributes) must be a hash in `MailSlurpClient::PageWebhookProjection` initialize method"
  end

  # Normalize keys to symbols, rejecting unknown attribute names.
  attributes = attributes.each_with_object({}) do |(k, v), h|
    unless self.class.attribute_map.key?(k.to_sym)
      fail ArgumentError, "`#{k}` is not a valid attribute in `MailSlurpClient::PageWebhookProjection`. Please check the name to make sure it's valid. List of attributes: " + self.class.attribute_map.keys.inspect
    end
    h[k.to_sym] = v
  end

  # :content is only assigned when the supplied value is actually an Array.
  if attributes.key?(:content) && (value = attributes[:content]).is_a?(Array)
    self.content = value
  end

  # All remaining attributes are plain scalar/object assignments.
  %i[empty first last number number_of_elements pageable size sort
     total_elements total_pages].each do |attr|
    send("#{attr}=", attributes[attr]) if attributes.key?(attr)
  end
end
# Show invalid properties with the reasons. Usually used together with valid?
# @return Array for valid properties with the reasons
# Show invalid properties with the reasons. Usually used together with valid?
# @return Array for valid properties with the reasons
def list_invalid_properties
  # This generated model declares no validations, so the list is always empty.
  []
end
# Check to see if the all the properties in the model are valid
# @return true if the model is valid
# Check to see if the all the properties in the model are valid
# @return true if the model is valid
def valid?
  # No constraints are declared on this model, so it is always valid.
  true
end
# Checks equality by comparing each attribute.
# @param [Object] Object to be compared
# Checks equality by comparing each attribute.
# @param [Object] o Object to be compared
def ==(o)
  return true if equal?(o)
  return false unless o.class == self.class

  # Compare every declared attribute pairwise; short-circuits on first mismatch.
  %i[content empty first last number number_of_elements pageable size sort
     total_elements total_pages].all? { |attr| send(attr) == o.send(attr) }
end
# @see the `==` method
# @param [Object] Object to be compared
# @see the `==` method
# @param [Object] o Object to be compared
def eql?(o)
  # Delegates to ==; paired with #hash so instances behave as hash keys.
  self == o
end
# Calculates hash code according to all attributes.
# @return [Integer] Hash code
# Calculates hash code according to all attributes.
# @return [Integer] Hash code
def hash
  state = %i[content empty first last number number_of_elements pageable size sort
             total_elements total_pages].map { |attr| send(attr) }
  state.hash
end
# Builds the object from hash
# @param [Hash] attributes Model attributes in the form of hash
# @return [Object] Returns the model itself
# Builds the object from hash
# @param [Hash] attributes Model attributes in the form of hash
# @return [Object] Returns the model itself
def self.build_from_hash(attributes)
  # Delegates to the instance-level builder on a freshly allocated object.
  new.build_from_hash(attributes)
end
# Builds the object from hash
# @param [Hash] attributes Model attributes in the form of hash
# @return [Object] Returns the model itself
def build_from_hash(attributes)
return nil unless attributes.is_a?(Hash)
self.class.openapi_types.each_pair do |key, type|
if type =~ /\AArray<(.*)>/i
# check to ensure the input is an array given that the attribute
# is documented as an array but the input is not
if attributes[self.class.attribute_map[key]].is_a?(Array)
self.send("#{key}=", attributes[self.class.attribute_map[key]].map { |v| _deserialize($1, v) })
end
elsif !attributes[self.class.attribute_map[key]].nil?
self.send("#{key}=", _deserialize(type, attributes[self.class.attribute_map[key]]))
end # or else data not found in attributes(hash), not an issue as the data can be optional
end
self
end
# Deserializes the data based on type
# @param string type Data type
# @param string value Value to be deserialized
# @return [Object] Deserialized data
def _deserialize(type, value)
case type.to_sym
when :DateTime
DateTime.parse(value)
when :Date
Date.parse(value)
when :String
value.to_s
when :Integer
value.to_i
when :Float
value.to_f
when :Boolean
if value.to_s =~ /\A(true|t|yes|y|1)\z/i
true
else
false
end
when :Object
# generic object (usually a Hash), return directly
value
when /\AArray<(?<inner_type>.+)>\z/
inner_type = Regexp.last_match[:inner_type]
value.map { |v| _deserialize(inner_type, v) }
when /\AHash<(?<k_type>.+?), (?<v_type>.+)>\z/
k_type = Regexp.last_match[:k_type]
v_type = Regexp.last_match[:v_type]
{}.tap do |hash|
value.each do |k, v|
hash[_deserialize(k_type, k)] = _deserialize(v_type, v)
end
end
else # model
MailSlurpClient.const_get(type).build_from_hash(value)
end
end
# Returns the string representation of the object
# @return [String] String presentation of the object
def to_s
to_hash.to_s
end
# to_body is an alias to to_hash (backward compatibility)
# @return [Hash] Returns the object in the form of hash
def to_body
to_hash
end
# Returns the object in the form of hash
# @return [Hash] Returns the object in the form of hash
def to_hash
hash = {}
self.class.attribute_map.each_pair do |attr, param|
value = self.send(attr)
if value.nil?
is_nullable = self.class.openapi_nullable.include?(attr)
next if !is_nullable || (is_nullable && !instance_variable_defined?(:"@#{attr}"))
end
hash[param] = _to_hash(value)
end
hash
end
# Outputs non-array value in the form of hash
# For object, use to_hash. Otherwise, just return the value
# @param [Object] value Any valid value
# @return [Hash] Returns the value in the form of hash
def _to_hash(value)
if value.is_a?(Array)
value.compact.map { |v| _to_hash(v) }
elsif value.is_a?(Hash)
{}.tap do |hash|
value.each { |k, v| hash[k] = _to_hash(v) }
end
elsif value.respond_to? :to_hash
value.to_hash
else
value
end
end
end
end
| 30.76 | 470 | 0.613459 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.