defmodule Pass.Plugs do
@moduledoc """
The `Pass.Plugs` module is meant to be imported into a Phoenix Router
module so that its functions can be used as plugs in the router's pipelines.
## Example
```elixir
defmodule MyApp.Router do
use MyApp.Web, :router
import Pass.Plugs
pipeline :browser do
plug :fetch_session
# Pass.Plugs function
plug :authenticate_session
end
pipeline :require_auth do
# Pass.Plugs function
plug :require_authentication, redirect_to: "/login"
end
end
```
"""
import Plug.Conn
alias Pass.Authenticate
@doc """
Extends a valid authentication session or clears that data.
"""
def authenticate_session(%Plug.Conn{} = conn, _opts) do
Authenticate.session(conn)
end
@doc """
Ensures the current session is valid. If not, returns a 401 or redirects to
the path specified by the `:redirect_to` option.
If the session is valid, the current user will be retrieved and stored in the
connection struct. If this is not desirable, the `:skip_user_lookup` option
can be set to `true`. Be careful when doing this: it no longer ensures
that the user hasn't been deleted.
"""
def require_authentication(%Plug.Conn{} = conn, %{} = opts) do
user = nil
cond do
Authenticate.session_valid?(conn) &&
(opts[:skip_user_lookup] || (user = Pass.DataStore.adapter.get(get_session(conn, :user_id))) != nil) ->
assign(conn, :current_user, user)
redirect_path = Map.get(opts, :redirect_to) ->
conn
|> put_session(:redirect_url, conn.request_path)
|> put_resp_header("location", redirect_path)
|> send_resp(302, "")
|> halt
true ->
conn
|> set_content_type(opts[:content_type])
|> send_resp(401, Map.get(opts, :body, ""))
|> halt
end
end
# Accept keyword lists (as in the moduledoc example) by converting them to a map.
def require_authentication(conn, opts) when is_list(opts), do: require_authentication(conn, Map.new(opts))
def require_authentication(conn, _opts), do: require_authentication(conn, %{})
defp set_content_type(conn, nil), do: conn
defp set_content_type(conn, value) when is_binary(value) do
# Plug expects lowercase response header names.
put_resp_header(conn, "content-type", value)
end
end
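# A minimal usage sketch for the 401 branch (assumes this module is imported
# into a Phoenix router as in the moduledoc; the JSON body is just an
# illustrative choice):
#
#     pipeline :api_auth do
#       plug :require_authentication,
#         content_type: "application/json",
#         body: ~s({"error":"unauthorized"})
#     end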
# ---- lib/pass/plugs.ex ----
defmodule Advent20.Encoding do
@moduledoc """
Day 9: Encoding Error
"""
defp parse(input) do
input
|> String.split("\n", trim: true)
|> Enum.map(&String.to_integer/1)
end
@doc """
Part 1: The first step of attacking the weakness in the XMAS data
is to find the first number in the list (after the preamble) which
is not the sum of two of the 25 numbers before it.
What is the first number that does not have this property?
"""
def first_failing_number(input, preamble_length) do
numbers = parse(input)
Stream.unfold(0, fn
index ->
range = index..(index + preamble_length - 1)
current_number = Enum.at(numbers, index + preamble_length)
window = Enum.slice(numbers, range)
{{current_number, window}, index + 1}
end)
|> Stream.filter(&number_not_in_window/1)
|> Enum.find(& &1)
|> elem(0)
end
defp number_not_in_window({current_number, window}) do
matching_numbers = for a <- window, b <- window, a != b, do: {a, b}
Enum.find(matching_numbers, fn {a, b} -> a + b == current_number end)
|> case do
nil -> current_number
_ -> false
end
end
@doc """
Part 2: What is the encryption weakness in your XMAS-encrypted list of numbers?
"""
def contiguous_set_for_number(input, invalid_number) do
numbers = parse(input)
Stream.iterate(0, &(&1 + 1))
|> Stream.map(&try_find_sum(&1, numbers, invalid_number))
|> Enum.find(& &1)
end
defp try_find_sum(index, numbers, looking_for) do
prepared_numbers = Enum.drop(numbers, index)
Enum.reduce_while(prepared_numbers, %{sum: 0, contiguous_numbers: []}, fn number, acc ->
new_sum = number + acc.sum
cond do
new_sum > looking_for ->
{:halt, false}
new_sum < looking_for ->
new_acc = %{sum: new_sum, contiguous_numbers: [number | acc.contiguous_numbers]}
{:cont, new_acc}
new_sum == looking_for ->
all_contiguous_numbers = [number | acc.contiguous_numbers]
{:halt, Enum.max(all_contiguous_numbers) + Enum.min(all_contiguous_numbers)}
end
end)
end
end
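# Usage sketch with the example data from the Day 9 puzzle text (preamble
# length 5): the first invalid number is 127, and the encryption weakness is
# 15 + 47 = 62, from the contiguous run 15, 25, 47, 40.
#
#     input = Enum.join([35, 20, 15, 25, 47, 40, 62, 55, 65, 95, 102, 117, 150, 182, 127, 219, 299, 277, 309, 576], "\n")
#     Advent20.Encoding.first_failing_number(input, 5)          #=> 127
#     Advent20.Encoding.contiguous_set_for_number(input, 127)   #=> 62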
# ---- lib/advent20/09_encoding.ex ----
defmodule Gyx.Environments.Pure.FrozenLake do
@moduledoc """
This module implements the FrozenLake-v0
environment according to
OpenAI implementation: https://gym.openai.com/envs/FrozenLake-v0/
"""
alias Gyx.Core.{Env, Exp}
alias Gyx.Core.Spaces.Discrete
use Env
use GenServer
defstruct map: nil,
row: nil,
col: nil,
ncol: nil,
nrow: nil,
action_space: nil,
observation_space: nil
@type t :: %__MODULE__{
map: list(charlist),
row: integer(),
col: integer(),
ncol: integer(),
nrow: integer(),
action_space: Discrete.t(),
observation_space: Discrete.t()
}
@actions %{0 => :left, 1 => :down, 2 => :right, 3 => :up}
@maps %{
"4x4" => [
"SFFF",
"FHFH",
"FFFH",
"HFFG"
],
"8x8" => [
"SFFFFFFF",
"FFFFFFFF",
"FFFHFFFF",
"FFFFFHFF",
"FFFHFFFF",
"FHHFFFHF",
"FHFFHFHF",
"FFFHFFFG"
]
}
@impl true
def init(map_name) do
map = @maps[map_name]
{:ok,
%__MODULE__{
map: map,
row: 0,
col: 0,
nrow: length(map),
ncol: String.length(List.first(map)),
action_space: %Discrete{n: 4},
# derive the observation count from the map instead of hard-coding 16
observation_space: %Discrete{n: length(map) * String.length(List.first(map))}
}}
end
def start_link(_, opts) do
GenServer.start_link(__MODULE__, "4x4", opts)
end
@impl Env
def reset(environment) do
GenServer.call(environment, :reset)
end
def render(environment) do
GenServer.call(environment, :render)
end
def handle_call(:render, _from, state) do
print_env(state.map, state.row, state.col)
{:reply, {:ok, position: {state.row, state.col}}, state}
end
@impl true
def handle_call(:reset, _from, state) do
new_env_state = %{state | row: 0, col: 0}
{:reply, %Exp{next_state: new_env_state}, new_env_state}
end
def handle_call({:act, action}, _from, state) do
new_state = row_col_step(state, action)
current = get_position(new_state.map, new_state.row, new_state.col)
{:reply,
%Exp{
state: env_state_transformer(state),
action: action,
next_state: env_state_transformer(new_state),
reward: if(current == "G", do: 1.0, else: 0.0),
done: current in ["H", "G"],
info: %{}
}, new_state}
end
defp get_position(map, row, col) do
Enum.at(String.graphemes(Enum.at(map, row)), col)
end
def env_state_transformer(state), do: Map.put(state, :enumerated, state.row * state.ncol + state.col)
@spec row_col_step(__MODULE__.t(), atom) :: __MODULE__.t()
defp row_col_step(state, action) do
case action do
:left -> %{state | col: max(state.col - 1, 0)}
:down -> %{state | row: min(state.row + 1, state.nrow - 1)}
:right -> %{state | col: min(state.col + 1, state.ncol - 1)}
:up -> %{state | row: max(state.row - 1, 0)}
_ -> state
end
end
defp print_env([], _, _), do: :ok
defp print_env([h | t], row, col) do
print_env_line(h, col, row == 0)
print_env(t, row - 1, col)
end
defp print_env_line(string_line, agent_position, mark) do
chars_line = String.graphemes(string_line)
m =
if mark,
do:
IO.ANSI.format_fragment(
[:light_magenta, :italic, chars_line |> Enum.at(agent_position)],
true
),
else: [Enum.at(chars_line, agent_position)]
p =
IO.ANSI.format_fragment(
[:light_blue, :italic, chars_line |> Enum.take(agent_position) |> List.to_string()],
true
)
q =
IO.ANSI.format_fragment(
[
:light_blue,
:italic,
chars_line |> Enum.take(agent_position - length(chars_line) + 1) |> List.to_string()
],
true
)
(p ++ m ++ q)
|> IO.puts()
end
end
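# Interaction sketch: the environment is a GenServer, so after start_link/2
# (which always loads the "4x4" map) actions are issued as `{:act, action}`
# calls, with `action` one of :left | :down | :right | :up.
#
#     alias Gyx.Environments.Pure.FrozenLake
#     {:ok, env} = FrozenLake.start_link(nil, [])
#     FrozenLake.reset(env)
#     %Gyx.Core.Exp{reward: reward, done: done} = GenServer.call(env, {:act, :right})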
# ---- lib/environments/pure/frozenlake.ex ----
defmodule Graphd.Node do
@moduledoc """
Simple high level API for accessing graphs
## Usage
defmodule Shared do
use Graphd.Node
shared do
field :id, :string, index: ["term"]
field :name, :string, index: ["term"]
end
end
defmodule User do
use Graphd.Node, depends_on: Shared
schema "user" do
field :id, :auto
field :name, :auto
end
end
defmodule User do
use Graphd.Node
schema "user" do
field :id, :auto, depends_on: Shared
field :name, :string, index: ["term"]
field :age, :integer
field :cache, :any, virtual: true
field :owns, :uid
end
end
Dgraph types:
* `:integer`
* `:float`
* `:string`
* `:geo`
* `:datetime`
* `:uid`
* `:auto` - special type, which can be used for `depends_on`
## Reflection
Any schema module will generate the `__schema__` function that can be
used for runtime introspection of the schema:
* `__schema__(:source)` - Returns the source as given to `schema/2`;
* `__schema__(:fields)` - Returns a list of all non-virtual field names;
* `__schema__(:alter)` - Returns a generated alter schema
* `__schema__(:field, field)` - Returns the name of field in database for field in a struct and
vice versa;
* `__schema__(:type, field)` - Returns the type of the given non-virtual field;
Additionally it generates `Ecto` compatible `__changeset__` for using with `Ecto.Changeset`.
"""
alias Graphd.Field
defmacro __using__(opts) do
depends_on = Keyword.get(opts, :depends_on, nil)
quote do
@depends_on unquote(depends_on)
import Graphd.Node, only: [shared: 1, schema: 2]
end
end
defmacro schema(name, block) do
prepare = prepare_block(name, block)
postprocess = postprocess()
quote do
unquote(prepare)
unquote(postprocess)
end
end
defmacro shared(block) do
prepare = prepare_block(nil, block)
postprocess = postprocess()
quote do
@depends_on __MODULE__
unquote(prepare)
unquote(postprocess)
end
end
defp prepare_block(name, block) do
quote do
@name unquote(name)
Module.register_attribute(__MODULE__, :fields, accumulate: true)
Module.register_attribute(__MODULE__, :fields_struct, accumulate: true)
Module.register_attribute(__MODULE__, :fields_data, accumulate: true)
Module.register_attribute(__MODULE__, :depends_on_modules, accumulate: true)
import Graphd.Node
unquote(block)
end
end
defp postprocess() do
quote unquote: false do
defstruct [:uid | @fields_struct]
fields = Enum.reverse(@fields)
source = @name
alter = Graphd.Node.__schema_alter__(__MODULE__, source)
def __schema__(:source), do: unquote(source)
def __schema__(:fields), do: unquote(fields)
def __schema__(:alter), do: unquote(Macro.escape(alter))
def __schema__(:depends_on), do: unquote(Graphd.Node.__depends_on_modules__(__MODULE__))
for %Graphd.Field{name: name, db_name: db_name, type: type, unique: unique, required: required} <- @fields_data do
def __schema__(:type, unquote(name)), do: unquote(type)
def __schema__(:field, unquote(name)), do: unquote(db_name)
def __schema__(:field, unquote(db_name)), do: {unquote(name), unquote(type)}
def __schema__(:unique, unquote(name)), do: unquote(unique)
def __schema__(:required, unquote(name)), do: unquote(required)
end
def __schema__(:type, _), do: nil
def __schema__(:field, _), do: nil
def __schema__(:unique, _), do: false
def __schema__(:required, _), do: false
unique_fields = Graphd.Node.__unique_fields__(@fields_data)
def __schema__(:unique_fields), do: unquote(Macro.escape(unique_fields))
required_fields = Graphd.Node.__required_fields__(@fields_data)
def __schema__(:required_fields), do: unquote(Macro.escape(required_fields))
changeset = Graphd.Node.__gen_changeset__(@fields_data)
def __changeset__(), do: unquote(Macro.escape(changeset))
end
end
@doc false
def __schema_alter__(module, source) do
preds =
module
|> Module.get_attribute(:fields_data)
|> Enum.flat_map(&List.wrap(&1.alter))
|> Enum.reverse()
type_fields =
module
|> Module.get_attribute(:fields_data)
|> Enum.map(fn fdata ->
%{
"name" => fdata.db_name,
"type" => db_type(fdata.type)
}
end)
type = %{"name" => source, "fields" => type_fields}
%{
"types" => List.wrap(type),
"schema" => preds
}
end
@doc false
def __depends_on_modules__(module) do
depends_on_module = module |> Module.get_attribute(:depends_on) |> List.wrap()
:lists.usort(depends_on_module ++ Module.get_attribute(module, :depends_on_modules))
end
@doc false
def __unique_fields__(fields) do
for %Graphd.Field{name: name, unique: unique} <- fields, unique != false, into: [], do: name
end
@doc false
def __required_fields__(fields) do
for %Graphd.Field{name: name, required: required} <- fields, required != false, into: [], do: name
end
@doc false
def __gen_changeset__(fields) do
for %Graphd.Field{name: name, type: type, opts: opts} <- fields, into: %{}, do: {name, ecto_type(type, opts[:list])}
end
defp ecto_type(type, list) do
typ = do_ecto_type(type)
case list do
true -> {:array, typ}
_ -> typ
end
end
defp do_ecto_type(:datetime), do: :utc_datetime
defp do_ecto_type(type), do: type
defmacro field(name, type, opts \\ []) do
quote do
Graphd.Node.__field__(__MODULE__, unquote(name), unquote(type), unquote(opts), @depends_on)
end
end
defmacro has_many(name, type, opts \\ []) do
quote do
Graphd.Node.__field__(__MODULE__, unquote(name), unquote(type), [list: true] ++ unquote(opts), @depends_on)
end
end
@doc false
def __field__(module, name, type, opts, depends_on) do
type =
case type do
:uid -> Graphd.DataType.UID
:geo -> Graphd.DataType.Geo
:password -> Graphd.DataType.Password
_ -> type
end
schema_name = Module.get_attribute(module, :name)
Module.put_attribute(module, :fields_struct, {name, opts[:default]})
unless opts[:virtual] do
Module.put_attribute(module, :fields, name)
{db_name, type, alter} = db_field(name, type, opts, schema_name, module, depends_on)
{unique, opts} = Keyword.pop(opts, :unique, false)
{required, opts} = Keyword.pop(opts, :required, false)
field = %Field{name: name, type: type, db_name: db_name, unique: unique, required: required, alter: alter, opts: opts}
Module.put_attribute(module, :fields_data, field)
end
end
defp db_field(name, type, opts, schema_name, module, depends_on) do
if depends_on = opts[:depends_on] || depends_on do
put_attribute_if_not_exists(module, :depends_on_modules, depends_on)
with {:error, error} <- Code.ensure_compiled(depends_on),
do: raise("Module `#{depends_on}` not available, error: #{error}")
field_name = Atom.to_string(name)
if module == depends_on do
{field_name, type, alter_field(field_name, type, opts)}
else
{field_name, depends_on.__schema__(:type, name), nil}
end
else
field_name = "#{schema_name}.#{name}"
{field_name, type, alter_field(field_name, type, opts)}
end
end
defp put_attribute_if_not_exists(module, key, value) do
unless module |> Module.get_attribute(key) |> Enum.member?(value),
do: Module.put_attribute(module, key, value)
end
defp alter_field(field_name, type, opts) do
basic_alter = %{
"predicate" => field_name,
"type" => db_type(type, opts[:list])
}
opts |> Enum.flat_map(&gen_opt(&1, type)) |> Enum.into(basic_alter)
end
@types_mapping [
integer: "int",
float: "float",
string: "string",
geo: "geo",
datetime: "datetime",
uid: "uid",
password: "password"
]
for {type, dgraph_type} <- @types_mapping do
defp primitive_type(unquote(type)), do: unquote(dgraph_type)
end
@primitive_types Keyword.keys(@types_mapping)
def primitive_type?(type), do: type in @primitive_types
defp db_type(type, list \\ nil) do
typ = if primitive_type?(type), do: primitive_type(type), else: primitive_type(type.type)
case list do
true -> "[#{typ}]"
_ -> typ
end
end
@ignore_keys [:default, :depends_on, :list, :unique, :required]
defp gen_opt({key, _value}, _type) when key in @ignore_keys, do: []
defp gen_opt({:index, true}, type), do: [{"index", true}, {"tokenizer", [db_type(type)]}]
defp gen_opt({:index, tokenizers}, :string) when is_list(tokenizers),
do: [{"index", true}, {"tokenizer", tokenizers}]
defp gen_opt({key, value}, _type), do: [{Atom.to_string(key), value}]
end
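# Reflection sketch, using the second `User` module from the moduledoc above
# (the expected values follow from the `__schema__` clauses generated in
# `postprocess/0`):
#
#     User.__schema__(:source)        #=> "user"
#     User.__schema__(:type, :age)    #=> :integer
#     User.__schema__(:field, :name)  #=> "user.name"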
# ---- lib/graphd/node.ex ----
defmodule Day09 do
@moduledoc """
Documentation for `Day09`.
"""
def hack_xmas(file_path, preamble_size) do
input = load_input(file_path)
vulnerability = get_vulnerability(input, preamble_size)
IO.puts(vulnerability)
{i, j} = get_longest_combination(input, vulnerability)
input = input
|> Enum.slice(i..j)
|> Enum.sort()
Enum.at(input, 0) + Enum.at(input, -1)
end
def get_vulnerability(input, preamble_size) do
input = Enum.with_index(input)
input
|> Enum.slice(preamble_size..-1)
|> Enum.reduce_while(0, fn {x, i}, acc ->
# the window is the preamble_size numbers *before* index i, excluding x itself
if fits_pattern(Enum.slice(input, (i - preamble_size)..(i - 1)), x), do: {:cont, acc}, else: {:halt, x}
end)
end
def fits_pattern(input, target) do
combinations = for {x, x_index} <- input, {y, y_index} <- input, x_index != y_index, do: {x, y}
Enum.any?(combinations, fn {x, y} -> x + y == target end)
end
def get_longest_combination(inputs, target) do
inputs = Enum.with_index(inputs)
inputs
|> Enum.map(fn {_,i} -> {i, get_combination(Enum.slice(inputs, i..-1), target)} end)
|> Enum.filter(fn {x,i} -> i > x end)
|> Enum.sort(fn {x,i}, {y,j} -> i - x >= j - y end)
|> Enum.fetch(0)
|> elem(1)
end
def get_combination(list, target) do
list_size = Enum.count(list)
end_index = Enum.at(list, -1) |> elem(1)
if list_size < 2 do
0
else
list
|> Enum.reduce_while(0, fn {y, j}, acc ->
case {y,j, acc, end_index} do
{y,_j,_acc,_} when y == target -> {:halt, 0}
{y,j,acc,_} when acc + y == target -> {:halt, j}
{y,_,acc,_} when (acc + y >= target) -> {:halt, 0}
{_,j,_acc,end_index} when j == end_index -> {:halt,0}
{y,_,acc,_} -> {:cont, acc + y}
end
end)
end
end
def load_input(file_path) do
file_path
|> File.read!()
|> String.replace("\r", "")
|> String.split("\n", trim: true)
|> Enum.map(&String.to_integer/1)
end
end
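# Usage sketch (the input path is hypothetical; the real puzzle uses a
# 25-number preamble):
#
#     Day09.hack_xmas("input.txt", 25)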
# ---- src/Day09/day09/lib/day09.ex ----
defmodule Marker.Compiler do
@moduledoc """
`Marker.Compiler` renders the element macros to html. It tries to do as much work as possible during
macro expansion, resulting in run-time performance comparable to precompiled templates.
For example, this element call:
```elixir
div 1 + 1
```
will be expanded to this:
```elixir
"<div>" <> Marker.Encoder.encode(1 + 1) <> "</div>"
```
"""
alias Marker.Element
@type element :: String.t() | Macro.t() | Marker.Element.t()
@type chunks :: [String.t() | Macro.t()]
# API
@doc false
# @spec compile_env(Marker.content(), any) :: {:safe, String.t()} | Macro.t()
def compile_env(content, env) do
content
|> compile([])
|> to_result()
|> case do
{:safe, body} ->
if :ok == Macro.validate(body) do
body
|> expand(env)
else
{:safe, body}
end
macro ->
macro
end
end
@doc false
@spec escape(String.t()) :: String.t()
def escape(string) do
escape(string, "")
end
# Content parsing
@spec compile(Marker.content(), chunks) :: chunks
defp compile(content, chunks) when is_list(content) do
Enum.reduce(content, chunks, &compile/2)
end
@spec compile(element, chunks) :: chunks
defp compile(%Element{tag: tag, attrs: attrs, content: content}, chunks) do
chunks =
chunks
|> maybe_doctype(tag)
|> begin_tag_open(tag)
|> build_attrs(attrs)
if void_element?(tag) do
void_tag_close(chunks)
else
compile(content, begin_tag_close(chunks))
|> end_tag(tag)
end
end
defp compile(value, chunks) do
add_chunk(chunks, Marker.Encoder.encode(value))
end
# Element helpers
defp begin_tag_open(chunks, tag), do: add_chunk(chunks, "<#{tag}")
defp begin_tag_close(chunks), do: add_chunk(chunks, ">")
defp void_tag_close(chunks), do: add_chunk(chunks, "/>")
defp end_tag(chunks, tag), do: add_chunk(chunks, "</#{tag}>")
defp maybe_doctype(chunks, :html), do: add_chunk(chunks, "<!doctype html>\n")
defp maybe_doctype(chunks, _), do: chunks
defp void_element?(tag) do
tag in ~w(area base br col embed hr img input keygen link meta param source track wbr)a
end
# Attributes parsing
@spec build_attrs(chunks, Marker.Element.attrs()) :: chunks
defp build_attrs(chunks, attrs) when is_list(attrs) do
Enum.reduce(attrs, chunks, fn
{_, nil}, chunks -> chunks
{_, false}, chunks -> chunks
{k, true}, chunks -> enabled_attr(chunks, k)
{k, v}, chunks -> attr(chunks, k, v)
end)
end
defp build_attrs(chunks, attrs) do
if Macro.validate(attrs) == :ok do
quote do
Enum.reduce(unquote(attrs), unquote(chunks), fn
{_, nil}, chunks -> chunks
{_, false}, chunks -> chunks
{k, true}, chunks -> Marker.Compiler.enabled_attr(chunks, k)
{k, v}, chunks -> Marker.Compiler.attr(chunks, k, v)
end)
end
else
chunks
end
end
@spec attr(chunks, atom, Marker.Encoder.t()) :: chunks
def attr(chunks, field, value) do
field = attr_field(field)
case Marker.Encoder.encode(value) do
string when is_binary(string) ->
add_chunk(chunks, "#{field}='#{string}'")
expr ->
add_chunk(chunks, attr_resolver(field, expr))
end
end
@spec enabled_attr(chunks, atom) :: chunks
def enabled_attr(chunks, field) do
add_chunk(chunks, attr_field(field))
end
defp attr_field(field) do
case Atom.to_string(field) do
"_" <> field -> " data-" <> field
field -> " " <> field
end
end
# Helpers
entity_map = %{
"&" => "&amp;",
"<" => "&lt;",
">" => "&gt;",
"\"" => "&quot;",
"'" => "&#39;"
}
for {char, entity} <- entity_map do
defp escape(unquote(char) <> rest, acc) do
escape(rest, acc <> unquote(entity))
end
end
defp escape(<<char::utf8, rest::binary>>, acc) do
escape(rest, acc <> <<char::utf8>>)
end
defp escape("", acc) do
acc
end
defp add_chunk([acc | rest], chunk) when is_binary(acc) and is_binary(chunk) do
rest ++ [acc <> chunk]
end
defp add_chunk(chunks, chunk) when is_binary(chunk) do
if :ok == Macro.validate(chunks) do
quote do
unquote(chunks) ++ [unquote(chunk)]
end
else
chunks ++ [chunk]
end
end
defp add_chunk(chunks, {:safe, expr}) do
if :ok == Macro.validate(chunks) do
quote do
unquote(chunks) ++ [unquote(expr)]
end
else
chunks ++ [expr]
end
end
defp add_chunk(chunks, chunk) do
if :ok == Macro.validate(chunks) do
quote do
expr = Marker.Encoder.encode(unquote(chunk))
unquote(chunks) ++ [expr]
end
else
expr = quote do: Marker.Encoder.encode(unquote(chunk))
chunks ++ [expr]
end
end
defp to_result([string]) when is_binary(string) do
{:safe, string}
end
defp to_result(chunks) do
if Macro.validate(chunks) == :ok do
{:safe, quote(do: {:safe, Marker.Compiler.concat(unquote(chunks))})}
else
{:safe, concat(chunks)}
end
end
def concat(buffer) do
Enum.reduce(buffer, fn chunk, acc ->
quote do
unquote(acc) <> unquote(chunk)
end
end)
end
# defmacro concat(buffer) do
# Enum.reduce(buffer, fn chunk, acc ->
# quote do
# unquote(acc) <> unquote(chunk)
# end
# end)
# end
# defp to_result(chunks) do
# if Macro.validate(chunks) == :ok do
# {:safe,
# quote do
# {:safe,
# Enum.reduce(unquote(chunks), fn chunk, acc ->
# # unquote_splicing()
# unquote(acc) <> unquote(chunk)
# end)}
# end}
# else
# {:safe, concat(chunks)}
# end
# end
def expand(arg, env) do
Macro.prewalk(arg, &Macro.expand_once(&1, env))
end
defp attr_resolver(field, expr) do
{:safe,
quote do
case unquote(expr) do
nil -> ""
false -> ""
true -> unquote(field)
value -> unquote(field) <> "='" <> Marker.Encoder.encode(value) <> "'"
end
end}
end
end
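# Escaping sketch: `escape/1` walks the binary once and replaces the five
# HTML-significant characters with entities, preserving character order.
#
#     Marker.Compiler.escape(~s(<a href="x">))
#     #=> "&lt;a href=&quot;x&quot;&gt;"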
# ---- lib/marker/compiler.ex ----
defmodule Swagger.Schema.Operation do
@moduledoc """
An Operation defines a specific action one can take against an API.
An Operation contains all of the information necessary to execute a request to
perform that action, such as what parameters it requires, what content types it
will accept and return, the specification of its responses, what security it
requires, and more.
"""
alias Swagger.Schema.{Utils, Parameter}
defstruct name: nil,
tags: [],
summary: nil,
description: nil,
id: nil,
produces: [],
consumes: [],
parameters: %{},
responses: %{},
schemes: [],
deprecated?: false,
security: nil,
properties: %{}
@type mime_type :: String.t
@type schema :: Map.t
@type response :: :default | pos_integer
@type t :: %__MODULE__{
name: String.t, id: String.t,
tags: [String.t],
summary: String.t, description: String.t,
produces: [mime_type], consumes: [mime_type],
parameters: %{String.t => Parameter.t},
responses: %{response => schema},
schemes: [String.t],
deprecated?: boolean,
security: map | nil,
properties: Map.t
}
use Swagger.Access
def from_schema(name, %{"operationId" => id} = op) do
case extract_parameters(op) do
{:error, _} = err ->
err
params ->
res = %__MODULE__{name: name, id: id}
|> Map.put(:tags, Map.get(op, "tags", []))
|> Map.put(:summary, Map.get(op, "summary", "No summary"))
|> Map.put(:description, Map.get(op, "description", "No description"))
|> Map.put(:produces, Utils.extract_media_types(op, "produces"))
|> Map.put(:consumes, Utils.extract_media_types(op, "consumes"))
|> Map.put(:parameters, params)
|> Map.put(:responses, extract_responses(op))
|> Map.put(:schemes, Map.get(op, "schemes", []))
|> Map.put(:deprecated?, Map.get(op, "deprecated", false))
|> Map.put(:security, extract_security(op))
|> Map.put(:properties, Utils.extract_properties(op))
{:ok, res}
end
end
defp extract_security(%{"security" => security_requirements}) when is_list(security_requirements) do
security_requirements
|> Enum.flat_map(fn req -> Enum.into(req, []) end)
|> Enum.into(%{})
end
defp extract_security(_), do: %{}
defp extract_parameters(%{"parameters" => parameters}) when is_list(parameters) do
Enum.reduce(parameters, %{}, fn
_, {:error, _} = err ->
err
p, acc ->
case Parameter.from_schema(p) do
{:error, _} = err ->
err
{:ok, param} ->
Map.put(acc, param.name, param)
end
end)
end
defp extract_parameters(_), do: %{}
defp extract_responses(%{"responses" => responses}) when is_map(responses) do
Enum.reduce(responses, %{}, fn
{"default", %{"schema" => schema}}, acc ->
Map.put(acc, :default, schema)
{http_status, %{"schema" => schema}}, acc ->
Map.put(acc, String.to_integer(http_status), schema)
{http_status, _}, acc ->
Map.put(acc, String.to_integer(http_status), nil)
end)
end
defp extract_responses(_), do: %{}
end
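# Construction sketch from a hand-written (hypothetical) operation map; only
# "operationId" is required by the function head, everything else defaults:
#
#     {:ok, op} =
#       Swagger.Schema.Operation.from_schema("get", %{
#         "operationId" => "listPets",
#         "responses" => %{"200" => %{"schema" => %{"type" => "array"}}}
#       })
#     op.responses  #=> %{200 => %{"type" => "array"}}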
# ---- lib/schema/operation.ex ----
defmodule Cldr.Gettext.Interpolation do
@moduledoc """
As of [Gettext 0.19](https://hex.pm/packages/gettext/0.19.0), `Gettext`
supports user-defined [interpolation modules](https://hexdocs.pm/gettext/Gettext.html#module-backend-configuration).
This makes it easy to combine the power of ICU message formats with the
broad `gettext` ecosystem and the inbuilt support for `gettext`
in [Phoenix](https://hex.pm/packages/phoenix).
The documentation for [Gettext](https://hexdocs.pm/gettext/Gettext.html#content)
should be followed with considerations in mind:
1. A Gettext backend module should use the `:interpolation` option
defined referring to the `ex_cldr_messages` backend you have defined.
2. The message format is in the ICU message format (instead of the Gettext format).
### Defining a Gettext Interpolation Module
Any [ex_cldr](https://hex.pm/packages/ex_cldr) [backend module](https://hexdocs.pm/ex_cldr/readme.html#backend-module-configuration) that has a `Cldr.Message` provider configured can be used as an interpolation module. Here is an example:
```elixir
# CLDR backend module
defmodule MyApp.Cldr do
use Cldr,
locales: ["en", "fr", "ja", "he", "th", "ar"],
default_locale: "en",
providers: [Cldr.Number, Cldr.DateTime, Cldr.Unit, Cldr.List, Cldr.Calendar, Cldr.Message],
gettext: MyApp.Gettext,
message_formats: %{
USD: [format: :long]
}
end
# Define an interpolation module for ICU messages
defmodule MyApp.Gettext.Interpolation do
use Cldr.Gettext.Interpolation, cldr_backend: MyApp.Cldr
end
# Define a gettext module with ICU message interpolation
defmodule MyApp.Gettext do
use Gettext, otp_app: :ex_cldr_messages, interpolation: MyApp.Gettext.Interpolation
end
```
Now you can proceed to use `Gettext` in the normal manner, most
typically with the `gettext/3` macro.
"""
defmacro __using__(opts \\ []) do
backend = Keyword.get_lazy(opts, :cldr_backend, &Cldr.default_backend!/0)
quote do
@behaviour Gettext.Interpolation
@icu_format "icu-format"
@impl Gettext.Interpolation
def runtime_interpolate(message, bindings) when is_binary(message) do
options = [backend: unquote(backend), locale: Cldr.get_locale(unquote(backend))]
Cldr.Message.Backend.gettext_interpolate(message, bindings, options)
end
@impl Gettext.Interpolation
defmacro compile_interpolate(_translation_type, message, bindings) do
alias Cldr.Message.Parser
alias Cldr.Message.Backend
backend = unquote(backend)
message = Backend.expand_to_binary!(message, __CALLER__)
case Parser.parse(message) do
{:ok, parsed_message} ->
Backend.validate_bindings!(parsed_message, bindings)
static_bindings = Backend.static_bindings(bindings)
Backend.quoted_message(parsed_message, backend, bindings, static_bindings)
{:error, {exception, reason}} ->
raise exception, reason
end
end
@impl Gettext.Interpolation
def message_format do
@icu_format
end
end
end
end
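# With the backend, interpolation, and Gettext modules from the moduledoc in
# place, ICU messages go through the usual Gettext API (the message id below
# is hypothetical):
#
#     import MyApp.Gettext
#     gettext("{count, plural, one {# item} other {# items}}", count: 3)
#     #=> "3 items"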
# ---- lib/cldr/gettext/interpolation.ex ----
defmodule Monad.Maybe do
use Monad
use Monad.Pipeline
@moduledoc """
The Maybe monad.
The `Maybe` monad encapsulates an optional value. A `maybe` monad either
contains a value `x` (represented as "`{:just, x}`") or is empty (represented
as "`:nothing`").
`Maybe` can be used as a simple kind of error monad, where all errors are
represented by `:nothing`.
## Examples
iex> require Monad.Maybe, as: Maybe
iex> Maybe.m do
...> x <- {:just, 1}
...> y <- {:just, 2}
...> return x + y
...> end
{:just, 3}
iex> require Monad.Maybe, as: Maybe
iex> Maybe.m do
...> x <- {:just, 1}
...> y <- :nothing
...> return x + y
...> end
:nothing
"""
@type maybe_m :: {:just, any} | :nothing
## Monad implementations
@spec bind(maybe_m, (any -> maybe_m)) :: maybe_m
@doc """
Bind the value inside Maybe monad `m` to function `f`.
Note that the computation shortcircuits if `m` is `:nothing`.
"""
def bind(m, f)
def bind({:just, x}, f), do: f.(x)
def bind(:nothing, _), do: :nothing
@doc """
Inject `x` into a Maybe monad, i.e. returns `{:just, x}`.
"""
@spec return(any) :: maybe_m
def return(x), do: {:just, x}
## Auxiliary functions
@doc """
Signal failure, i.e. returns `:nothing`.
"""
@spec fail(any) :: maybe_m
def fail(msg)
def fail(_), do: :nothing
@doc """
Call function `f` with `x` if `m` is `{:just, x}`, otherwise call function `f`
with default value `d`.
"""
@spec maybe(any, (any -> any), maybe_m) :: any
def maybe(d, f, m)
def maybe(_, f, {:just, x}), do: f.(x)
def maybe(d, f, :nothing), do: f.(d)
@doc """
Returns true if given `{:just, x}` and false if given `:nothing`.
"""
@spec is_just(maybe_m) :: boolean
def is_just({:just, _}), do: true
def is_just(:nothing), do: false
@doc """
Returns true if given `:nothing` value and false if given `{:just, x}`.
"""
@spec is_nothing(maybe_m) :: boolean
def is_nothing(:nothing), do: true
def is_nothing({:just, _}), do: false
@doc """
Extracts value `x` out of `{:just, x}` or raises an error if given `:nothing`.
"""
@spec from_just(maybe_m) :: any
def from_just(m)
def from_just({:just, x}), do: x
def from_just(:nothing), do: raise "Monad.Maybe.from_just: :nothing"
@doc """
Extracts value `x` out of `{:just, x}` or returns default `d` if given
`:nothing`.
"""
@spec from_maybe(any, maybe_m) :: any
def from_maybe(d, m)
def from_maybe(_, {:just, x}), do: x
def from_maybe(d, :nothing), do: d
@doc """
Converts maybe value `m` to a list.
Returns an empty list if given `:nothing` or returns a list `[x]` if given
`{:just, x}`.
## Examples
iex> maybe_to_list :nothing
[]
iex> maybe_to_list {:just, 42}
[42]
"""
@spec maybe_to_list(maybe_m) :: [any]
def maybe_to_list(m)
def maybe_to_list({:just, x}), do: [x]
def maybe_to_list(:nothing), do: []
@doc """
Converts list `l` to a maybe value.
Returns `:nothing` if given the empty list; returns `{:just, x}` when given
the nonempty list `l`, where `x` is the head of `l`.
## Examples
iex> list_to_maybe []
:nothing
iex> list_to_maybe [1, 2, 3]
{:just, 1}
"""
@spec list_to_maybe([any]) :: maybe_m
def list_to_maybe(l)
def list_to_maybe([x | _]), do: {:just, x}
def list_to_maybe([]), do: :nothing
@doc """
Takes a list of `maybe`s and returns a list of all the `just` values.
## Example
iex> cat_maybes [{:just, 1}, :nothing, {:just, 2}, :nothing, {:just, 3}]
[1, 2, 3]
"""
@spec cat_maybes([maybe_m]) :: [any]
def cat_maybes(l) do
for x <- l, is_just(x), do: from_just x
end
@doc """
Map function `f` over the list `l` and throw out elements for which `f`
returns `:nothing`.
"""
@spec map_maybes((any -> maybe_m), [any]) :: [any]
def map_maybes(f, l) do
# bind the result once so `f` is not called twice per element
for x <- l, y = f.(x), is_just(y), do: from_just(y)
end
end
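# A few auxiliary-function examples; the results follow directly from the
# clauses above:
#
#     iex> Monad.Maybe.maybe(0, &(&1 * 2), {:just, 21})
#     42
#     iex> Monad.Maybe.from_maybe(:default, :nothing)
#     :default
#     iex> Monad.Maybe.cat_maybes([{:just, 1}, :nothing, {:just, 2}])
#     [1, 2]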
# ---- lib/monad/maybe.ex ----
defmodule Day17 do
def readinput() do
File.read!("17.input.txt")
|> String.split("\n", trim: true)
|> Enum.map(&String.split(&1, "", trim: true))
end
def neighbors3({x, y, z}) do
for i <- -1..1 do
for j <- -1..1 do
for k <- -1..1 do
{x + i, y + j, z + k}
end
end
end
|> List.flatten()
# we have to return the neighbors of a point, so we explicitly
# remove the point from the output
|> Enum.reject(fn {a, b, c} -> a == x and b == y and c == z end)
end
def neighbors4({x, y, z, w}) do
for i <- -1..1 do
for j <- -1..1 do
for k <- -1..1 do
for l <- -1..1 do
{x + i, y + j, z + k, w + l}
end
end
end
end
|> List.flatten()
|> Enum.reject(fn {a, b, c, d} -> a == x and b == y and c == z and d == w end)
end
def activeneighbors(state, neighborfn, point) do
Enum.count(neighborfn.(point), &Map.has_key?(state, &1))
end
def cycle(state, _, 0) do
state
|> Map.values()
|> Enum.count(fn cell -> cell == "#" end)
end
def cycle(state, neighborfn, count) do
# find the neighbors of active cells
tocheck =
state
|> Map.keys()
|> Enum.flat_map(neighborfn)
|> Enum.frequencies()
Enum.map(tocheck, fn {point, _} ->
cell = Map.get(state, point)
countactive = activeneighbors(state, neighborfn, point)
cond do
cell == "#" and countactive not in [2, 3] -> {point, :inactive}
countactive == 3 -> {point, :active}
true -> nil
end
end)
|> Enum.filter(& &1)
|> Enum.reduce(state, fn {p, newstate}, acc ->
case newstate do
:inactive -> Map.delete(acc, p)
:active -> Map.put(acc, p, "#")
end
end)
# active cells with no active neighbors never appear in `tocheck`, so the
# update above never deactivates them; prune them here (zero active
# neighbors means they die this cycle)
|> Enum.reject(fn {point, _} -> !Map.get(tocheck, point) end)
|> Map.new()
|> cycle(neighborfn, count - 1)
end
def part1(input \\ readinput()) do
rows = length(input)
cols = length(Enum.at(input, 0))
for c <- 0..(cols - 1),
r <- 0..(rows - 1),
cell = input |> Enum.at(r) |> Enum.at(c),
cell == "#",
into: %{} do
{{c, r, 0}, cell}
end
|> cycle(&neighbors3/1, 6)
end
def part2(input \\ readinput()) do
rows = length(input)
cols = length(Enum.at(input, 0))
for c <- 0..(cols - 1),
r <- 0..(rows - 1),
cell = input |> Enum.at(r) |> Enum.at(c),
cell == "#",
into: %{} do
{{c, r, 0, 0}, cell}
end
|> cycle(&neighbors4/1, 6)
end
end
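# Usage sketch: both parts read "17.input.txt" from the working directory and
# run six boot cycles, returning the count of active cubes.
#
#     Day17.part1()
#     Day17.part2()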
# ---- 2020/day17/lib/day17.ex ----
defmodule DublinBusTelegramBot.Commands do
require Logger
import Meter
@as_markdown [{:parse_mode, "Markdown"}]
defmeter start(chat_id) do
Nadia.send_message(chat_id, "
Welcome to the Dublin Bus bot:
Access to the *Real Time Passenger Information (RTPI)* for Dublin Bus services. Data is retrieved by parsing the still-in-development RTPI site. The HTML could change without notice and break the API; we don't take any responsibility for missed buses. The bot is as precise as the Dublin Bus application or the screens at the stops.
_This service is in no way affiliated with Dublin Bus or the providers of the RTPI service_.
Available commands
/stop <stop number>
Retrieve upcoming timetable at this stop
``` /stop 4242```
/watch <stop number> <line>
Sends you a message every minute with the ETA of the bus at the stop. It stops after the bus is Due or when the /unwatch command is sent. Only one watch at a time is possible.
``` /watch 4242 184```
/unwatch
Stop the current watch
``` /unwatch```
/search <query>
Search stops that match the name; if only one result is found, it also sends the timetable.
``` /search Townsend Street```
/info
Return some info about the bot
``` /info```
", @as_markdown)
%{}
end
defmeter(stop(chat_id, stop, update), do: handle_stop(chat_id, stop, update))
defp handle_stop(chat_id, "IL" <> stop, %{callback_query: %{message: %{message_id: message_id}}}),
do: stop_update_message(chat_id, stop, message_id)
defp handle_stop(chat_id, "IL" <> stop, %{
"callback_query" => %{"message" => %{"message_id" => message_id}}
}),
do: stop_update_message(chat_id, stop, message_id)
defp handle_stop(chat_id, stop, _) do
stop
|> Stop.get_info()
|> send_timetable(chat_id, stop)
end
defp stop_update_message(chat_id, stop, message_id) do
{text, options} =
stop
|> Stop.get_info()
|> timetable(stop)
{:ok, _} =
Nadia.API.request(
"editMessageText",
[chat_id: chat_id, message_id: message_id, text: text] ++ options
)
end
defmeter info(chat_id) do
apps = Application.loaded_applications()
{_, _, app_version} = List.keyfind(apps, :dublin_bus_telegram_bot, 0)
{_, _, api_version} = List.keyfind(apps, :dublin_bus_api, 0)
Nadia.send_message(
chat_id,
"""
Bot version: *#{app_version}*
API version: *#{api_version}*
API last time checked: *#{Stop.last_time_checked_formatted()}*
Bot icon made by Baianat from www.flaticon.com
""",
@as_markdown
)
Stop.last_time_checked_formatted()
%{}
end
defmeter watch(chat_id, stop, line) do
job = %Quantum.Job{
schedule: "* * * * *",
task: fn -> send_short_message(chat_id, stop, line) end
}
Quantum.add_job(chat_id, job)
Nadia.send_message(chat_id, "Watch set", [{:reply_markup, %{keyboard: [["/unwatch"]]}}])
send_short_message(chat_id, stop, line)
%{}
end
defmeter unwatch(chat_id) do
Quantum.delete_job(chat_id)
%{}
end
defp join_line({line, destination}), do: "#{line} #{destination}"
defp join_stop(stop) do
lines =
stop.lines
|> Enum.map(&join_line/1)
|> Enum.join("\n")
"** #{stop.ref} - #{stop.name} \n #{lines}"
end
defmeter search(chat_id, q) do
data = Stop.search(q)
case length(data) do
1 ->
Nadia.send_message(chat_id, "Search return only 1 result, here is the timetable")
[stop] = data
send_timetable(stop, chat_id, stop.ref)
x ->
Nadia.send_message(chat_id, "Search return #{x} results")
message =
data
|> Enum.map(&join_stop/1)
|> Enum.join("\n")
Nadia.send_message(chat_id, "```\n#{message}```", @as_markdown)
end
data
end
defmeter not_implemented(chat_id, command) do
Nadia.send_message(chat_id, "Not yet implemented")
warn =
"#{command} not yet implemented"
|> Logger.warn()
%{warn: warn}
end
defp to_button(text) when is_binary(text), do: %{text: text, callback_data: text}
defp to_button(button), do: button
defp timetable(data, stop) do
title = "*#{stop} - #{data.name}*\n"
timetable =
data.timetable
|> Enum.map(&to_line/1)
|> Enum.join("\n")
keyboard =
[
["/stop #{stop}", %{text: "refresh π", callback_data: "/stop IL#{stop}"}]
| data.timetable
|> Enum.map(& &1.line)
|> Enum.uniq()
|> Enum.sort()
|> Enum.map(&"/watch #{stop} #{&1}")
|> Enum.chunk_every(3, 3, [])
]
|> Enum.map(fn r -> Enum.map(r, &to_button/1) end)
{title <> "```\n#{timetable}```",
@as_markdown ++
[
{:reply_markup, %{inline_keyboard: keyboard}}
]}
end
defp send_timetable(data, chat_id, stop) do
{text, options} = timetable(data, stop)
{:ok, _} = Nadia.send_message(chat_id, text, options)
data
end
defp send_short_message(chat_id, stop, line) do
data = Stop.get_info(stop)
row =
data.timetable
|> Enum.find(fn row -> row.line == line end)
if row == nil || row.time == "Due" do
Quantum.delete_job(chat_id)
Logger.info("[#{chat_id}] Remove watch stop #{stop} line #{line}")
end
if row != nil do
Nadia.send_message(chat_id, "```#{row |> to_line}```", @as_markdown)
end
end
defp to_line(%{time: time, line: line, direction: direction})
when line == "Red" or line == "Green" do
time = String.pad_leading(time, 9)
"#{time} | #{direction}"
end
defp to_line(%{time: time, line: line, direction: direction}) do
line = String.pad_leading(line, 5)
"#{line} | #{time}"
end
end
# ---- lib/commands.ex ----
defmodule ForgeAbi.Util.TypeUrl do
@moduledoc """
Quick conversion among type, type_url and type_mod.
"""
require Logger
defmodule DummyCodec do
@moduledoc false
def encode(data), do: data
def decode(data), do: data
end
defmodule JsonCodec do
@moduledoc false
def encode(data), do: Jason.encode!(data)
def decode(data), do: Jason.decode!(data)
end
alias Google.Protobuf.Any
@table_name :forge_abi
@base_types [
# forge tx
{"fg:t:declare", ForgeAbi.DeclareTx},
{"fg:t:deploy_protocol", ForgeAbi.DeployProtocolTx},
# forge state
{"fg:s:account", ForgeAbi.AccountState},
{"fg:s:asset", ForgeAbi.AssetState},
{"fg:s:blacklist", ForgeAbi.BlacklistState},
{"fg:s:forge", ForgeAbi.ForgeState},
{"fg:s:stake", ForgeAbi.StakeState},
{"fg:s:statistics", ForgeAbi.StatisticsState},
{"fg:s:protocol", ForgeAbi.ProtocolState},
{"fg:s:root", ForgeAbi.RootState},
{"fg:s:swap", ForgeAbi.SwapState},
{"fg:s:delegate", ForgeAbi.DelegateState},
{"fg:s:asset_factory_state", ForgeAbi.AssetFactoryState},
# other type url
{"fg:x:block_info", ForgeAbi.BlockInfo},
{"fg:x:tx", ForgeAbi.Transaction},
{"fg:x:tx_info", ForgeAbi.TransactionInfo},
{"fg:x:tx_status", ForgeAbi.TxStatus},
{"fg:x:withdraw_item", ForgeAbi.WithdrawItem},
# forge tx
# account
{"fg:t:account_migrate", ForgeAbi.AccountMigrateTx},
{"fg:t:declare", ForgeAbi.DeclareTx},
{"fg:t:delegate", ForgeAbi.DelegateTx},
{"fg:t:revoke_delegate", ForgeAbi.RevokeDelegateTx},
{"fg:t:update_asset", ForgeAbi.UpdateAssetTx},
# asset
{"fg:t:acquire_asset", ForgeAbi.AcquireAssetTx},
{"fg:t:consume_asset", ForgeAbi.ConsumeAssetTx},
{"fg:t:create_asset", ForgeAbi.CreateAssetTx},
{"fg:x:asset_factory", ForgeAbi.AssetFactory},
# governance
{"fg:t:update_consensus_params", ForgeAbi.UpdateConsensusParamsTx},
{"fg:t:update_validator", ForgeAbi.UpdateValidatorTx},
{"fg:t:upgrade_node", ForgeAbi.UpgradeNodeTx},
# misc
{"fg:t:poke", ForgeAbi.PokeTx},
{"fg:t:refuel", ForgeAbi.RefuelTx},
# atomic swap
{"fg:t:retrieve_swap", ForgeAbi.RetrieveSwapTx},
{"fg:t:revoke_swap", ForgeAbi.RevokeSwapTx},
{"fg:t:setup_swap", ForgeAbi.SetupSwapTx},
# token swap
{"fg:t:approve_withdraw", ForgeAbi.ApproveWithdrawTx},
{"fg:t:deposit_token", ForgeAbi.DepositTokenTx},
{"fg:t:revoke_withdraw", ForgeAbi.RevokeWithdrawTx},
{"fg:t:withdraw_token", ForgeAbi.WithdrawTokenTx},
# trade
{"fg:t:exchange", ForgeAbi.ExchangeTx},
{"fg:t:transfer", ForgeAbi.TransferTx},
# dummy codec
{"fg:x:address", DummyCodec},
{"fg:x:json", JsonCodec}
]
@doc """
Add a type url or a list of type urls to the table
"""
@spec add([{String.t(), module()}]) :: :ok
def add(items) when is_list(items), do: Enum.each(items, &add/1)
@spec add({String.t(), module()}) :: :ok
def add({type_url, mod}), do: add(type_url, mod)
@spec add(String.t(), module()) :: :ok
def add(type_url, mod) do
:ets.insert(@table_name, {type_url, mod})
:ets.insert(@table_name, {mod, type_url})
:ok
end
@doc """
Initialize the table with prepopulated data
"""
@spec init() :: :ok
def init do
:ets.new(@table_name, [:named_table, :public, read_concurrency: true])
add(@base_types)
end
@doc """
Remove a type_url from the table
"""
@spec remove(String.t()) :: :ok
def remove(type_url) do
:ets.delete(@table_name, type_url)
:ok
end
@doc """
retrieve all types for introspection.
"""
@spec all :: Enumerable.t()
def all do
fn -> :ets.first(@table_name) end
|> Stream.resource(
fn
:"$end_of_table" -> {:halt, nil}
previous_key -> {[previous_key], :ets.next(@table_name, previous_key)}
end,
fn _ -> :ok end
)
|> Enum.map(fn key -> List.first(:ets.lookup(@table_name, key)) end)
end
@doc """
retrieve all urls for introspection.
"""
@spec all_urls :: Enumerable.t()
def all_urls do
fn -> :ets.first(@table_name) end
|> Stream.resource(
fn
:"$end_of_table" -> {:halt, nil}
previous_key -> {[previous_key], :ets.next(@table_name, previous_key)}
end,
fn _ -> :ok end
)
|> Enum.filter(fn key -> is_binary(key) and String.starts_with?(key, "fg:t") end)
end
@doc """
Retrieve mod by type_url
iex> ForgeAbi.Util.TypeUrl.add("fg:t:declare", ForgeAbi.DeclareTx)
iex> ForgeAbi.Util.TypeUrl.get("fg:t:declare")
ForgeAbi.DeclareTx
iex> ForgeAbi.Util.TypeUrl.add("fg:s:account", ForgeAbi.AccountState)
iex> ForgeAbi.Util.TypeUrl.get("fg:s:account")
ForgeAbi.AccountState
"""
@spec get(String.t() | module() | nil) :: module() | String.t() | nil
def get(nil), do: nil
def get(key) do
case :ets.lookup(@table_name, key) do
[{_key, value}] -> value
_ -> nil
end
end
@doc """
Decode the binary inside the Any.
"""
@spec decode_any(Any.t() | nil) :: {:error, term()} | {:ok, any()}
def decode_any(nil), do: {:error, :noent}
def decode_any(%{type_url: type_url, value: value}) do
case get(type_url) do
nil ->
Logger.debug("Failed to find #{type_url}.")
{:error, :noent}
mod ->
{:ok, mod.decode(value)}
end
rescue
e ->
Logger.warn("Failed to decode data: Error: #{inspect(e)}")
{:error, :invalid_data}
end
@doc """
Decode the binary inside the Any. Raise if error.
"""
@spec decode_any!(Any.t() | nil) :: any() | no_return()
def decode_any!(any) do
case decode_any(any) do
{:error, reason} -> raise "#{inspect(reason)}"
{:ok, data} -> data
end
end
@doc """
Encode a struct and wrap it with Any.
"""
@spec encode_any(map(), String.t() | nil) :: {:ok, Any.t()} | {:error, term()}
def encode_any(data, type_url \\ nil)
def encode_any(data, nil) do
type = data.__struct__
case get(type) do
nil ->
Logger.debug("Failed to find #{inspect(type)}.")
{:error, :noent}
type_url ->
encode_any(data, type_url)
end
rescue
e ->
Logger.warn("Failed to get type_url for data: Error: #{inspect(e)}")
{:error, :invalid_data}
end
def encode_any(data, type_url) do
case get(type_url) do
nil ->
Logger.warn("Failed to find #{type_url}.")
{:error, :noent}
mod ->
{:ok, Any.new(type_url: type_url, value: mod.encode(data))}
end
rescue
e ->
Logger.warn("Failed to encode data: Error: #{inspect(e)}")
{:error, :invalid_data}
end
@doc """
Encode a struct and wrap it with Any. Throw exception on error.
"""
@spec encode_any!(map(), String.t() | nil) :: Any.t() | no_return()
def encode_any!(data, type_url \\ nil) do
case encode_any(data, type_url) do
{:ok, result} -> result
{:error, reason} -> raise "#{inspect(reason)}"
end
end
end
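# Roundtrip sketch (assuming the generated protobuf structs expose `new/1`,
# as protobuf-elixir modules normally do):
#
#     ForgeAbi.Util.TypeUrl.init()
#     {:ok, any} = ForgeAbi.Util.TypeUrl.encode_any(ForgeAbi.DeclareTx.new(moniker: "alice"))
#     any.type_url  #=> "fg:t:declare"
#     {:ok, %ForgeAbi.DeclareTx{}} = ForgeAbi.Util.TypeUrl.decode_any(any)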
# ---- lib/forge_abi/util/type_url.ex ----
defmodule ExUnit do
defrecord Test, [:name, :case, :failure, :invalid] do
@moduledoc """
A record that keeps information about the test.
It is received by formatters and also accessible
in the metadata under the key `:test`.
"""
end
defrecord TestCase, [:name, :failure] do
@moduledoc """
A record that keeps information about the test case.
It is received by formatters and also accessible
in the metadata under the key `:case`.
"""
end
@moduledoc """
Basic unit testing framework for Elixir.
## Example
A basic setup for ExUnit is shown below:
# File: assertion_test.exs
# 1) Start ExUnit. You could also pass some options to the start function
# (see `configure/1` for the list of options)
ExUnit.start
# 2) Create a new test module (or "case") and use ExUnit.Case
defmodule AssertionTest do
# 3) Notice we pass async: true, this runs the test case
# concurrently with other test cases
use ExUnit.Case, async: true
# 4) A test is a function whose name starts with
# test and receives a context
def test_always_pass(_) do
assert true
end
# 5) It is recommended to use the test macro instead of def
test "the truth" do
assert true
end
end
To run the test above, all you need to do is to run the file
using elixir from the command line. Assuming you named your file
assertion_test.exs, you can run it as:
bin/elixir assertion_test.exs
## Case, callbacks and assertions
See `ExUnit.Case` and `ExUnit.Callbacks` for more information about
defining test cases.
The `ExUnit.Assertions` module contains a set of macros to easily
generate assertions with appropriate error messages.
## Integration with Mix
Mix is the project management and build tool for Elixir. Invoking `mix test`
from the command line will run tests in each file matching the pattern
"*_test.exs" found in the `test` directory of your project.
By convention, you could also create a test_helper.exs file inside the
`test` directory and put the code common to all tests there.
The minimum example of a test_helper.exs file would be:
# test/test_helper.exs
ExUnit.start
Then, in each test file, require test_helper.exs before defining test modules
(or cases):
# test/myproject_test.exs
Code.require_file "test_helper.exs", __DIR__
# ... test cases follow
"""
use Application.Behaviour
@doc false
def start(_type, []) do
ExUnit.Sup.start_link([])
end
@doc """
Starts up ExUnit and automatically set it up to run
tests at the VM exit. It accepts a set of options to
configure `ExUnit` (the same ones accepted by `configure/1`).
In case you want to run tests manually, skip calling this
function and rely on `configure/1` and `run/0` instead.
"""
def start(options // []) do
:application.start(:elixir)
:application.start(:ex_unit)
configure(options)
ExUnit.Server.start_load
System.at_exit fn
0 ->
failures = ExUnit.run
System.at_exit fn _ ->
if failures > 0, do: System.halt(1), else: System.halt(0)
end
_ ->
:ok
end
end
@doc """
Configures ExUnit.
## Options
ExUnit supports the following options given to start:
* `:formatter` - The formatter that will print results.
Defaults to `ExUnit.CLIFormatter`;
* `:max_cases` - Maximum number of cases to run in parallel.
Defaults to `:erlang.system_info(:schedulers_online)`;
"""
def configure(options) do
ExUnit.Server.merge_options(options)
end
@doc """
API used to run the tests. It is invoked automatically
if ExUnit is started via `ExUnit.start`.
Returns the number of failures.
"""
def run do
{ async, sync, options, load_us } = ExUnit.Server.start_run
ExUnit.Runner.run async, sync, options, load_us
end
end
# ---- lib/ex_unit/lib/ex_unit.ex ----
defmodule CouchGears.App do
@moduledoc """
CouchGears tries to load each application from the `apps/*` directory.
Check the `CouchGears.Initializer` module for more details.
Developers can use module functions to configure execution environment.
## Application (aka gear)
This is the main module for a gear application.
Actually, it's a simple wrapper around `Dynamo` module.
Check the `Dynamo` module for examples and documentation.
"""
@doc false
defmacro __using__(_) do
quote location: :keep do
use Dynamo
import unquote(__MODULE__)
end
end
@doc """
Applies the environment specific block for application.
## Examples
environment "dev" do
config :dynamo, compile_on_demand: true, reload_modules: true
end
It sets `compile_on_demand: true, reload_modules: true` opts for application which
started in the `dev` environment.
"""
defmacro environment(name, contents) when is_binary(name) do
if CouchGears.env == name, do: contents
end
defmacro environment(reg, contents) do
quote do: if Regex.match?(unquote(reg), CouchGears.env), do: unquote(contents)
end
@doc false
def normalize_config(config) when is_list(config) do
unless config[:handlers] do
config = [handlers: :undefined]
else
config = normalize_global_opts(config)
config = normalize_dbs_opts(config)
end
config
end
@doc false
def normalize_config(nil), do: normalize_config([])
@doc false
def normalize_config(app) do
normalize_config(app.config[:gear])
end
defp normalize_global_opts(config) do
global = config[:handlers][:global]
global_should_be_false = fn(config) ->
Keyword.put(config, :handlers, Keyword.put(config[:handlers], :global, false))
end
unless global, do: config = global_should_be_false.(config)
unless is_boolean(global), do: config = global_should_be_false.(config)
config
end
defp normalize_dbs_opts(config) do
dbs = config[:handlers][:dbs]
unless is_list(dbs) do
unless dbs == :all do
config = Keyword.put(config, :handlers, Keyword.put(config[:handlers], :dbs, []))
end
end
if is_list(dbs) do
dbs = Enum.map dbs, fn(db) ->
cond do
is_binary(db) -> binary_to_atom(db)
is_atom(db) -> db
end
end
config = Keyword.put(config, :handlers, Keyword.put(config[:handlers], :dbs, dbs))
end
config
end
end
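# `normalize_config/1` sketch: a missing gear config collapses to
# `[handlers: :undefined]`, per the first clause above.
#
#     CouchGears.App.normalize_config(nil)
#     #=> [handlers: :undefined]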
# ---- lib/couch_gears/app.ex ----
defmodule Mechanize.Page do
@moduledoc """
The HTML Page.
This module defines `Mechanize.Page` and the main functions for working with Pages.
The Page is created as a result of a successful HTTP request.
```
alias Mechanize.{Browser, Page}
browser = Browser.new()
page = Browser.get!(browser, "https://www.example.com")
```
"""
alias Mechanize.{Response, Query, Form}
alias Mechanize.Query.BadQueryError
alias Mechanize.Page.{Link, Element}
defstruct [:response_chain, :status_code, :content, :url, :browser, :parser]
@typedoc """
The HTML Page struct.
"""
@type t :: %__MODULE__{
response_chain: [Response.t()],
status_code: integer(),
content: String.t(),
url: String.t(),
browser: Browser.t(),
parser: module()
}
@typedoc """
A fragment of a page. It is a list of `Mechanize.Page.Element` structs in most cases,
but it could be any struct that implements the `Mechanize.Page.Elementable` protocol.
"""
@type fragment :: [any]
defmodule ClickError do
@moduledoc """
Raised when an error occurs on a click action.
"""
defexception [:message]
end
defmodule InvalidMetaRefreshError do
@moduledoc """
Raised when Mechanize cannot parse the `content` attribute of a
`<meta http-equiv="refresh" ...>` element inside the page content.
"""
defexception [:message]
end
@doc """
Returns the browser that fetched the `page`.
"""
@spec get_browser(t()) :: Browser.t()
def get_browser(nil), do: raise(ArgumentError, "page is nil")
def get_browser(%__MODULE__{} = page), do: page.browser
@doc """
Returns the `page` url.
"""
@spec get_url(t()) :: String.t()
def get_url(nil), do: raise(ArgumentError, "page is nil")
def get_url(%__MODULE__{} = page), do: page.url
@doc """
Returns the page content.
"""
@spec get_content(t()) :: String.t()
def get_content(%__MODULE__{} = page), do: page.content
@doc """
Extracts meta-refresh data from a `page`.
A two-element tuple, with an integer representing the delay in the first position and
a string representing the URL in the second position, will be returned if a
`<meta http-equiv="refresh" ...>` is found; otherwise `nil` will be returned.
Raises `Mechanize.Page.InvalidMetaRefreshError` if Mechanize cannot parse the `content` attribute
of the meta-refresh.
## Example
```
# <meta http-equiv="refresh" content="10; url=https://www.example.com">
{delay, url} = Page.meta_refresh(page)
delay # => 10
url # => https://www.example.com
```
"""
@spec meta_refresh(t()) :: {integer(), String.t() | nil} | nil
def meta_refresh(nil), do: raise(ArgumentError, "page is nil")
def meta_refresh(%__MODULE__{} = page) do
page
|> search("meta[http-equiv=refresh]")
|> List.first()
|> case do
nil ->
nil
meta ->
meta
|> Element.attr(:content)
|> parse_meta_refresh_content(page)
end
end
defp parse_meta_refresh_content(content, page) do
content =
content
|> String.split(";")
|> Enum.map(&String.trim/1)
|> Enum.join(";")
case Regex.scan(~r/^(\d+)(?:;url\s*=\s*(.*))?$/, content) do
[[_, delay, url]] -> {String.to_integer(delay), url}
[[_, delay]] -> {String.to_integer(delay), nil}
_ -> raise InvalidMetaRefreshError, "can't parse meta-refresh content of #{page.url}"
end
end
@doc """
Returns the response headers of a `page`.
In case Mechanize Browser has followed one or more redirects when `page` was fetched,
the headers returned correspond to the headers of the last response.
"""
@spec get_headers(t()) :: Header.headers()
def get_headers(%__MODULE__{} = page) do
page
|> get_response()
|> Response.headers()
end
@doc """
Returns the response of a `page`.
In case Mechanize Browser has followed one or more redirects when `page` was fetched,
the response returned corresponds to the last response.
"""
@spec get_response(t()) :: Response.t()
def get_response(%__MODULE__{} = page), do: List.first(page.response_chain)
@doc """
Clicks on a link that matches `query`.
Links are all elements defined by `a` and `area` html tags. In case of more than one link matches
the query, Mechanize will click on the first matched link.
Raises `Mechanize.Page.ClickError` if the matched link has no href attribute.
Raises `Mechanize.Query.BadQueryError` if no link matches the given `query`.
Raises additional exceptions from `Mechanize.Browser.request!/5`.
See `Mechanize.Query` module documentation to know all query capabilities in depth.
## Examples
Click on the first link with text equals to "Back":
```
Page.click_link!(page, "Back")
```
Click on the first link by its "href" attribute:
```
Page.click_link!(page, href: "sun.html")
```
"""
@dialyzer :no_return
@spec click_link!(t() | fragment(), Query.t()) :: t()
def click_link!(page_or_fragment, query) do
page_or_fragment
|> link_with!(query)
|> Link.click!()
end
@doc """
Returns a list containing all links from a page or fragment of a page, or an empty list in
case it has no links.
"""
@spec links(t() | fragment()) :: [Link.t()]
defdelegate links(page_or_fragment), to: __MODULE__, as: :links_with
@doc """
Return the first link matched by `query`.
Nil is returned if no link was matched.
See `Mechanize.Page.links_with/2` for more details about how to query links.
"""
@spec link_with(t() | fragment(), Query.t()) :: Link.t() | nil
def link_with(page_or_fragment, query \\ []) do
page_or_fragment
|> links_with(query)
|> List.first()
end
@doc """
Return the first link matched by `query`.
Raise `Mechanize.Query.BadQueryError` if no link was matched.
See `Mechanize.Page.links_with/2` for more details about how to query links.
"""
@spec link_with!(t() | fragment(), Query.t()) :: Link.t()
def link_with!(page_or_fragment, query \\ []) do
case link_with(page_or_fragment, query) do
nil -> raise BadQueryError, "no link found with given query"
link -> link
end
end
@doc """
Returns all links matched by `query`.
An empty list is returned if no link was matched.
See `Mechanize.Query` module documentation to know all query capabilities in depth.
## Examples
Retrieving all links containing "Back" text of `page`:
```
Page.links_with(page, "Back")
```
Retrieving all links by attribute:
```
Page.links_with(page, href: "sun.html")
```
"""
@spec links_with(t() | fragment(), Query.t()) :: [Link.t()]
def links_with(page_or_fragment, query \\ []) do
page_or_fragment
|> elements_with("a, area", query)
|> Enum.map(&Link.new/1)
end
@doc """
Returns all links matched by `query`.
Raises `Mechanize.Query.BadQueryError` if no link was matched.
See `Mechanize.Page.links_with/2` for more details about how to query links.
"""
@spec links_with!(t() | fragment(), Query.t()) :: [Link.t()]
def links_with!(page_or_fragment, query \\ []) do
case links_with(page_or_fragment, query) do
[] -> raise BadQueryError, "no link found with given query"
links -> links
end
end
@doc """
Returns the first form in a given page or fragment, or `nil` if the given page or fragment
does not have a form.
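## Example
A minimal sketch handling the `nil` case:
```
case Page.form(page) do
nil -> :no_form
form -> form
end
```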
"""
@spec form(t() | fragment()) :: Form.t() | nil
def form(page_or_fragment) do
page_or_fragment
|> forms()
|> List.first()
end
@doc """
Returns a list containing all forms of a given page or fragment.
If the page or fragment does not have a form, returns an empty list.
"""
@spec forms(t() | fragment()) :: [Form.t()]
defdelegate forms(page_or_fragment), to: __MODULE__, as: :forms_with
@doc """
Returns the first form that matches the `query` for the given page or fragment.
Returns `nil` if no form matches.
See `Mechanize.Query` module documentation to know all query capabilities in depth.
## Examples
Fetch the first form whose name is equal to "login".
```
%Form{} = Page.form_with(page, name: "login")
```
"""
@spec form_with(t() | fragment(), Query.t()) :: Form.t() | nil
def form_with(page_or_fragment, query \\ []) do
page_or_fragment
|> forms_with(query)
|> List.first()
end
@doc """
Returns a list containing all forms matching `query` for the given page or fragment.
Returns an empty list if no form matches.
See `Mechanize.Query` module documentation to know all query capabilities in depth.
## Examples
Fetch all forms whose name is equal to "login".
```
list = Page.forms_with(page, name: "login")
```
"""
@spec forms_with(t() | fragment(), Query.t()) :: [Form.t()]
def forms_with(page_or_fragment, query \\ []) do
page_or_fragment
|> elements_with("form", query)
|> Enum.map(&Form.new(page_or_fragment, &1))
end
@doc """
Searches for elements on a given page or fragment using a CSS selector.
A list of `Mechanize.Page.Element` structs matching the selector is returned. If no element
matches the selector, an empty list is returned instead.
See also `Mechanize.Page.elements_with/3`.
## Example
Printing in console todos of a todo html unordered list:
```
page
|> Page.search("ul.todo > li")
|> Enum.map(&Element.text/1)
|> Enum.each(&IO.puts/1)
```
"""
@spec search(t() | fragment(), String.t()) :: [Element.t()]
defdelegate search(page, selector), to: Query
@doc """
Returns all elements not matching the selector.
A list of `Mechanize.Page.Element` structs not matching the selector is returned. If all
elements match the selector, an empty list is returned instead.
## Example
Removing an unordered list with the "todo" class from the content of a page.
```
Page.filter_out(page, "ul.todo > li")
```
"""
@spec filter_out(t() | fragment(), String.t()) :: [Element.t()]
defdelegate filter_out(page, selector), to: Query
@doc """
Searches for elements on a given page or fragment using both a CSS selector and a query.
This function is similar to `Mechanize.Page.search/2`, but adds the power of queries on
top. First, the function matches the page or fragment against the CSS selector; then it
matches the remaining elements against the query. A list of `Mechanize.Page.Element`
structs is returned. If no element matches both the selector and the query, an empty
list is returned instead.
See `Mechanize.Query` module documentation to know all query capabilities in depth.
## Example
Printing in console todos of a todo html unordered list starting with "A":
```
page
|> Page.elements_with("ul.todo > li", text: ~r/^A/i)
|> Enum.map(&Element.text/1)
|> Enum.each(&IO.puts/1)
```
"""
@spec elements_with(t() | fragment(), String.t(), Query.t()) :: [Element.t()]
defdelegate elements_with(page_or_fragment, selector, query \\ []), to: Query
end
|
lib/mechanize/page.ex
| 0.930703 | 0.818483 |
page.ex
|
starcoder
|
defmodule Phoenix.HTML.Link do
@moduledoc """
Conveniences for working with links and URLs in HTML.
"""
import Phoenix.HTML.Tag
@doc """
Generates a link to the given URL.
## Examples
link("hello", to: "/world")
#=> <a href="/world">hello</a>
link("hello", to: URI.parse("https://elixir-lang.org"))
#=> <a href="https://elixir-lang.org">hello</a>
link("<hello>", to: "/world")
#=> <a href="/world"><hello></a>
link("<hello>", to: "/world", class: "btn")
#=> <a class="btn" href="/world"><hello></a>
link("delete", to: "/the_world", data: [confirm: "Really?"])
#=> <a data-confirm="Really?" href="/the_world">delete</a>
# If you supply a method other than `:get`:
link("delete", to: "/everything", method: :delete)
#=> <a href="/everything" data-csrf="csrf_token" data-method="delete" data-to="/everything">delete</a>
# You can use a `do ... end` block too:
link to: "/hello" do
"world"
end
#=> <a href="/hello">world<a>
## Options
* `:to` - the page to link to. This option is required
* `:method` - the method to use with the link. In case the
method is not `:get`, the link is generated inside the form
which sets the proper information. In order to submit the
form, JavaScript must be enabled
* `:csrf_token` - a custom token to use for links with a method
other than `:get`.
All other options are forwarded to the underlying `<a>` tag.
## Data attributes
Data attributes are added as a keyword list passed to the `data` key.
The following data attributes are supported:
* `data-confirm` - shows a confirmation prompt before
generating and submitting the form when `:method`
is not `:get`.
## CSRF Protection
By default, CSRF tokens are generated through `Plug.CSRFProtection`.
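A custom token can be supplied (the token value below is illustrative):
link("delete", to: "/the_world", method: :delete, csrf_token: "custom-token")
The supplied token ends up in the generated `data-csrf` attribute.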
"""
def link(text, opts)
def link(opts, do: contents) when is_list(opts) do
link(contents, opts)
end
def link(_text, opts) when not is_list(opts) do
raise ArgumentError, "link/2 requires a keyword list as second argument"
end
def link(text, opts) do
{to, opts} = pop_required_option!(opts, :to, "expected non-nil value for :to in link/2")
{method, opts} = Keyword.pop(opts, :method, :get)
if method == :get do
# Call link attributes to validate `to`
[data: data] = Phoenix.HTML.link_attributes(to, [])
content_tag(:a, text, [href: data[:to]] ++ Keyword.delete(opts, :csrf_token))
else
{csrf_token, opts} = Keyword.pop(opts, :csrf_token, true)
opts = Keyword.put_new(opts, :rel, "nofollow")
[data: data] = Phoenix.HTML.link_attributes(to, method: method, csrf_token: csrf_token)
content_tag(:a, text, [data: data, href: data[:to]] ++ opts)
end
end
@doc """
Generates a button tag that uses the JavaScript function `handleClick()`
(see `phoenix_html.js`) to submit the form data.
Useful to ensure that links that change data are not triggered by
search engines and other spidering software.
## Examples
button("hello", to: "/world")
#=> <button class="button" data-csrf="csrf_token" data-method="post" data-to="/world">hello</button>
button("hello", to: "/world", method: :get, class: "btn")
#=> <button class="btn" data-method="get" data-to="/world">hello</button>
## Options
* `:to` - the page to link to. This option is required
* `:method` - the method to use with the button. Defaults to :post.
All other options are forwarded to the underlying button input.
When the `:method` is set to `:get` and the `:to` URL contains query
parameters the generated form element will strip the parameters in accordance
with the [W3C](https://www.w3.org/TR/html401/interact/forms.html#h-17.13.3.4)
form specification.
## Data attributes
Data attributes are added as a keyword list passed to the
`data` key. The following data attributes are supported:
* `data-confirm` - shows a confirmation prompt before generating and
submitting the form.
"""
def button(opts, do: contents) do
button(contents, opts)
end
def button(text, opts) do
{to, opts} = pop_required_option!(opts, :to, "option :to is required in button/2")
{link_opts, opts} =
opts
|> Keyword.put_new(:method, :post)
|> Keyword.split([:method, :csrf_token])
link_attributes = Phoenix.HTML.link_attributes(to, link_opts)
content_tag(:button, text, link_attributes ++ opts)
end
defp pop_required_option!(opts, key, error_message) do
{value, opts} = Keyword.pop(opts, key)
unless value do
raise ArgumentError, error_message
end
{value, opts}
end
end
|
lib/phoenix_html/link.ex
| 0.713731 | 0.485112 |
link.ex
|
starcoder
|
defmodule GroupManager.Data.Item do
require Record
require Chatter.NetID
alias Chatter.NetID
alias Chatter.Serializer
Record.defrecord :item,
member: nil,
op: :get,
start_range: 0,
end_range: 0xffffffff,
port: 0
@type t :: record( :item,
member: NetID.t,
op: atom,
start_range: integer,
end_range: integer,
port: integer )
@spec new(NetID.t) :: t
def new(id)
when NetID.is_valid(id)
do
item(member: id)
end
defmacro is_valid_port(data) do
case Macro.Env.in_guard?(__CALLER__) do
true ->
quote do
is_integer(unquote(data)) and
unquote(data) >= 0 and
unquote(data) <= 0xffff
end
false ->
quote bind_quoted: binding() do
is_integer(data) and
data >= 0 and
data <= 0xffff
end
end
end
defmacro is_valid_uint32(data) do
case Macro.Env.in_guard?(__CALLER__) do
true ->
quote do
is_integer(unquote(data)) and
unquote(data) >= 0 and
unquote(data) <= 0xffffffff
end
false ->
quote bind_quoted: binding() do
is_integer(data) and
data >= 0 and
data <= 0xffffffff
end
end
end
defmacro is_valid(data) do
case Macro.Env.in_guard?(__CALLER__) do
true ->
quote do
is_tuple(unquote(data)) and tuple_size(unquote(data)) == 6 and
:erlang.element(1, unquote(data)) == :item and
# member
NetID.is_valid(:erlang.element(2, unquote(data))) and
# op
:erlang.element(3, unquote(data)) in [:add, :rmv, :get] and
# start_range
is_integer(:erlang.element(4, unquote(data))) and
:erlang.element(4, unquote(data)) >= 0 and
:erlang.element(4, unquote(data)) <= 0xffffffff and
# end_range
is_integer(:erlang.element(5, unquote(data))) and
:erlang.element(5, unquote(data)) >= 0 and
:erlang.element(5, unquote(data)) <= 0xffffffff and
# port
is_integer(:erlang.element(6, unquote(data))) and
:erlang.element(6, unquote(data)) >= 0 and
:erlang.element(6, unquote(data)) <= 0xffff and
# start_range <= end_range
:erlang.element(4, unquote(data)) <= :erlang.element(5, unquote(data))
end
false ->
quote bind_quoted: binding() do
is_tuple(data) and tuple_size(data) == 6 and
:erlang.element(1, data) == :item and
# member
NetID.is_valid(:erlang.element(2, data)) and
# op
:erlang.element(3, data) in [:add, :rmv, :get] and
# start_range
is_integer(:erlang.element(4, data)) and
:erlang.element(4,data) >= 0 and
:erlang.element(4, data) <= 0xffffffff and
# end_range
is_integer(:erlang.element(5, data)) and
:erlang.element(5, data) >= 0 and
:erlang.element(5, data) <= 0xffffffff and
# port
is_integer(:erlang.element(6, data)) and
:erlang.element(6, data) >= 0 and
:erlang.element(6, data) <= 0xffff and
# start_range <= end_range
:erlang.element(4, data) <= :erlang.element(5, data)
end
end
end
@spec valid?(t) :: boolean
def valid?(data)
when is_valid(data)
do
true
end
def valid?(_), do: false
@spec set(t, :add|:rmv|:get, integer, integer, integer) :: t
def set(itm, opv, from, to, port)
when is_valid(itm) and
opv in [:add, :rmv, :get] and
is_valid_uint32(from) and
is_valid_uint32(to) and
from <= to and
is_valid_port(port)
do
item(itm, op: opv)
|> item(start_range: from)
|> item(end_range: to)
|> item(port: port)
end
@spec member(t) :: NetID.t
def member(itm)
when is_valid(itm)
do
item(itm, :member)
end
@spec op(t) :: :add | :rmv | :get
def op(itm)
when is_valid(itm)
do
item(itm, :op)
end
@spec op(t, :add|:rmv|:get) :: t
def op(itm, v)
when is_valid(itm) and
v in [:add, :rmv, :get]
do
item(itm, op: v)
end
@spec start_range(t) :: integer
def start_range(itm)
when is_valid(itm)
do
item(itm, :start_range)
end
@spec start_range(t, integer) :: t
def start_range(itm, v)
when is_valid(itm) and
is_valid_uint32(v)
do
item(itm, start_range: v)
end
@spec end_range(t) :: integer
def end_range(itm)
when is_valid(itm)
do
item(itm, :end_range)
end
@spec end_range(t, integer) :: t
def end_range(itm, v)
when is_valid(itm) and
is_valid_uint32(v)
do
item(itm, end_range: v)
end
@spec port(t) :: integer
def port(itm)
when is_valid(itm)
do
item(itm, :port)
end
@spec port(t, integer) :: t
def port(itm, v)
when is_valid(itm) and
is_valid_port(v)
do
item(itm, port: v)
end
@spec encode_with(t, map) :: binary
def encode_with(itm, id_map)
when is_valid(itm) and
is_map(id_map)
do
id = Map.fetch!(id_map, item(itm, :member))
bin_member = id |> Serializer.encode_uint
bin_op = item(itm, :op) |> op_to_id |> Serializer.encode_uint
bin_start = item(itm, :start_range) |> Serializer.encode_uint
bin_end = item(itm, :end_range) |> Serializer.encode_uint
bin_port = item(itm, :port) |> Serializer.encode_uint
<< bin_member :: binary,
bin_op :: binary,
bin_start :: binary,
bin_end :: binary,
bin_port :: binary >>
end
defp op_to_id(:add), do: 1
defp op_to_id(:rmv), do: 2
defp op_to_id(:get), do: 3
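# Illustrative round-trip sketch (`some_net_id` and the ID maps are assumed):
#
#   id_map = %{some_net_id => 1}
#   bin = new(some_net_id) |> encode_with(id_map)
#   {decoded, <<>>} = decode_with(bin, %{1 => some_net_id})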
@spec decode_with(binary, map) :: {t, binary}
def decode_with(bin, id_map)
when is_binary(bin) and
byte_size(bin) > 0 and
is_map(id_map)
do
{id, remaining} = Serializer.decode_uint(bin)
{decoded_op_raw, remaining} = Serializer.decode_uint(remaining)
{decoded_start, remaining} = Serializer.decode_uint(remaining)
{decoded_end, remaining} = Serializer.decode_uint(remaining)
{decoded_port, remaining} = Serializer.decode_uint(remaining)
decoded_member = Map.fetch!(id_map, id)
decoded_op = id_to_op(decoded_op_raw)
{ new(decoded_member)
|> op(decoded_op)
|> start_range(decoded_start)
|> end_range(decoded_end)
|> port(decoded_port),
remaining }
end
defp id_to_op(1), do: :add
defp id_to_op(2), do: :rmv
defp id_to_op(3), do: :get
end
|
lib/group_manager/data/item.ex
| 0.639173 | 0.505554 |
item.ex
|
starcoder
|
defmodule Plug.Router do
@moduledoc ~S"""
A DSL to define a routing algorithm that works with Plug.
It provides a set of macros to generate routes. For example:
defmodule AppRouter do
use Plug.Router
import Plug.Conn
plug :match
plug :dispatch
get "/hello" do
send_resp(conn, 200, "world")
end
match _ do
send_resp(conn, 404, "oops")
end
end
Each route needs to return a connection, as per the Plug spec.
Defining a catch-all `match`, as in the example above, is recommended;
otherwise routing fails with a function clause error.
The router is a plug, which means it can be invoked as:
AppRouter.call(conn, [])
Notice the router contains a plug stack and by default it requires
two plugs: `match` and `dispatch`. `match` is responsible for
finding a matching route which is then forwarded to `dispatch`.
This means users can easily hook into the router mechanism and add
behaviour before match, before dispatch or after both.
## Routes
get "/hello" do
send_resp(conn, 200, "world")
end
In the example above, a request will only match if it is
a `GET` request to the route "/hello". The supported
HTTP methods are `get`, `post`, `put`, `patch`, `delete`
and `options`.
A route can also specify parameters which will then be
available in the function body:
get "/hello/:name" do
send_resp(conn, 200, "hello #{name}")
end
Routes allow for globbing, which matches the remaining parts of a route
and can be made available as a parameter in the function body. Note that
a glob can't be followed by other segments:
get "/hello/*_rest" do
send_resp(conn, 200, "matches all routes starting with /hello")
end
get "/hello/*glob" do
send_resp(conn, 200, "route after /hello: #{inspect glob}")
end
Finally, a general `match` function is also supported:
match "/hello" do
send_resp(conn, 200, "world")
end
A `match` will match any route regardless of the HTTP method.
Check `match/3` for more information on how route compilation
works and a list of supported options.
## Routes compilation
All routes are compiled to a match function that receives
three arguments: the method, the request path split on "/"
and the connection. Consider this example:
match "/foo/bar", via: :get do
send_resp(conn, 200, "hello world")
end
It is compiled to:
defp match("GET", ["foo", "bar"], conn) do
send_resp(conn, 200, "hello world")
end
This opens up a few possibilities. First, guards can be given
to match:
match "/foo/:bar" when size(bar) <= 3, via: :get do
send_resp(conn, 200, "hello world")
end
Second, a list of split path segments (which is the compiled result)
is also allowed:
match ["foo", bar], via: :get do
send_resp(conn, 200, "hello world")
end
After a match is found, the block given as `do/end` is stored
as a function in the connection. This function is then retrieved
and invoked in the `dispatch` plug.
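Conceptually (a simplified sketch, not the exact generated code), `dispatch`
retrieves and invokes that stored function:
route = Map.get(conn.private, :plug_route)
route.(conn)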
"""
@doc false
defmacro __using__(_) do
quote location: :keep do
import Plug.Builder, only: [plug: 1, plug: 2]
import Plug.Router
@behaviour Plug
def init(opts) do
opts
end
def match(conn, _opts) do
Plug.Conn.assign_private(conn,
:plug_route,
do_match(conn.method, conn.path_info))
end
def dispatch(%Plug.Conn{} = conn, _opts) do
Map.get(conn.private, :plug_route).(conn)
end
defoverridable [init: 1, dispatch: 2]
Module.register_attribute(__MODULE__, :plugs, accumulate: true)
@before_compile Plug.Router
end
end
@doc false
defmacro __before_compile__(env) do
plugs = Module.get_attribute(env.module, :plugs)
{conn, body} = Plug.Builder.compile(plugs)
quote do
import Plug.Router, only: []
def call(unquote(conn), _), do: unquote(body)
end
end
## Match
@doc """
Main API to define routes. It accepts an expression representing
the path and many options allowing the match to be configured.
## Examples
match "/foo/bar", via: :get do
send_resp(conn, 200, "hello world")
end
## Options
`match` accepts the following options:
* `:via` - matches the route against some specific HTTP methods
* `:do` - contains the implementation to be invoked in case
the route matches
"""
defmacro match(expression, options, contents \\ []) do
compile(:build_match, expression, Keyword.merge(contents, options), __CALLER__)
end
@doc """
Dispatches to the path only if it is get request.
See `match/3` for more examples.
"""
defmacro get(path, contents) do
compile(:build_match, path, Keyword.put(contents, :via, :get), __CALLER__)
end
@doc """
Dispatches to the path only if it is post request.
See `match/3` for more examples.
"""
defmacro post(path, contents) do
compile(:build_match, path, Keyword.put(contents, :via, :post), __CALLER__)
end
@doc """
Dispatches to the path only if it is put request.
See `match/3` for more examples.
"""
defmacro put(path, contents) do
compile(:build_match, path, Keyword.put(contents, :via, :put), __CALLER__)
end
@doc """
Dispatches to the path only if it is patch request.
See `match/3` for more examples.
"""
defmacro patch(path, contents) do
compile(:build_match, path, Keyword.put(contents, :via, :patch), __CALLER__)
end
@doc """
Dispatches to the path only if it is delete request.
See `match/3` for more examples.
"""
defmacro delete(path, contents) do
compile(:build_match, path, Keyword.put(contents, :via, :delete), __CALLER__)
end
@doc """
Dispatches to the path only if it is options request.
See `match/3` for more examples.
"""
defmacro options(path, contents) do
compile(:build_match, path, Keyword.put(contents, :via, :options), __CALLER__)
end
@doc """
Forwards requests to another Plug. The path_info of the forwarded
connection will exclude the portion of the path specified in the
call to `forward`.
## Examples
forward "/users", to: UserRouter
## Options
`forward` accepts the following options:
* `:to` - a Plug where the requests will be forwarded
All remaining options are passed to the underlying plug.
"""
defmacro forward(path, options) when is_binary(path) do
quote do
{target, options} = Keyword.pop(unquote(options), :to)
if is_nil(target) or !is_atom(target) do
raise ArgumentError, message: "expected :to to be an alias or an atom"
end
@plug_forward_target target
@plug_forward_opts target.init(options)
match unquote(path <> "/*glob") do
Plug.Router.Utils.forward(var!(conn), var!(glob), @plug_forward_target, @plug_forward_opts)
end
end
end
## Match Helpers
# Entry point for both forward and match that is actually
# responsible to compile the route.
defp compile(builder, expr, options, caller) do
methods = options[:via]
body = options[:do]
unless body do
raise ArgumentError, message: "expected :do to be given as option"
end
{method, guard} = convert_methods(List.wrap(methods))
{path, guards} = extract_path_and_guards(expr, guard)
{_vars, match} = apply Plug.Router.Utils, builder, [Macro.expand(path, caller)]
quote do
defp do_match(unquote(method), unquote(match)) when unquote(guards) do
fn var!(conn) -> unquote(body) end
end
end
end
# Convert the verbs given with :via into a variable
# and guard set that can be added to the dispatch clause.
defp convert_methods([]) do
{quote(do: _), true}
end
defp convert_methods([method]) do
{Plug.Router.Utils.normalize_method(method), true}
end
defp convert_methods(methods) do
methods = Enum.map methods, &Plug.Router.Utils.normalize_method(&1)
var = quote do: method
{var, quote(do: unquote(var) in unquote(methods))}
end
# Extract the path and guards from the path.
defp extract_path_and_guards({:when, _, [path, guards]}, true) do
{path, guards}
end
defp extract_path_and_guards({:when, _, [path, guards]}, extra_guard) do
{path, {:and, [], [guards, extra_guard]}}
end
defp extract_path_and_guards(path, extra_guard) do
{path, extra_guard}
end
end
|
lib/plug/router.ex
| 0.881328 | 0.525125 |
router.ex
|
starcoder
|
defmodule Cog.BusDriver do
@moduledoc """
BusDriver is responsible for configuring, starting, and stopping Cog's embedded
message bus.
## Configuration
BusDriver can be configured with the following configuration parameters.
`:host` - Listen host name or IP address. Defaults to `"127.0.0.1"`.
`:port` - Listen port. Defaults to `1883`.
`:cert_file` - Path to SSL certificate file. Required for SSL support.
`:key_file` - Path to SSL key file. Required for SSL support.
## Example configuration
```
config :cog, :message_bus,
host: "192.168.1.133",
port: 10883,
cert_file: "/etc/cog/ssl.cert",
key_file: "/etc/cog/ssl.key"
```
"""
require Logger
use GenServer
def start_link() do
GenServer.start_link(__MODULE__, [], name: __MODULE__)
end
def init(_) do
case configure_message_bus() do
{:ok, [app_name]} ->
case Application.ensure_all_started(app_name) do
{:ok, _} ->
:erlang.process_flag(:trap_exit, true)
{:ok, app_name}
error ->
{:stop, error}
end
error ->
Logger.error("Message bus configuration error: #{inspect error}")
{:stop, :shutdown}
end
end
def terminate(_, _) do
Application.stop(:emqttd)
Application.stop(:esockd)
end
defp configure_message_bus() do
case prepare_bindings() do
{:ok, common_bindings, cert_bindings} ->
case load_private_config("common_mqtt") do
{:ok, _} ->
if length(cert_bindings) == 2 do
# SSL enabled
Logger.info("Message bus configured for SSL")
load_private_config("ssl_mqtt", [{:mqtt_type, :mqtts}|common_bindings] ++ cert_bindings)
else
# SSL disabled
Logger.info("Message bus configured for plain TCP")
load_private_config("plain_mqtt", [{:mqtt_type, :mqtt}|common_bindings])
end
error ->
error
end
error ->
error
end
end
defp prepare_bindings() do
bus_opts = Application.get_env(:cog, :message_bus)
case prepare_host(Keyword.get(bus_opts, :host, "127.0.0.1")) do
{:ok, mqtt_host} ->
mqtt_port = Keyword.get(bus_opts, :port, 1883)
cert_file = Keyword.get(bus_opts, :cert_file) |> convert_string
key_file = Keyword.get(bus_opts, :key_file) |> convert_string
common = [mqtt_addr: mqtt_host,
mqtt_port: mqtt_port]
cond do
cert_file != nil and key_file != nil ->
{:ok, common, [cert: cert_file, key: key_file]}
cert_file == nil and key_file == nil ->
{:ok, common, []}
cert_file == nil ->
Logger.error("Message bus SSL configuration error. Path to certificate file is empty.")
{:error, {:missing_config, :cert_file}}
key_file == nil ->
Logger.error("Message bus SSL configuration error. Path to key file is empty.")
{:error, {:missing_config, :key_file}}
end
error ->
error
end
end
defp convert_string(nil), do: nil
defp convert_string(value), do: String.to_charlist(value)
defp load_private_config(name, bindings \\ []) do
config = File.read!(Path.join([:code.priv_dir(:cog), "config", name <> ".exs"]))
case Code.eval_string(config, bindings) do
{:ok, results} ->
[{_, agent}|_] = Enum.reverse(results)
config = Mix.Config.Agent.get(agent)
Mix.Config.Agent.stop(agent)
Mix.Config.validate!(config)
{:ok, Mix.Config.persist(config)}
error ->
error
end
end
defp prepare_host(host) when is_binary(host),
do: prepare_host(String.to_charlist(host))
defp prepare_host(host) do
case :inet.getaddr(host, :inet) do
{:ok, addr} ->
{:ok, addr}
{:error, v4_error} ->
{:error, "#{host}: #{:inet.format_error(v4_error)}"}
end
end
end
|
lib/cog/bus_driver.ex
| 0.835618 | 0.551272 |
bus_driver.ex
|
starcoder
|
defmodule Automaton.Blackboard do
@moduledoc """
The Node Blackboard
Blackboard Architectures
A blackboard system isn't a decision making tool in its own right. It is a
mechanism for coordinating the actions of several decision makers. The
individual decision making systems can be implemented in any way: from a
decision tree to an expert system or even to learning tools such as neural
networks. It is this flexibility that makes blackboard architectures
appealing.
The Problem
We would like to be able to coordinate the decision making of several different
techniques. Each technique may be able to make suggestions as to what to do
next, but the final decision can only be made if they cooperate.
The Blackboard Metaphor
Blackboard-based problem solving is often presented using the following
metaphor: Imagine a group of human specialists seated next to a large
blackboard. The specialists are working cooperatively to solve a problem,
using the blackboard as the workplace for developing the solution. Problem
solving begins when the problem and initial data are written onto the
blackboard. The specialists watch the blackboard, looking for an
opportunity to apply their expertise to the developing solution. When a
specialist finds sufficient information to make a contribution, she
records the contribution on the blackboard, hopefully enabling other
specialists to apply their expertise. This process of adding contributions
to the blackboard continues until the problem has been solved.
This simple metaphor captures a number of the important characteristics of
blackboard systems, each of which is described separately below.
- Independence of expertise (I think, therefore I am.)
- Diversity in problem-solving techniques (I don't think like you do.)
- Flexible representation of blackboard information (If you can draw it, I can use it.)
- Common interaction language (What'd you say?)
- Positioning metrics (You could look it up.)
If the problem being solved by our human specialists is complex and the number of their
contributions made on the blackboard begins to grow, quickly locating pertinent information
becomes a problem. A specialist should not have to scan the entire blackboard to see if a
particular item has been placed on the blackboard by another specialist.
One solution is to subdivide the blackboard into regions, each corresponding to a particular
kind of information. This approach is commonly used in blackboard systems, where different
levels, planes, or multiple blackboards are used to group related objects.
Similarly, ordering metrics can be used within each region, to sort information numerically,
alphabetically, or by relevance. Advanced blackboard-system frameworks provide sophisticated
multidimensional metrics for efficiently locating blackboard objects of interest.
- Event-based activation (Is anybody there?)
- Need for control (Itβs my turn.)
- Incremental solution generation (Step by step, inch by inch. . .)
"""
defmacro __using__(_user_opts) do
end
end
|
lib/automata/blackboard/automaton_blackboard.ex
| 0.756268 | 0.757436 |
automaton_blackboard.ex
|
starcoder
|
defmodule Wallaby.Query do
@moduledoc ~S"""
Provides the query DSL.
Queries are used to locate and retrieve DOM elements from a browser (see
`Wallaby.Browser`). You create queries like so:
```
Query.css(".some-css")
Query.xpath(".//input")
```
## Form elements
There are several custom finders for locating form elements. Each of these allows
finding by their name, id text, or label text. This allows for more robust querying
and decouples the query from presentation selectors like css classes.
```
Query.text_field("<NAME>")
Query.checkbox("Checkbox")
Query.select("A Fancy Select Box")
```
## Query Options
All of the query operations accept the following options:
* `:count` - The number of elements that should be found (default: 1).
* `:visible` - Determines if the query should return only visible elements (default: true).
* `:text` - Text that should be found inside the element (default: nil).
* `:at` - The position number of the element to select if multiple elements satisfy the selection criteria. (:all for all elements)
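For example (the selectors and values below are illustrative):
```
Query.css(".todo-list li", count: 3)
Query.css(".banner", visible: false, text: "Welcome")
```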
## Re-using queries
It is often convenient to re-use queries. The easiest way is to use module
attributes:
```
@name_field Query.text_field("<NAME>")
@submit_button Query.button("Save")
```
If the queries need to be dynamic then you should create a module that
encapsulates the queries as functions:
```
defmodule TodoListPage do
def todo_list do
Query.css(".todo-list")
end
def todos(count) do
Query.css(".todo", count: count)
end
end
```
## What does my query do?
Wanna check out what exactly your query will do? Look no further than
`Wallaby.Query.compile/1` - it takes a query and returns the css or xpath
query that will be sent to the driver:
iex> Wallaby.Query.compile Wallaby.Query.text("my text")
{:xpath, ".//*[contains(normalize-space(text()), \"my text\")]"}
So, whenever you're not sure what a specific query will do, just compile
it to get all the details!
"""
alias __MODULE__
alias Wallaby.Query.XPath
alias Wallaby.Element
defstruct method: nil,
selector: nil,
html_validation: nil,
conditions: [],
result: []
@type method :: :css
| :xpath
| :link
| :button
| :fillable_field
| :checkbox
| :radio_button
| :option
| :select
| :file_field
@type attribute_key_value_pair :: {String.t, String.t}
@type selector :: String.t
| attribute_key_value_pair
@type html_validation :: :bad_label
| :button_type
| nil
@type conditions :: [
count: non_neg_integer,
text: String.t,
visible: boolean(),
minimum: non_neg_integer,
at: pos_integer
]
@type result :: list(Element.t)
@type opts :: nonempty_list()
@type t :: %__MODULE__{
method: method(),
selector: selector(),
html_validation: html_validation(),
conditions: conditions(),
result: result(),
}
@type compiled :: {:xpath | :css, String.t}
@doc """
Literally queries for the css selector you provide.
"""
def css(selector, opts \\ []) do
%Query{
method: :css,
selector: selector,
conditions: build_conditions(opts),
}
end
@doc """
Literally queries for the xpath selector you provide.
"""
def xpath(selector, opts \\ []) do
%Query{
method: :xpath,
selector: selector,
conditions: build_conditions(opts)
}
end
@doc """
Checks if the provided text is contained anywhere.
"""
def text(selector, opts \\ []) do
%Query{
method: :text,
selector: selector,
conditions: build_conditions(opts)
}
end
@doc """
Checks if the provided value is contained anywhere.
"""
def value(selector, opts \\ []) do
attribute("value", selector, opts)
end
@doc """
Checks if the provided attribute, value pair is contained anywhere.
"""
def attribute(name, value, opts \\ []) do
%Query{
method: :attribute,
selector: {name, value},
conditions: build_conditions(opts)
}
end
@doc """
See `Wallaby.Query.fillable_field/2`.
"""
def text_field(selector, opts \\ []) do
%Query{
method: :fillable_field,
selector: selector,
conditions: build_conditions(opts),
html_validation: :bad_label,
}
end
@doc """
Looks for a text input field where the provided selector is the id, name or
placeholder of the text field itself or alternatively the id or the text of
the label.
"""
def fillable_field(selector, opts \\ []) do
%Query{
method: :fillable_field,
selector: selector,
conditions: build_conditions(opts),
html_validation: :bad_label,
}
end
@doc """
Looks for a radio button where the provided selector is the id, name or
placeholder of the radio button itself or alternatively the id or the text of
the label.
"""
def radio_button(selector, opts \\ []) do
%Query{
method: :radio_button,
selector: selector,
conditions: build_conditions(opts),
html_validation: :bad_label,
}
end
@doc """
Looks for a checkbox where the provided selector is the id, name or
placeholder of the checkbox itself or alternatively the id or the text of
the label.
"""
def checkbox(selector, opts \\ []) do
%Query{
method: :checkbox,
selector: selector,
conditions: build_conditions(opts),
html_validation: :bad_label,
}
end
@doc """
Looks for a select box where the provided selector is the id or name of the
select box itself or alternatively the id or the text of the label.
"""
def select(selector, opts \\ []) do
%Query{
method: :select,
selector: selector,
conditions: build_conditions(opts),
html_validation: :bad_label,
}
end
@doc """
Looks for an option that contains the given text.
"""
def option(selector, opts \\ []) do
%Query{
method: :option,
selector: selector,
conditions: build_conditions(opts),
html_validation: :bad_label,
}
end
@doc """
Looks for a button (literal button or input type button, submit, image or
reset) where the provided selector is the id, name, value, alt or title of the
button.
"""
def button(selector, opts \\ []) do
%Query{
method: :button,
selector: selector,
conditions: build_conditions(opts),
html_validation: :button_type,
}
end
@doc """
Looks for a link where the selector is the id, link text, title of the link
itself or the alt of an image child node.
"""
def link(selector, opts \\ []) do
%Query{
method: :link,
selector: selector,
conditions: build_conditions(opts),
html_validation: :bad_label,
}
end
@doc """
Looks for a file input where the selector is the id or name of the file input
itself or the id or text of the label.
"""
def file_field(selector, opts \\ []) do
%Query{
method: :file_field,
selector: selector,
conditions: build_conditions(opts),
html_validation: :bad_label,
}
end
def validate(query) do
cond do
query.conditions[:minimum] > query.conditions[:maximum] ->
{:error, :min_max}
!Query.visible?(query) && Query.inner_text(query) ->
{:error, :cannot_set_text_with_invisible_elements}
true ->
{:ok, query}
end
end
@doc """
Compiles a query into css or xpath so its ready to be sent to the driver
iex> Wallaby.Query.compile Wallaby.Query.text("my text")
{:xpath, ".//*[contains(normalize-space(text()), \\"my text\\")]"}
iex> Wallaby.Query.compile Wallaby.Query.css("#some-id")
{:css, "#some-id"}
"""
@spec compile(t) :: compiled
def compile(%{method: :css, selector: selector}), do: {:css, selector}
def compile(%{method: :xpath, selector: selector}), do: {:xpath, selector}
def compile(%{method: :link, selector: selector}), do: {:xpath, XPath.link(selector)}
def compile(%{method: :button, selector: selector}), do: {:xpath, XPath.button(selector)}
def compile(%{method: :fillable_field, selector: selector}), do: {:xpath, XPath.fillable_field(selector)}
def compile(%{method: :checkbox, selector: selector}), do: {:xpath, XPath.checkbox(selector)}
def compile(%{method: :radio_button, selector: selector}), do: {:xpath, XPath.radio_button(selector)}
def compile(%{method: :option, selector: selector}), do: {:xpath, XPath.option(selector)}
def compile(%{method: :select, selector: selector}), do: {:xpath, XPath.select(selector)}
def compile(%{method: :file_field, selector: selector}), do: {:xpath, XPath.file_field(selector)}
def compile(%{method: :text, selector: selector}), do: {:xpath, XPath.text(selector)}
def compile(%{method: :attribute, selector: {name, value}}), do: {:xpath, XPath.attribute(name, value)}
def visible?(%Query{conditions: conditions}) do
Keyword.get(conditions, :visible)
end
def count(%Query{conditions: conditions}) do
Keyword.get(conditions, :count)
end
def at_number(%Query{conditions: conditions}) do
Keyword.get(conditions, :at)
end
def inner_text(%Query{conditions: conditions}) do
Keyword.get(conditions, :text)
end
def result(query) do
if specific_element_requested(query) do
[element] = query.result
element
else
query.result
end
end
def specific_element_requested(query) do
count(query) == 1 || at_number(query) != :all
end
def matches_count?(%{conditions: conditions}, count) do
cond do
conditions[:count] == :any ->
count > 0
conditions[:count] ->
conditions[:count] == count
true ->
!(conditions[:minimum] && conditions[:minimum] > count) &&
!(conditions[:maximum] && conditions[:maximum] < count)
end
end
defp build_conditions(opts) do
opts
|> add_visibility
|> add_text
|> add_count
|> add_at
end
defp add_visibility(opts) do
Keyword.put_new(opts, :visible, true)
end
defp add_text(opts) do
Keyword.put_new(opts, :text, nil)
end
defp add_count(opts) do
if opts[:count] == nil && opts[:minimum] == nil && opts[:maximum] == nil do
Keyword.put(opts, :count, 1)
else
opts
|> Keyword.put_new(:count, opts[:count])
|> Keyword.put_new(:minimum, opts[:minimum])
|> Keyword.put_new(:maximum, opts[:maximum])
end
end
defp add_at(opts) do
Keyword.put_new(opts, :at, :all)
end
end
|
lib/wallaby/query.ex
| 0.870721 | 0.842345 |
query.ex
|
starcoder
|
defmodule Slipstream.Configuration do
@definition [
uri: [
doc: """
The endpoint to which the websocket will connect. Schemes of "ws" and
"wss" are supported, and a scheme must be provided. Either binaries or
`URI` structs are accepted. E.g. `"ws://localhost:4000/socket/websocket"`.
""",
type: {:custom, __MODULE__, :parse_uri, []},
required: true
],
heartbeat_interval_msec: [
doc: """
The time between heartbeat messages. A value of `0` will disable automatic
heartbeat sending. Note that a Phoenix.Channel will close out a connection
after 60 seconds of inactivity (`60_000`).
""",
type: :non_neg_integer,
default: 30_000
],
headers: [
doc: """
A set of headers to merge with the request headers when GETing the
websocket URI. Headers must be provided as two-tuples where both elements
are binaries. Casing of these headers is inconsequential.
""",
type: {:list, {:custom, __MODULE__, :parse_pair_of_strings, []}},
default: []
],
json_parser: [
doc: """
A JSON parser module which exports at least `encode/1` and `decode/2`.
""",
type: :atom,
default: Jason
],
reconnect_after_msec: [
doc: """
A list of times to reference for trying reconnection when
`Slipstream.reconnect/0` is used to request reconnection. The msec time
will be fetched based on its position in the list with
`Enum.at(reconnect_after_msec, try_number)`. If the number of tries
exceeds the length of the list, the final value will be repeated.
""",
type: {:list, :non_neg_integer},
default: [10, 50, 100, 150, 200, 250, 500, 1_000, 2_000, 5_000]
],
rejoin_after_msec: [
doc: """
A list of times to reference for trying to rejoin a topic when
`Slipstream.rejoin/0` is used. The msec time
will be fetched based on its position in the list with
`Enum.at(rejoin_after_msec, try_number)`. If the number of tries
exceeds the length of the list, the final value will be repeated.
""",
type: {:list, :non_neg_integer},
default: [100, 500, 1_000, 2_000, 5_000, 10_000]
],
gun_open_options: [
doc: """
A map of options to pass to `:gun.open/3`. See the `:gun` documentation
for more information. Note that `:gun` does not support websocket over
HTTP2 and that `:gun` naively prefers HTTP2 when connecting over TLS.
The `:protocols => [:http]` option will be merged in by default to allow
`"wss"` connections out of the box.
""",
type: {:custom, __MODULE__, :parse_gun_open_options, []},
default: %{protocols: [:http]}
],
test_mode?: [
doc: """
Whether or not to start-up the client in test-mode. See
`Slipstream.SocketTest` for notes on testing Slipstream clients.
""",
type: :boolean,
default: false
]
]
@moduledoc """
Configuration for a Slipstream websocket connection
Slipstream server process configuration is passed in with
`Slipstream.connect/2` (or `Slipstream.connect!/2`), and so all configuration
is evaluated and validated at runtime, as opposed to compile-time validation.
You should not expect to see validation errors on configuration unless you
force the validation at compile-time, e.g.:
# you probably don't want to do this...
defmodule MyClient do
@config Application.compile_env!(:my_app, __MODULE__)
use Slipstream
def start_link(args) do
Slipstream.start_link(__MODULE__, args, name: __MODULE__)
end
def init(_args), do: {:ok, connect!(@config)}
..
end
This approach will validate the configuration at compile-time, but you
will be unable to change the configuration after compilation, so any
secrets contained in the configuration (e.g. a basic-auth request header)
will be compiled into the beam files.
See the docs for `c:Slipstream.init/1` for a safer approach.
## Options
#{NimbleOptions.docs(@definition)}
Note that a Phoenix.Channel defined with
```elixir
socket "/socket", UserSocket, ..
```
Can be connected to at `/socket/websocket`.
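For example, a minimal configuration (the URI below is illustrative) can be
validated with:
```elixir
Slipstream.Configuration.validate!(uri: "ws://localhost:4000/socket/websocket")
```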
"""
defstruct Keyword.keys(@definition)
@type t :: %__MODULE__{
uri: %URI{},
heartbeat_interval_msec: non_neg_integer(),
headers: [{String.t(), String.t()}],
json_parser: module(),
reconnect_after_msec: [non_neg_integer()],
rejoin_after_msec: [non_neg_integer()]
}
@known_schemes ~w[ws wss]
@doc """
Validates a proposed configuration
"""
@doc since: "0.1.0"
@spec validate(Keyword.t()) ::
{:ok, t()} | {:error, %NimbleOptions.ValidationError{}}
def validate(opts) do
case NimbleOptions.validate(opts, @definition) do
{:ok, validated} -> {:ok, struct(__MODULE__, validated)}
{:error, reason} -> {:error, reason}
end
end
@doc """
Validates a proposed configuration, raising on error
"""
@spec validate!(Keyword.t()) :: t()
def validate!(opts) do
validated = NimbleOptions.validate!(opts, @definition)
struct(__MODULE__, validated)
end
@doc false
def parse_uri(proposed_uri) when is_binary(proposed_uri) do
parse_uri(URI.parse(proposed_uri))
end
def parse_uri(%URI{} = proposed_uri) do
with %URI{} = uri <- proposed_uri |> assume_port(),
{:scheme, scheme} when scheme in @known_schemes <-
{:scheme, uri.scheme},
{:port, port} when is_integer(port) and port > 0 <- {:port, uri.port} do
{:ok, uri}
else
{:port, bad_port} ->
{:error,
"unparseable port value #{inspect(bad_port)}: please provide a positive-integer value"}
{:scheme, scheme} ->
{:error,
"unknown scheme #{inspect(scheme)}: only #{inspect(@known_schemes)} are accepted"}
end
end
def parse_uri(unparsed) do
{:error, "could not parse #{inspect(unparsed)} as a binary or URI struct"}
end
defp assume_port(%URI{scheme: "ws", port: nil} = uri),
do: %URI{uri | port: 80}
defp assume_port(%URI{scheme: "wss", port: nil} = uri),
do: %URI{uri | port: 443}
defp assume_port(uri), do: uri
@doc false
def parse_pair_of_strings({key, value})
when is_binary(key) and is_binary(value) do
{:ok, {key, value}}
end
def parse_pair_of_strings(unparsed) do
{:error, "could not parse #{inspect(unparsed)} as a two-tuple of strings"}
end
@doc false
def parse_gun_open_options(options) when is_map(options) do
{:ok, Map.merge(%{protocols: [:http]}, options)}
end
def parse_gun_open_options(unknown) do
{:error, "gun options must be a map, got #{inspect(unknown)}"}
end
end
|
lib/slipstream/configuration.ex
| 0.903704 | 0.767232 |
configuration.ex
|
starcoder
|
defmodule Credo.Check.Readability.MaxLineLength do
@moduledoc """
Checks for the length of lines.
Ignores function definitions and (multi-)line strings by default.
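Example `.credo.exs` entry (the parameter values below are illustrative):
{Credo.Check.Readability.MaxLineLength, max_length: 100, ignore_strings: false}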
"""
@explanation [
check: @moduledoc,
params: [
max_length: "The maximum number of characters a line may consist of.",
ignore_definitions: "Set to `true` to ignore lines including function definitions.",
ignore_specs: "Set to `true` to ignore lines including `@spec`s.",
ignore_strings: "Set to `true` to ignore lines that are strings or in heredocs"
]
]
@default_params [
max_length: 120,
ignore_definitions: true,
ignore_specs: false,
ignore_strings: true
]
@def_ops [:def, :defp, :defmacro]
use Credo.Check, base_priority: :low
alias Credo.Code.Heredocs
alias Credo.Code.Strings
@doc false
def run(source_file, params \\ []) do
issue_meta = IssueMeta.for(source_file, params)
max_length = Params.get(params, :max_length, @default_params)
ignore_definitions = Params.get(params, :ignore_definitions, @default_params)
ignore_specs = Params.get(params, :ignore_specs, @default_params)
ignore_strings = Params.get(params, :ignore_strings, @default_params)
# TODO v1.0: this should be two different params
ignore_heredocs = ignore_strings
definitions = Credo.Code.prewalk(source_file, &find_definitions/2)
specs = Credo.Code.prewalk(source_file, &find_specs/2)
source = SourceFile.source(source_file)
source =
if ignore_heredocs do
Heredocs.replace_with_spaces(source, "")
else
source
end
lines = Credo.Code.to_lines(source)
lines_for_comparison =
if ignore_strings do
source
|> Strings.replace_with_spaces("")
|> Credo.Code.to_lines()
else
lines
end
Enum.reduce(lines, [], fn {line_no, line}, issues ->
{_, line_for_comparison} = Enum.at(lines_for_comparison, line_no - 1)
if String.length(line_for_comparison) > max_length do
if refute_issue?(
line_no,
definitions,
ignore_definitions,
specs,
ignore_specs
) do
issues
else
[issue_for(line_no, max_length, line, issue_meta) | issues]
end
else
issues
end
end)
end
for op <- @def_ops do
defp find_definitions({unquote(op), meta, arguments} = ast, definitions)
when is_list(arguments) do
{ast, [meta[:line] | definitions]}
end
end
defp find_definitions(ast, definitions) do
{ast, definitions}
end
defp find_specs({:spec, meta, arguments} = ast, specs)
when is_list(arguments) do
{ast, [meta[:line] | specs]}
end
defp find_specs(ast, specs) do
{ast, specs}
end
defp refute_issue?(
line_no,
definitions,
ignore_definitions,
specs,
ignore_specs
) do
ignore_definitions? =
if ignore_definitions do
Enum.member?(definitions, line_no)
else
false
end
ignore_specs? =
if ignore_specs do
Enum.member?(specs, line_no)
else
false
end
ignore_definitions? || ignore_specs?
end
defp issue_for(line_no, max_length, line, issue_meta) do
line_length = String.length(line)
column = max_length + 1
trigger = String.slice(line, max_length, line_length - max_length)
format_issue(
issue_meta,
message: "Line is too long (max is #{max_length}, was #{line_length}).",
line_no: line_no,
column: column,
trigger: trigger
)
end
end
|
lib/credo/check/readability/max_line_length.ex
| 0.555435 | 0.466906 |
max_line_length.ex
|
starcoder
|
defmodule Game.Command.Map do
@moduledoc """
The "map" command
"""
use Game.Command
use Game.Zone
alias Game.Environment
alias Game.Session.GMCP
commands(["map"], parse: false)
@impl Game.Command
def help(:topic), do: "Map"
def help(:short), do: "View a map of the zone"
def help(:full) do
"""
View the map of the zone you are in. A room is shown as the [ ] symbol. Rooms
are connected by open spaces between the [ ] symbols. Walls are drawn
between rooms that are next to each other but do not have an exit connecting them.
Sample map:
[ ] [ ]
\\ |
[ ] - [X]
/
[ ]
Map Legend:
X - You
[ ] - Room
- - Path way
| - Path way
\\ - Path way
/ - Path way
Map colors show what the room's ecology might be:
{map:blue}[ ]{/map:blue} - Ocean, lake, or river
{map:brown}[ ]{/map:brown} - Mount, or road
{map:dark-green}[ ]{/map:dark-green} - Hill, field, or meadow
{map:green}[ ]{/map:green} - Forest, or jungle
{map:grey}[ ]{/map:grey} - Town, or dungeon
{map:light-grey}[ ]{/map:light-grey} - Inside
"""
end
@impl true
def parse(command, _context), do: parse(command)
@impl Game.Command
@doc """
Parse the command into arguments
iex> Game.Command.Map.parse("map")
{}
iex> Game.Command.Map.parse("map extra")
{:error, :bad_parse, "map extra"}
iex> Game.Command.Map.parse("unknown")
{:error, :bad_parse, "unknown"}
"""
def parse(command)
def parse("map"), do: {}
@impl Game.Command
@doc """
View a map of the zone
"""
def run(command, state)
def run({}, state) do
case Environment.room_type(state.save.room_id) do
:room ->
room_map(state)
:overworld ->
overworld_map(state)
end
end
defp room_map(state = %{save: %{room_id: room_id}}) do
{:ok, room} = @environment.look(room_id)
map = room.zone_id |> @zone.map({room.x, room.y, room.map_layer})
mini_map = room.zone_id |> @zone.map({room.x, room.y, room.map_layer}, mini: true)
state |> GMCP.map(mini_map)
state.socket |> @socket.echo(map)
end
defp overworld_map(state = %{save: %{room_id: room_id}}) do
{:ok, room} = @environment.look(room_id)
map = room.zone_id |> @zone.map({room.x, room.y})
mini_map = room.zone_id |> @zone.map({room.x, room.y}, mini: true)
state |> GMCP.map(mini_map)
state.socket |> @socket.echo(map)
end
end
|
lib/game/command/map.ex
| 0.73173 | 0.407422 |
map.ex
|
starcoder
|
defmodule AdventOfCode.Y2021.Day4 do
@moduledoc """
--- Day 4: Giant Squid ---
You're already almost 1.5km (almost a mile) below the surface of the ocean, already so deep that you can't see any sunlight. What you can see, however, is a giant squid that has attached itself to the outside of your submarine.
Maybe it wants to play bingo?
Bingo is played on a set of boards each consisting of a 5x5 grid of numbers. Numbers are chosen at random, and the chosen number is marked on all boards on which it appears. (Numbers may not appear on all boards.) If all numbers in any row or any column of a board are marked, that board wins. (Diagonals don't count.)
The submarine has a bingo subsystem to help passengers (currently, you and the giant squid) pass the time. It automatically generates a random order in which to draw numbers and a random set of boards (your puzzle input). For example:
7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1
22 13 17 11 0
8 2 23 4 24
21 9 14 16 7
6 10 3 18 5
1 12 20 15 19
3 15 0 2 22
9 18 13 17 5
19 8 7 25 23
20 11 10 24 4
14 21 16 12 6
14 21 17 24 4
10 16 15 9 19
18 8 23 26 20
22 11 13 6 5
2 0 12 3 7
After the first five numbers are drawn (7, 4, 9, 5, and 11), there are no winners, but the boards are marked as follows (shown here adjacent to each other to save space):
22 13 17 11 0 3 15 0 2 22 14 21 17 24 4
8 2 23 4 24 9 18 13 17 5 10 16 15 9 19
21 9 14 16 7 19 8 7 25 23 18 8 23 26 20
6 10 3 18 5 20 11 10 24 4 22 11 13 6 5
1 12 20 15 19 14 21 16 12 6 2 0 12 3 7
After the next six numbers are drawn (17, 23, 2, 0, 14, and 21), there are still no winners:
22 13 17 11 0 3 15 0 2 22 14 21 17 24 4
8 2 23 4 24 9 18 13 17 5 10 16 15 9 19
21 9 14 16 7 19 8 7 25 23 18 8 23 26 20
6 10 3 18 5 20 11 10 24 4 22 11 13 6 5
1 12 20 15 19 14 21 16 12 6 2 0 12 3 7
Finally, 24 is drawn:
22 13 17 11 0 3 15 0 2 22 14 21 17 24 4
8 2 23 4 24 9 18 13 17 5 10 16 15 9 19
21 9 14 16 7 19 8 7 25 23 18 8 23 26 20
6 10 3 18 5 20 11 10 24 4 22 11 13 6 5
1 12 20 15 19 14 21 16 12 6 2 0 12 3 7
At this point, the third board wins because it has at least one complete row or column of marked numbers (in this case, the entire top row is marked: 14 21 17 24 4).
The score of the winning board can now be calculated. Start by finding the sum of all unmarked numbers on that board; in this case, the sum is 188. Then, multiply that sum by the number that was just called when the board won, 24, to get the final score, 188 * 24 = 4512.
To guarantee victory against the giant squid, figure out which board will win first. What will your final score be if you choose that board?
"""
@doc """
Day 4 - Part 1
## Examples
# iex> AdventOfCode.Y2021.Day4.part1()
# 2496
"""
def part1() do
{numbers, boards} = setup()
find_winner(numbers, boards, false, &no_op/1)
|> get_score()
end
def no_op(v), do: v
defp setup() do
[numbers_str | rest] =
AdventOfCode.etl_file(
"lib/y_2021/d4/input.txt",
fn x -> x end,
&AdventOfCode.split_newline/1,
%{reject_blanks: false, reject_nils: true}
)
numbers =
numbers_str
|> String.split(",")
|> Enum.map(fn n ->
{num, ""} = Integer.parse(n)
num
end)
boards =
build_boards(rest, [])
|> Enum.with_index()
|> Enum.map(fn {board, idx} -> BingoBoard.build(board, idx) end)
{numbers, boards}
end
defp build_boards([], boards), do: boards
defp build_boards([""], boards), do: boards
defp build_boards(["" | rest], boards) do
{board, remaining} = rest |> Enum.split_while(fn elem -> elem != "" end)
build_boards(remaining, boards ++ [generate_unmarked_board(board)])
end
defp generate_unmarked_board(board) do
board
|> Enum.with_index()
|> Enum.reduce(%{}, fn {row, r_idx}, acc ->
row
|> String.split(~r{\s+})
|> Enum.reject(fn elem -> elem == "" || is_nil(elem) end)
|> Enum.with_index()
|> Enum.reduce(acc, fn {str, c_idx}, sub_acc ->
{num, ""} = Integer.parse(str)
Map.merge(sub_acc, %{{r_idx, c_idx} => num})
end)
end)
end
defp find_winner([next_number | rest], boards, false, next_board_limiter) do
next_boards =
boards
|> Enum.map(fn board -> BingoBoard.mark_position(board, next_number) end)
|> next_board_limiter.()
find_winner(
rest,
next_boards,
Enum.any?(next_boards, fn board -> board.won == true end),
next_board_limiter
)
end
defp find_winner(_, boards, true, _) do
boards
|> Enum.find(fn board -> board.won == true end)
end
defp get_score(%{marked_positions: positions} = board) do
sum =
positions
|> Enum.reduce(0, fn {k, v}, acc ->
if v == false do
acc + board.board[k]
else
acc
end
end)
sum * board.last_mark
end
@doc """
Day 4 - Part 2
## Examples
iex> AdventOfCode.Y2021.Day4.part2()
25925
"""
def part2() do
{numbers, boards} = setup()
find_winner(numbers, boards, false, &reject_all_but_last_winner/1)
|> get_score()
end
def reject_all_but_last_winner([board]), do: [board]
def reject_all_but_last_winner(boards) do
res = boards |> Enum.reject(fn %{won: won} -> won == true end)
if res == [] do
boards
else
res
end
end
end
defmodule BingoBoard do
defstruct id: 0, board: %{}, values: [], marked_positions: %{}, won: false, last_mark: 0
@type t(id, board, values, marked_positions, won) :: %BingoBoard{
id: id,
board: board,
values: values,
marked_positions: marked_positions,
won: won,
last_mark: integer
}
def build(input_board, id) do
%BingoBoard{
id: id,
board: input_board,
values: input_board |> Map.values(),
marked_positions:
input_board
|> Enum.reduce(%{}, fn {key, _val}, acc -> Map.merge(acc, %{key => false}) end),
won: false,
last_mark: 0
}
end
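# Illustrative usage with a tiny 2x2 board (the real game uses 5x5 boards):
#
#   board = BingoBoard.build(%{{0, 0} => 7, {0, 1} => 4, {1, 0} => 9, {1, 1} => 5}, 0)
#   board = board |> BingoBoard.mark_position(7) |> BingoBoard.mark_position(4)
#   board.won # => true, since the whole top row is marked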
def mark_position(%BingoBoard{values: values} = board, mark) do
values
|> Enum.member?(mark)
|> evaluate_mark(board, mark)
end
defp evaluate_mark(false, board, _mark), do: board
defp evaluate_mark(true, board, mark) do
{key, _v} =
board.board
|> Enum.find(fn {_k, v} ->
v == mark
end)
new_marks = Map.merge(board.marked_positions, %{key => true})
rows =
new_marks
|> Enum.group_by(fn {{r_idx, _c_idx}, _val} ->
r_idx
end)
cols =
new_marks
|> Enum.group_by(fn {{_r_idx, c_idx}, _val} ->
c_idx
end)
rows_winner =
rows
|> Enum.any?(fn {_k, row} ->
row |> Enum.all?(fn {_k, v} -> v end)
end)
cols_winner =
cols
|> Enum.any?(fn {_k, col} ->
col |> Enum.all?(fn {_k, v} -> v end)
end)
%BingoBoard{
id: board.id,
board: board.board,
values: board.values,
marked_positions: new_marks,
won: rows_winner || cols_winner,
last_mark: mark
}
end
end
|
lib/y_2021/d4/day4.ex
| 0.751648 | 0.582016 |
day4.ex
|
starcoder
|
defmodule Sage.EmptyError do
@moduledoc """
Raised at runtime when an empty Sage is executed.
"""
defexception [:message]
@doc false
def exception(_opts) do
message = "trying to execute empty Sage is not allowed"
%__MODULE__{message: message}
end
end
defmodule Sage.AsyncTransactionTimeoutError do
@moduledoc """
Raised at runtime when the asynchronous transaction timed out.
"""
defexception [:name, :timeout]
@impl true
def message(%__MODULE__{name: name, timeout: timeout}) do
"""
asynchronous transaction for operation #{name} has timed out,
expected it to return within #{to_string(timeout)} microseconds
"""
end
end
defmodule Sage.DuplicateStageError do
@moduledoc """
Raised at runtime when an operation with a duplicated name is added to Sage.
"""
defexception [:message]
@impl true
def exception(opts) do
sage = Keyword.fetch!(opts, :sage)
name = Keyword.fetch!(opts, :name)
message = """
#{inspect(name)} is already a member of the Sage:
#{inspect(sage)}
"""
%__MODULE__{message: message}
end
end
defmodule Sage.DuplicateTracerError do
@moduledoc """
Raised at runtime when a duplicated tracer is added to Sage.
"""
defexception [:message]
@impl true
def exception(opts) do
sage = Keyword.fetch!(opts, :sage)
module = Keyword.fetch!(opts, :module)
message = """
#{inspect(module)} is already defined as tracer for Sage:
#{inspect(sage)}
"""
%__MODULE__{message: message}
end
end
defmodule Sage.DuplicateFinalHookError do
@moduledoc """
Raised at runtime when a duplicated final hook is added to a Sage.
"""
defexception [:message]
@impl true
def exception(opts) do
sage = Keyword.fetch!(opts, :sage)
callback = Keyword.fetch!(opts, :hook)
message = """
#{format_callback(callback)} is already defined as final hook for Sage:
#{inspect(sage)}
"""
%__MODULE__{message: message}
end
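# MFA hooks are invoked with two extra arguments in addition to `a`,
# hence the displayed arity of `length(a) + 2`.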
defp format_callback({m, f, a}), do: "#{inspect(m)}.#{to_string(f)}/#{to_string(length(a) + 2)}"
defp format_callback(cb), do: inspect(cb)
end
defmodule Sage.MalformedTransactionReturnError do
@moduledoc """
Raised at runtime when the transaction or operation has a malformed return.
"""
defexception [:transaction, :return]
@impl true
def message(%__MODULE__{transaction: transaction, return: return}) do
"""
expected transaction #{inspect(transaction)} to return
{:ok, effect}, {:error, reason} or {:abort, reason}, got:
#{inspect(return)}
"""
end
end
defmodule Sage.MalformedCompensationReturnError do
@moduledoc """
Raised at runtime when the compensation or operation has a malformed return.
"""
defexception [:compensation, :return]
@impl true
def message(%__MODULE__{compensation: compensation, return: return}) do
"""
expected compensation #{inspect(compensation)} to return
:ok, :abort, {:retry, retry_opts} or {:continue, effect}, got:
#{inspect(return)}
"""
end
end
|
lib/sage/exceptions.ex
| 0.848659 | 0.409693 |
exceptions.ex
|
starcoder
|
defmodule TextBasedFPS.PlayerCommand.Fire do
import TextBasedFPS.CommandHelper
import TextBasedFPS.Text, only: [danger: 1, highlight: 1]
alias TextBasedFPS.{
GameMap,
Notification,
PlayerCommand,
Room,
RoomPlayer,
ServerState
}
@behaviour PlayerCommand
@impl true
def execute(state, player, _) do
with {:ok, room} <- require_alive_player(state, player) do
room_player = Room.get_player(room, player.key)
fire(state, player, room_player, room)
end
end
defp fire(state, _, %{ammo: {0, 0}}, _) do
{:error, state, "You're out of ammo"}
end
defp fire(state, _, %{ammo: {0, _}}, _) do
{:error, state, "Reload your gun by typing #{highlight("reload")}"}
end
defp fire(state, player, room_player, room) do
shot_players =
players_on_path(room.game_map.matrix, room_player.coordinates, room_player.direction)
|> Enum.with_index()
|> Enum.map(fn {{shot_player_key, distance}, index} ->
apply_damage(room, {shot_player_key, distance, index})
end)
updated_state =
ServerState.update_room(state, room.name, fn room ->
shot_players
|> Enum.reduce(room, fn shot_player, room ->
apply_update(room, room_player, shot_player)
end)
|> Room.update_player(room_player.player_key, fn player ->
RoomPlayer.decrement(player, :ammo)
end)
end)
updated_state = push_notifications(updated_state, player, shot_players)
{:ok, updated_state, generate_message(state, shot_players)}
end
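# Casts a ray from the shooter's coordinates in the facing direction,
# collecting {player_key, distance} pairs until a wall stops the shot.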
defp players_on_path(matrix, {x, y}, direction) do
%{players: players} =
GameMap.Matrix.iterate_towards(
matrix,
{x, y},
direction,
%{distance: 1, players: []},
fn coordinate, acc ->
cond do
GameMap.Matrix.wall_at?(matrix, coordinate) ->
{:stop, acc}
GameMap.Matrix.player_at?(matrix, coordinate) ->
player = GameMap.Matrix.at(matrix, coordinate)
updated_acc =
acc
|> Map.put(:players, acc.players ++ [{player.player_key, acc.distance}])
|> Map.put(:distance, acc.distance + 1)
{:continue, updated_acc}
true ->
{:continue, Map.put(acc, :distance, acc.distance + 1)}
end
end
)
players
end
defp apply_damage(room, {shot_player_key, distance_to_shooter, shot_player_order}) do
shot_player = Room.get_player(room, shot_player_key)
shoot_power = shoot_power(distance_to_shooter, shot_player_order)
subtract_health(shot_player, shoot_power)
end
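# Damage model: a base power of 30, reduced by 1 per tile of distance
# beyond the first and by 10 per enemy already pierced, floored at 0.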
defp shoot_power(distance_to_shooter, enemy_index) do
power = 30 - (distance_to_shooter - 1) - enemy_index * 10
max(0, power)
end
defp subtract_health(shot_player, shoot_power) do
new_health = max(0, shot_player.health - shoot_power)
Map.put(shot_player, :health, new_health)
end
defp apply_update(room, shooter, shot_player) do
coordinates = shot_player.coordinates
put_in(room.players[shot_player.player_key], shot_player)
|> maybe_remove_player_from_map(shot_player)
|> maybe_add_item(shot_player, coordinates)
|> maybe_update_score(shooter, shot_player)
end
defp maybe_remove_player_from_map(room, shot_player = %{health: 0}) do
Room.remove_player_from_map(room, shot_player.player_key)
end
defp maybe_remove_player_from_map(room, _shot_player), do: room
defp maybe_add_item(room, _shot_player = %{health: 0}, coordinates) do
Room.add_random_object(room, coordinates)
end
defp maybe_add_item(room, _shot_player, _coordinates), do: room
defp maybe_update_score(room, shooter, shot_player = %{health: 0}) do
room
|> Room.update_player(shooter.player_key, &RoomPlayer.increment(&1, :kills))
|> Room.update_player(shot_player.player_key, &RoomPlayer.increment(&1, :killed))
end
defp maybe_update_score(room, _shooter, _shot_player), do: room
defp generate_message(_state, []) do
"You've shot the wall."
end
defp generate_message(state, shot_players) do
killed = Enum.filter(shot_players, &RoomPlayer.dead?/1)
hit = shot_players -- killed
phrase_parts =
[action_message("hit", state, hit), action_message("killed", state, killed)]
|> Stream.filter(fn part -> part != nil end)
|> Enum.join(" and ")
"You've #{phrase_parts}"
end
defp action_message(verb, state, shot_players) do
names =
shot_players
|> Stream.map(fn shot -> ServerState.get_player(state, shot.player_key) end)
|> Enum.map(& &1.name)
case length(names) do
0 -> nil
_ -> "#{verb} #{Enum.join(names, ", ")}"
end
end
defp push_notifications(state, shooter_player, shot_players) do
notifications = Enum.map(shot_players, &build_shot_notification(shooter_player, &1))
ServerState.add_notifications(state, notifications)
end
defp build_shot_notification(shooter_player, shot_player = %{health: 0}) do
Notification.new(
shot_player.player_key,
danger(
"#{shooter_player.name} killed you! Type #{highlight("respawn")} to return to the game"
)
)
end
defp build_shot_notification(shooter_player, shot_player) do
Notification.new(
shot_player.player_key,
danger("uh oh! #{shooter_player.name} shot you!")
)
end
end
|
lib/text_based_fps/player_commands/fire.ex
| 0.625781 | 0.409988 |
fire.ex
|
starcoder
|
defmodule FarmbotExt.AMQP.PingPongChannel do
@moduledoc """
AMQP channel responsible for responding to `ping` messages.
Simply echoes the exact data received on the `ping` channel
onto the `pong` channel.
Also has a ~15-20 minute timer that makes an HTTP request
to `/api/device`. This refreshes the `last_seen_api` field, which
is required for devices that have `auto_sync` enabled, since such
devices would otherwise never make an HTTP request on their own.
"""
use GenServer
use AMQP
alias FarmbotExt.{
APIFetcher,
AMQP.Support
}
require Logger
require FarmbotCore.Logger
require FarmbotTelemetry
alias FarmbotCore.Leds
@exchange "amq.topic"
@lower_bound_ms 900_000
@upper_bound_ms 1_200_000
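# Bounds for the randomized HTTP ping interval: 15 to 20 minutes, jittered
# so a fleet of devices doesn't hit the API in lockstep.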
defstruct [:conn, :chan, :jwt, :http_ping_timer, :ping_fails]
alias __MODULE__, as: State
@doc false
def start_link(args, opts \\ [name: __MODULE__]) do
GenServer.start_link(__MODULE__, args, opts)
end
def init(args) do
jwt = Keyword.fetch!(args, :jwt)
http_ping_timer = FarmbotExt.Time.send_after(self(), :http_ping, 5000)
send(self(), :connect_amqp)
_ = Leds.blue(:off)
state = %State{
conn: nil,
chan: nil,
jwt: jwt,
http_ping_timer: http_ping_timer,
ping_fails: 0
}
{:ok, state}
end
def terminate(r, s) do
_ = Leds.blue(:off)
Support.handle_termination(r, s, "PingPong")
end
def handle_info(:connect_amqp, state) do
bot = state.jwt.bot
ping = bot <> "_ping"
route = "bot.#{bot}.ping.#"
result = Support.create_bind_consume(ping, route)
do_connect(result, state)
end
def handle_info(:http_ping, state) do
ms = Enum.random(@lower_bound_ms..@upper_bound_ms)
case APIFetcher.get(APIFetcher.client(), "/api/device") do
{:ok, _} ->
_ = Leds.blue(:solid)
http_ping_timer = FarmbotExt.Time.send_after(self(), :http_ping, ms)
{:noreply, %{state | http_ping_timer: http_ping_timer, ping_fails: 0}}
error ->
ping_fails = state.ping_fails + 1
FarmbotCore.Logger.error(3, "Ping failed (#{ping_fails}). #{inspect(error)}")
_ = Leds.blue(:off)
http_ping_timer = FarmbotExt.Time.send_after(self(), :http_ping, ms)
{:noreply, %{state | http_ping_timer: http_ping_timer, ping_fails: ping_fails}}
end
end
# Confirmation sent by the broker after registering this process as a consumer
def handle_info({:basic_consume_ok, _}, state) do
{:noreply, state}
end
# Sent by the broker when the consumer is
# unexpectedly cancelled (such as after a queue deletion)
def handle_info({:basic_cancel, _}, state) do
{:stop, :normal, state}
end
# Confirmation sent by the broker to the consumer process after a Basic.cancel
def handle_info({:basic_cancel_ok, _}, state) do
{:noreply, state}
end
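# Echoes the ping payload back on the matching "pong" routing key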
def handle_info({:basic_deliver, payload, %{routing_key: routing_key}}, state) do
routing_key = String.replace(routing_key, "ping", "pong")
:ok = Basic.publish(state.chan, @exchange, routing_key, payload)
{:noreply, state}
end
def do_connect({:ok, {conn, chan}}, state) do
FarmbotCore.Logger.debug(3, "connected to PingPong channel")
_ = Leds.blue(:solid)
{:noreply, %{state | conn: conn, chan: chan}}
end
def do_connect(nil, state) do
FarmbotExt.Time.send_after(self(), :connect_amqp, 5000)
{:noreply, %{state | conn: nil, chan: nil}}
end
def do_connect(err, state) do
Support.handle_error(state, err, "PingPong")
end
end
|
farmbot_ext/lib/farmbot_ext/amqp/ping_pong_channel.ex
| 0.770335 | 0.406214 |
ping_pong_channel.ex
|
starcoder
|
defmodule AWS.LexRuntime do
@moduledoc """
Amazon Lex provides both build and runtime endpoints.
Each endpoint provides a set of operations (API). Your conversational bot uses
the runtime API to understand user utterances (user input text or voice). For
example, suppose a user says "I want pizza", your bot sends this input to Amazon
Lex using the runtime API. Amazon Lex recognizes that the user request is for
the OrderPizza intent (one of the intents defined in the bot). Then Amazon Lex
engages in user conversation on behalf of the bot to elicit required information
(slot values, such as pizza size and crust type), and then performs fulfillment
activity (that you configured when you created the bot). You use the build-time
API to create and manage your Amazon Lex bot. For a list of build-time
operations, see the build-time API.
"""
@doc """
Removes session information for a specified bot, alias, and user ID.
"""
def delete_session(client, bot_alias, bot_name, user_id, input, options \\ []) do
path_ = "/bot/#{URI.encode(bot_name)}/alias/#{URI.encode(bot_alias)}/user/#{URI.encode(user_id)}/session"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Returns session information for a specified bot, alias, and user ID.
"""
def get_session(client, bot_alias, bot_name, user_id, checkpoint_label_filter \\ nil, options \\ []) do
path_ = "/bot/#{URI.encode(bot_name)}/alias/#{URI.encode(bot_alias)}/user/#{URI.encode(user_id)}/session/"
headers = []
query_ = []
query_ = if !is_nil(checkpoint_label_filter) do
[{"checkpointLabelFilter", checkpoint_label_filter} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Sends user input (text or speech) to Amazon Lex.
Clients use this API to send text and audio requests to Amazon Lex at runtime.
Amazon Lex interprets the user input using the machine learning model that it
built for the bot.
The `PostContent` operation supports audio input at 8kHz and 16kHz. You can use
8kHz audio to achieve higher speech recognition accuracy in telephone audio
applications.
In response, Amazon Lex returns the next message to convey to the user. Consider
the following example messages:
* For a user input "I would like a pizza," Amazon Lex might return a
response with a message eliciting slot data (for example, `PizzaSize`): "What
size pizza would you like?".
* After the user provides all of the pizza order information, Amazon
Lex might return a response with a message to get user confirmation: "Order the
pizza?".
* After the user replies "Yes" to the confirmation prompt, Amazon
Lex might return a conclusion statement: "Thank you, your cheese pizza has been
ordered.".
Not all Amazon Lex messages require a response from the user. For example,
conclusion statements do not require a response. Some messages require only a
yes or no response. In addition to the `message`, Amazon Lex provides additional
context about the message in the response that you can use to enhance client
behavior, such as displaying the appropriate client user interface. Consider the
following examples:
* If the message is to elicit slot data, Amazon Lex returns the
following context information:
* `x-amz-lex-dialog-state` header set to `ElicitSlot`
* `x-amz-lex-intent-name` header set to the intent name
in the current context
* `x-amz-lex-slot-to-elicit` header set to the slot name
for which the `message` is eliciting information
* `x-amz-lex-slots` header set to a map of slots
configured for the intent with their current values
* If the message is a confirmation prompt, the
`x-amz-lex-dialog-state` header is set to `Confirmation` and the
`x-amz-lex-slot-to-elicit` header is omitted.
* If the message is a clarification prompt configured for the
intent, indicating that the user intent is not understood, the
`x-amz-dialog-state` header is set to `ElicitIntent` and the
`x-amz-slot-to-elicit` header is omitted.
In addition, Amazon Lex also returns your application-specific
`sessionAttributes`. For more information, see [Managing Conversation Context](https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html).
"""
def post_content(client, bot_alias, bot_name, user_id, input, options \\ []) do
path_ = "/bot/#{URI.encode(bot_name)}/alias/#{URI.encode(bot_alias)}/user/#{URI.encode(user_id)}/content"
{headers, input} =
[
{"accept", "Accept"},
{"contentType", "Content-Type"},
{"requestAttributes", "x-amz-lex-request-attributes"},
{"sessionAttributes", "x-amz-lex-session-attributes"},
]
|> AWS.Request.build_params(input)
query_ = []
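# PostContent returns most of its metadata in HTTP response headers;
# the known headers below are copied into the decoded body map.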
case request(client, :post, path_, query_, headers, input, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"x-amz-lex-alternative-intents", "alternativeIntents"},
{"x-amz-lex-bot-version", "botVersion"},
{"Content-Type", "contentType"},
{"x-amz-lex-dialog-state", "dialogState"},
{"x-amz-lex-input-transcript", "inputTranscript"},
{"x-amz-lex-intent-name", "intentName"},
{"x-amz-lex-message", "message"},
{"x-amz-lex-message-format", "messageFormat"},
{"x-amz-lex-nlu-intent-confidence", "nluIntentConfidence"},
{"x-amz-lex-sentiment", "sentimentResponse"},
{"x-amz-lex-session-attributes", "sessionAttributes"},
{"x-amz-lex-session-id", "sessionId"},
{"x-amz-lex-slot-to-elicit", "slotToElicit"},
{"x-amz-lex-slots", "slots"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Sends user input to Amazon Lex.
Client applications can use this API to send requests to Amazon Lex at runtime.
Amazon Lex then interprets the user input using the machine learning model it
built for the bot.
In response, Amazon Lex returns the next `message` to convey to the user an
optional `responseCard` to display. Consider the following example messages:
* For a user input "I would like a pizza", Amazon Lex might return a
response with a message eliciting slot data (for example, PizzaSize): "What size
pizza would you like?"
* After the user provides all of the pizza order information, Amazon
Lex might return a response with a message to obtain user confirmation "Proceed
with the pizza order?".
* After the user replies to a confirmation prompt with a "yes",
Amazon Lex might return a conclusion statement: "Thank you, your cheese pizza
has been ordered.".
Not all Amazon Lex messages require a user response. For example, a conclusion
statement does not require a response. Some messages require only a "yes" or
"no" user response. In addition to the `message`, Amazon Lex provides additional
context about the message in the response that you might use to enhance client
behavior, for example, to display the appropriate client user interface. These
are the `slotToElicit`, `dialogState`, `intentName`, and `slots` fields in the
response. Consider the following examples:
* If the message is to elicit slot data, Amazon Lex returns the
following context information:
* `dialogState` set to ElicitSlot
* `intentName` set to the intent name in the current
context
* `slotToElicit` set to the slot name for which the
`message` is eliciting information
* `slots` set to a map of slots, configured for the
intent, with currently known values
* If the message is a confirmation prompt, the `dialogState` is set
to ConfirmIntent and `SlotToElicit` is set to null.
* If the message is a clarification prompt (configured for the
intent) that indicates that user intent is not understood, the `dialogState` is
set to ElicitIntent and `slotToElicit` is set to null.
In addition, Amazon Lex also returns your application-specific
`sessionAttributes`. For more information, see [Managing Conversation Context](https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html).
"""
def post_text(client, bot_alias, bot_name, user_id, input, options \\ []) do
path_ = "/bot/#{URI.encode(bot_name)}/alias/#{URI.encode(bot_alias)}/user/#{URI.encode(user_id)}/text"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
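# Illustrative call (the "inputText" field name follows the PostText API
# reference and is an assumption, not verified against this client):
#
#     AWS.LexRuntime.post_text(client, "prod", "OrderPizzaBot", "user-123",
#       %{"inputText" => "I would like a pizza"})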
@doc """
Creates a new session or modifies an existing session with an Amazon Lex bot.
Use this operation to enable your application to set the state of the bot.
For more information, see [Managing Sessions](https://docs.aws.amazon.com/lex/latest/dg/how-session-api.html).
"""
def put_session(client, bot_alias, bot_name, user_id, input, options \\ []) do
path_ = "/bot/#{URI.encode(bot_name)}/alias/#{URI.encode(bot_alias)}/user/#{URI.encode(user_id)}/session"
{headers, input} =
[
{"accept", "Accept"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :post, path_, query_, headers, input, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"Content-Type", "contentType"},
{"x-amz-lex-dialog-state", "dialogState"},
{"x-amz-lex-intent-name", "intentName"},
{"x-amz-lex-message", "message"},
{"x-amz-lex-message-format", "messageFormat"},
{"x-amz-lex-session-attributes", "sessionAttributes"},
{"x-amz-lex-session-id", "sessionId"},
{"x-amz-lex-slot-to-elicit", "slotToElicit"},
{"x-amz-lex-slots", "slots"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
client = %{client | service: "lex"}
host = build_host("runtime.lex", client)
url = host
|> build_url(path, client)
|> add_query(query, client)
additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}]
headers = AWS.Request.add_headers(additional_headers, headers)
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(client, method, url, payload, headers, options, success_status_code)
end
defp perform_request(client, method, url, payload, headers, options, success_status_code) do
case AWS.Client.request(client, method, url, payload, headers, options) do
{:ok, %{status_code: status_code, body: body} = response}
when is_nil(success_status_code) and status_code in [200, 202, 204]
when status_code == success_status_code ->
body = if(body != "", do: decode!(client, body))
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, path, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}#{path}"
end
defp add_query(url, [], _client) do
url
end
defp add_query(url, query, client) do
querystring = encode!(client, query, :query)
"#{url}?#{querystring}"
end
defp encode!(client, payload, format \\ :json) do
AWS.Client.encode!(client, payload, format)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/lex_runtime.ex
| 0.912034 | 0.523359 |
lex_runtime.ex
|
starcoder
|
defmodule Brook do
@moduledoc ~S"""
Brook provides an event stream client interface for distributed applications
to communicate indirectly and asynchronously. Brook sends and receives messages
with the event stream (typically a message queue service) via a driver module
and persists an application-specific view of the event stream via a storage module
(defaulting to ETS).
## Sample Configuration
Brook is configured within the application environment by defining
a keyword list with three primary keys: driver, handler, and storage.
config :my_app, :brook,
driver: [
module: Brook.Driver.Json,
init_arg: []
],
handler: [MyApp.Event.Handler],
storage: [
module: Brook.Storage.Ets,
init_arg: []
]
### Driver
The Brook driver implements a behaviour that sends messages to the event stream.
Events are Brook structs that contain an event type, an author (or source), a
creation timestamp, the event data, and an ack reference and ack data (following
the lead of the [`broadway`](https://github.com/plataformatec/broadway) library.)
The default driver sends the event message to the Brook server via `Genserver.cast`
Additional drivers provided at this time are a json-encoding version of the default
driver and a Kafka driver using the [`elsa`](https://github.com/bbalser/elsa) library.
### Handler
The Brook handler implements a behaviour that provides a `handle_event/1` function.
Handlers receive a Brook event and take appropriate action according to the implementing
application's business logic.
Applications implement as many function heads for the event handler as necessary and return
one of four tuples depending on how the storage module should treat the event with
respect to persistence. Events can:
* create a record in the view state via the `{:create, collection, key, value}` return
* update an existing record via the `{:merge, collection, key, value}` return
* delete a record via the `{:delete, collection, key}` return
* discard the record and do not affect the persistent view via the `:discard` return
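A minimal handler sketch following this contract (the module name, event
types, and fields below are illustrative):

    defmodule MyApp.Event.Handler do
      use Brook.Event.Handler

      def handle_event(%Brook.Event{type: "user:created", data: user}) do
        {:create, :users, user.id, user}
      end

      def handle_event(%Brook.Event{type: "user:deleted", data: user}) do
        {:delete, :users, user.id}
      end

      def handle_event(_event), do: :discard
    end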
### Storage
The Brook storage module implements yet another behaviour that persists event data to
an application view state specific to the application importing the Brook library, allowing
the application to only store information received from the event stream that is relevant to
its own domain and retrieve it when necessary.
Storage modules implement basic CRUD operations that correlate to the return values of
the event handler module.
The default module uses ETS for fast, local, in-memory storage and retrieval (great for
testing purposes!) with an additional Redis-based module as well.
"""
@typedoc "The catagory of event to contextualize the data of an event message"
@type event_type :: String.t()
@typedoc "The data component of an event message"
@type event :: term()
@typedoc "The source application generating an event message"
@type author :: String.Chars.t()
@typedoc "The grouping of events by catagory for persisted view storage"
@type view_collection :: String.Chars.t()
@typedoc "The index by which events are stored in a view state collection"
@type view_key :: String.Chars.t()
@typedoc "The event data stored within a view state collection, indexed by key"
@type view_value :: term()
@typedoc "The potential negative return value of a view state query."
@type reason :: term()
@typedoc "The name of this instance of Brook"
@type instance :: atom()
defmodule Uninitialized do
@moduledoc """
An exception that is raised when calls are made to retrieve values
from the view state, but Brook is uninitialized.
"""
defexception [:message]
end
defmodule InvalidInstance do
@moduledoc """
An exception that is raised when an instance of brook cannot be found.
"""
defexception [:message]
end
defmodule InvalidEvent do
@moduledoc """
An exception that is raised when no valid event can be found.
"""
defexception [:message]
end
@doc """
Starts a Brook process linked to the current process.
"""
defdelegate start_link(opts), to: Brook.Supervisor
@doc """
Provides a Brook Supervisor child spec for defining a custom Brook supervisor.
"""
defdelegate child_spec(args), to: Brook.Supervisor
@doc """
Return an item from the Brook view state for the implementing application wrapped in an
`:ok` tuple or else an `:error` tuple with a reason.
"""
@spec get(instance(), view_collection(), view_key()) :: {:ok, view_value()} | {:error, reason()}
defdelegate get(instance, collection, key), to: Brook.ViewState
@doc """
Returns the value stored under the given key and collection from the
Brook view state or else raises an exception.
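
Illustrative call (the instance, collection, and key below are hypothetical):

    Brook.get!(:my_app_instance, :users, "user-123")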
"""
@spec get!(instance(), view_collection(), view_key()) :: view_value()
def get!(instance, collection, key) do
case get(instance, collection, key) do
{:ok, value} -> value
{:error, reason} -> raise reason
end
end
@doc """
Returns a list of Brook events that produced the value stored under the given key
within a collection from the Brook view state, wrapped in an `:ok` tuple or else
an `:error` tuple with reason.
"""
@spec get_events(instance(), view_collection(), view_key()) :: {:ok, list(Brook.Event.t())} | {:error, reason()}
def get_events(instance, collection, key) do
storage = Brook.Config.storage(instance)
apply(storage.module, :get_events, [instance, collection, key])
end
@doc """
Returns a list of Brook events matching the provided type that produced the value stored under the given key
within a collection from the Brook view state, wrapped in an `:ok` tuple or else
an `:error` tuple with reason.
"""
@spec get_events(instance(), view_collection(), view_key(), event_type()) ::
{:ok, list(Brook.Event.t())} | {:error, reason()}
def get_events(instance, collection, key, type) do
storage = Brook.Config.storage(instance)
apply(storage.module, :get_events, [instance, collection, key, type])
end
@doc """
Returns a list of Brook events that produced the value stored under the given key
within a collection from the Brook view state or else raises an exception.
"""
@spec get_events!(instance(), view_collection(), view_key()) :: list(Brook.Event.t())
def get_events!(instance, collection, key) do
case get_events(instance, collection, key) do
{:ok, value} -> value
{:error, reason} -> raise reason
end
end
@doc """
Returns a list of Brook events matching the provided type that produced the value stored under the given key
within a collection from the Brook view state or else raises an exception.
"""
@spec get_events!(instance(), view_collection(), view_key(), event_type()) :: list(Brook.Event.t())
def get_events!(instance, collection, key, type) do
case get_events(instance, collection, key, type) do
{:ok, value} -> value
{:error, reason} -> raise reason
end
end
@doc """
Return all values saved to the Brook view state for a given collection, wrapped in an `:ok` tuple or
else an `:error` tuple with reason. Values are returned as a map where the keys are the keys used to
index the saved values and the values are anything saved under a given key based on processing events.
"""
@spec get_all(instance(), view_collection()) :: {:ok, %{required(view_key()) => view_value()}} | {:error, reason()}
defdelegate get_all(instance, collection), to: Brook.ViewState
@doc """
Return all values saved to the Brook view state for a given collection or else raises an exception.
Values are returned as a map where the keys are the keys used to index the saved values and the values
are anything saved under a given key.
"""
@spec get_all!(instance, view_collection()) :: %{required(view_key()) => view_value()}
def get_all!(instance, collection) do
case get_all(instance, collection) do
{:ok, value} -> value
{:error, reason} -> raise reason
end
end
@doc """
Returns a list of all values saved to a given collection of the Brook view state, independent of
the key used to index them. The result is wrapped in an `:ok` tuple, or else an `:error` tuple
with a reason is returned.
"""
@spec get_all_values(instance(), view_collection()) :: {:ok, [view_value()]} | {:error, reason()}
def get_all_values(instance, collection) do
case get_all(instance, collection) do
{:ok, map} -> {:ok, Map.values(map)}
error -> error
end
end
@doc """
Returns a list of all values saved to a given collection of the Brook view state, independent of
the key used to index them, or else raises an exception.
"""
@spec get_all_values!(instance, view_collection()) :: [view_value()]
def get_all_values!(instance, collection) do
case get_all_values(instance, collection) do
{:ok, values} -> values
{:error, reason} -> raise reason
end
end
@spec serialize(term) :: {:ok, String.t()} | {:error, term}
def serialize(data) do
serializer()
|> apply(:serialize, [data])
end
@spec deserialize(String.t()) :: {:ok, term} | {:error, term}
def deserialize(string) do
serializer()
|> apply(:deserialize, [string])
end
defp serializer() do
Application.get_env(:brook, :serializer, Brook.Serde)
end
end
|
lib/brook.ex
| 0.912148 | 0.759504 |
brook.ex
|
starcoder
|
defmodule ExWire.Packet.Capability.Par.SnapshotData.StateChunk do
@moduledoc """
State chunks store the entire state of a given block. A "rich" account
structure is used to save space. Each state chunk consists of a list of
lists, each with two items: an address' sha3 hash and a rich account
structure correlating with it.
"""
defmodule RichAccount do
@moduledoc """
The rich account structure encodes the usual account data such as the
nonce, balance, and code, as well as the full storage.
Note: `code_flag` is a single byte which will determine what the `code`
data will be:
* if 0x00, the account has no code and code is the single byte 0x80,
signifying RLP empty data.
* if 0x01, the account has code, and code stores an arbitrary-length list
of bytes containing the code.
* if 0x02, the account has code, and code stores a 32-byte big-endian
integer which is the hash of the code. The code's hash must be
substituted if and only if another account which has a smaller
account entry has the same code.
Note: `storage` is a list of the entire account's storage, where the items
are RLP lists of length two: the first item being sha3(key), and the
second item being the storage value. This storage list must be sorted
in ascending order by key-hash.
Note: If `storage` is large enough that the rich account structure would
bring the internal size (see the Validity section) of the chunk to
over `CHUNK_SIZE`, only the prefix of storage that would keep the
internal size of the chunk within `CHUNK_SIZE` will be included. We
will call the unincluded remainder storage'. A new state chunk will
begin with an account entry of the same account, but with storage set
to the prefix of storage which will fit in the chunk, and so on.
"""
@type code_flag :: :no_code | :has_code | :has_repeat_code
@type storage_tuple :: {EVM.hash(), <<_::256>>}
@type t :: %__MODULE__{
nonce: EVM.hash(),
balance: integer(),
code_flag: code_flag(),
code: binary(),
storage: list(storage_tuple())
}
defstruct [
:nonce,
:balance,
:code_flag,
:code,
:storage
]
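@doc """
Decodes the single-byte code flag described in the moduledoc.

## Examples

    iex> ExWire.Packet.Capability.Par.SnapshotData.StateChunk.RichAccount.decode_code_flag(1)
    :has_code
"""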
@spec decode_code_flag(0 | 1 | 2) :: code_flag()
def decode_code_flag(0), do: :no_code
def decode_code_flag(1), do: :has_code
def decode_code_flag(2), do: :has_repeat_code
@spec encode_code_flag(code_flag()) :: 0 | 1 | 2
def encode_code_flag(:no_code), do: 0
def encode_code_flag(:has_code), do: 1
def encode_code_flag(:has_repeat_code), do: 2
end
@type account_entry :: {
EVM.hash(),
RichAccount.t()
}
@type t() :: %__MODULE__{
account_entries: list(account_entry())
}
defstruct account_entries: []
@doc """
Given a `StateChunk`, serializes for transport within a SnapshotData packet.
## Examples
iex> %ExWire.Packet.Capability.Par.SnapshotData.StateChunk{
...> account_entries: [
...> {
...> <<1::256>>,
...> %ExWire.Packet.Capability.Par.SnapshotData.StateChunk.RichAccount{
...> nonce: 2,
...> balance: 3,
...> code_flag: :has_code,
...> code: <<5::256>>,
...> storage: [{<<1::256>>, <<2::256>>}]
...> }
...> }
...> ]
...> }
...> |> ExWire.Packet.Capability.Par.SnapshotData.StateChunk.serialize()
[
[ <<1::256>>,
[
2,
3,
1,
<<5::256>>,
[[<<1::256>>, <<2::256>>]]
]
]
]
"""
@spec serialize(t()) :: ExRLP.t()
def serialize(state_chunk = %__MODULE__{}) do
for {hash, rich_account} <- state_chunk.account_entries do
[
hash,
[
rich_account.nonce,
rich_account.balance,
RichAccount.encode_code_flag(rich_account.code_flag),
rich_account.code,
for {key, val} <- rich_account.storage do
[key, val]
end
]
]
end
end
@doc """
Given an RLP-encoded `StateChunk` from a SnapshotData packet, decodes into a
`StateChunk` struct.
## Examples
iex> [
...> [ <<1::256>>,
...> [
...> 2,
...> 3,
...> 1,
...> <<5::256>>,
...> [[<<1::256>>, <<2::256>>]]
...> ]
...> ]
...> ]
...> |> ExWire.Packet.Capability.Par.SnapshotData.StateChunk.deserialize()
%ExWire.Packet.Capability.Par.SnapshotData.StateChunk{
account_entries: [
{
<<1::256>>,
%ExWire.Packet.Capability.Par.SnapshotData.StateChunk.RichAccount{
nonce: 2,
balance: 3,
code_flag: :has_code,
code: <<5::256>>,
storage: [{<<1::256>>, <<2::256>>}]
}
}
]
}
"""
@spec deserialize(ExRLP.t()) :: t()
def deserialize(account_entries_rlp) do
account_entries =
for [hash, rich_account_rlp] <- account_entries_rlp do
[
nonce,
balance,
encoded_code_flag,
code,
storage_rlp
] = rich_account_rlp
storage =
for [key, val] <- storage_rlp do
{key, val}
end
{hash,
%RichAccount{
nonce: Exth.maybe_decode_unsigned(nonce),
balance: Exth.maybe_decode_unsigned(balance),
code_flag:
encoded_code_flag
|> Exth.maybe_decode_unsigned()
|> RichAccount.decode_code_flag(),
code: code,
storage: storage
}}
end
%__MODULE__{
account_entries: account_entries
}
end
end
|
apps/ex_wire/lib/ex_wire/packet/capability/par/snapshot_data/state_chunk.ex
| 0.877135 | 0.798462 |
state_chunk.ex
|
starcoder
|
defmodule AWS.Route53 do
@moduledoc """
Amazon Route 53 is a highly available and scalable Domain Name System (DNS)
web service.
"""
@doc """
Associates an Amazon VPC with a private hosted zone.
<note> To perform the association, the VPC and the private hosted zone must
already exist. Also, you can't convert a public hosted zone into a private
hosted zone.
</note> If you want to associate a VPC that was created by one AWS account
with a private hosted zone that was created by a different account, do one
of the following:
<ul> <li> Use the AWS account that created the private hosted zone to
submit a
[CreateVPCAssociationAuthorization](https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateVPCAssociationAuthorization.html)
request. Then use the account that created the VPC to submit an
`AssociateVPCWithHostedZone` request.
</li> <li> If a subnet in the VPC was shared with another account, you can
use the account that the subnet was shared with to submit an
`AssociateVPCWithHostedZone` request. For more information about sharing
subnets, see [Working with Shared
VPCs](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-sharing.html).
</li> </ul>
"""
def associate_v_p_c_with_hosted_zone(client, hosted_zone_id, input, options \\ []) do
path_ = "/2013-04-01/hostedzone/#{URI.encode(hosted_zone_id)}/associatevpc"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Creates, changes, or deletes a resource record set, which contains
authoritative DNS information for a specified domain name or subdomain
name. For example, you can use `ChangeResourceRecordSets` to create a
resource record set that routes traffic for test.example.com to a web
server that has an IP address of 192.0.2.44.
**Deleting Resource Record Sets**
To delete a resource record set, you must specify all the same values that
you specified when you created it.
**Change Batches and Transactional Changes**
The request body must include a document with a
`ChangeResourceRecordSetsRequest` element. The request body contains a list
of change items, known as a change batch. Change batches are considered
transactional changes. Route 53 validates the changes in the request and
then either makes all or none of the changes in the change batch request.
This ensures that DNS routing isn't adversely affected by partial changes
to the resource record sets in a hosted zone.
For example, suppose a change batch request contains two changes: it
deletes the `CNAME` resource record set for www.example.com and creates an
alias resource record set for www.example.com. If validation for both
records succeeds, Route 53 deletes the first resource record set and
creates the second resource record set in a single operation. If validation
for either the `DELETE` or the `CREATE` action fails, then the request is
canceled, and the original `CNAME` record continues to exist.
<note> If you try to delete the same resource record set more than once in
a single change batch, Route 53 returns an `InvalidChangeBatch` error.
</note> **Traffic Flow**
To create resource record sets for complex routing configurations, use
either the traffic flow visual editor in the Route 53 console or the API
actions for traffic policies and traffic policy instances. Save the
configuration as a traffic policy, then associate the traffic policy with
one or more domain names (such as example.com) or subdomain names (such as
www.example.com), in the same hosted zone or in multiple hosted zones. You
can roll back the updates if the new configuration isn't performing as
expected. For more information, see [Using Traffic Flow to Route DNS
Traffic](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/traffic-flow.html)
in the *Amazon Route 53 Developer Guide*.
**Create, Delete, and Upsert**
Use `ChangeResourceRecordsSetsRequest` to perform the following actions:
<ul> <li> `CREATE`: Creates a resource record set that has the specified
values.
</li> <li> `DELETE`: Deletes an existing resource record set that has the
specified values.
</li> <li> `UPSERT`: If a resource record set does not already exist, AWS
creates it. If a resource record set does exist, Route 53 updates it with the
values in the request.
</li> </ul> **Syntaxes for Creating, Updating, and Deleting Resource Record
Sets**
The syntax for a request depends on the type of resource record set that
you want to create, delete, or update, such as weighted, alias, or
failover. The XML elements in your request must appear in the order listed
in the syntax.
For an example for each type of resource record set, see "Examples."
Don't refer to the syntax in the "Parameter Syntax" section, which includes
all of the elements for every kind of resource record set that you can
create, delete, or update by using `ChangeResourceRecordSets`.
**Change Propagation to Route 53 DNS Servers**
When you submit a `ChangeResourceRecordSets` request, Route 53 propagates
your changes to all of the Route 53 authoritative DNS servers. While your
changes are propagating, `GetChange` returns a status of `PENDING`. When
propagation is complete, `GetChange` returns a status of `INSYNC`. Changes
generally propagate to all Route 53 name servers within 60 seconds. For
more information, see
[GetChange](https://docs.aws.amazon.com/Route53/latest/APIReference/API_GetChange.html).
**Limits on ChangeResourceRecordSets Requests**
For information about the limits on a `ChangeResourceRecordSets` request,
see
[Limits](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html)
in the *Amazon Route 53 Developer Guide*.
"""
def change_resource_record_sets(client, hosted_zone_id, input, options \\ []) do
path_ = "/2013-04-01/hostedzone/#{URI.encode(hosted_zone_id)}/rrset/"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
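# Illustrative input sketch (key names follow the Route 53 API reference and
# are assumptions, not verified against this client):
#
#     AWS.Route53.change_resource_record_sets(client, "Z3M3LMPEXAMPLE", %{
#       "ChangeBatch" => %{
#         "Changes" => [
#           %{
#             "Action" => "UPSERT",
#             "ResourceRecordSet" => %{
#               "Name" => "test.example.com",
#               "Type" => "A",
#               "TTL" => 60,
#               "ResourceRecords" => [%{"Value" => "192.0.2.44"}]
#             }
#           }
#         ]
#       }
#     })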
@doc """
Adds, edits, or deletes tags for a health check or a hosted zone.
For information about using tags for cost allocation, see [Using Cost
Allocation
Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)
in the *AWS Billing and Cost Management User Guide*.
"""
def change_tags_for_resource(client, resource_id, resource_type, input, options \\ []) do
path_ = "/2013-04-01/tags/#{URI.encode(resource_type)}/#{URI.encode(resource_id)}"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Creates a new health check.
For information about adding health checks to resource record sets, see
[HealthCheckId](https://docs.aws.amazon.com/Route53/latest/APIReference/API_ResourceRecordSet.html#Route53-Type-ResourceRecordSet-HealthCheckId)
in
[ChangeResourceRecordSets](https://docs.aws.amazon.com/Route53/latest/APIReference/API_ChangeResourceRecordSets.html).
**ELB Load Balancers**
If you're registering EC2 instances with an Elastic Load Balancing (ELB)
load balancer, do not create Amazon Route 53 health checks for the EC2
instances. When you register an EC2 instance with a load balancer, you
configure settings for an ELB health check, which performs a similar
function to a Route 53 health check.
**Private Hosted Zones**
You can associate health checks with failover resource record sets in a
private hosted zone. Note the following:
<ul> <li> Route 53 health checkers are outside the VPC. To check the health
of an endpoint within a VPC by IP address, you must assign a public IP
address to the instance in the VPC.
</li> <li> You can configure a health checker to check the health of an
external resource that the instance relies on, such as a database server.
</li> <li> You can create a CloudWatch metric, associate an alarm with the
metric, and then create a health check that is based on the state of the
alarm. For example, you might create a CloudWatch metric that checks the
status of the Amazon EC2 `StatusCheckFailed` metric, add an alarm to the
metric, and then create a health check that is based on the state of the
alarm. For information about creating CloudWatch metrics and alarms by
using the CloudWatch console, see the [Amazon CloudWatch User
Guide](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/WhatIsCloudWatch.html).
</li> </ul>
"""
def create_health_check(client, input, options \\ []) do
path_ = "/2013-04-01/healthcheck"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} ->
body =
[
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Creates a new public or private hosted zone. You create records in a public
hosted zone to define how you want to route traffic on the internet for a
domain, such as example.com, and its subdomains (apex.example.com,
acme.example.com). You create records in a private hosted zone to define
how you want to route traffic for a domain and its subdomains within one or
more Amazon Virtual Private Clouds (Amazon VPCs).
<important> You can't convert a public hosted zone to a private hosted zone
or vice versa. Instead, you must create a new hosted zone with the same
name and create new resource record sets.
</important> For more information about charges for hosted zones, see
[Amazon Route 53 Pricing](http://aws.amazon.com/route53/pricing/).
Note the following:
<ul> <li> You can't create a hosted zone for a top-level domain (TLD) such
as .com.
</li> <li> For public hosted zones, Route 53 automatically creates a
default SOA record and four NS records for the zone. For more information
about SOA and NS records, see [NS and SOA Records that Route 53 Creates for
a Hosted
Zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html)
in the *Amazon Route 53 Developer Guide*.
If you want to use the same name servers for multiple public hosted zones,
you can optionally associate a reusable delegation set with the hosted
zone. See the `DelegationSetId` element.
</li> <li> If your domain is registered with a registrar other than Route
53, you must update the name servers with your registrar to make Route 53
the DNS service for the domain. For more information, see [Migrating DNS
Service for an Existing Domain to Amazon Route
53](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/MigratingDNS.html)
in the *Amazon Route 53 Developer Guide*.
</li> </ul> When you submit a `CreateHostedZone` request, the initial
status of the hosted zone is `PENDING`. For public hosted zones, this means
that the NS and SOA records are not yet available on all Route 53 DNS
servers. When the NS and SOA records are available, the status of the zone
changes to `INSYNC`.
"""
def create_hosted_zone(client, input, options \\ []) do
path_ = "/2013-04-01/hostedzone"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} ->
body =
[
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Creates a configuration for DNS query logging. After you create a query
logging configuration, Amazon Route 53 begins to publish log data to an
Amazon CloudWatch Logs log group.
DNS query logs contain information about the queries that Route 53 receives
for a specified public hosted zone, such as the following:
<ul> <li> Route 53 edge location that responded to the DNS query
</li> <li> Domain or subdomain that was requested
</li> <li> DNS record type, such as A or AAAA
</li> <li> DNS response code, such as `NoError` or `ServFail`
</li> </ul> <dl> <dt>Log Group and Resource Policy</dt> <dd> Before you
create a query logging configuration, perform the following operations.
<note> If you create a query logging configuration using the Route 53
console, Route 53 performs these operations automatically.
</note> <ol> <li> Create a CloudWatch Logs log group, and make note of the
ARN, which you specify when you create a query logging configuration. Note
the following:
<ul> <li> You must create the log group in the us-east-1 region.
</li> <li> You must use the same AWS account to create the log group and
the hosted zone that you want to configure query logging for.
</li> <li> When you create log groups for query logging, we recommend that
you use a consistent prefix, for example:
`/aws/route53/*hosted zone name* `
In the next step, you'll create a resource policy, which controls access to
one or more log groups and the associated AWS resources, such as Route 53
hosted zones. There's a limit on the number of resource policies that you
can create, so we recommend that you use a consistent prefix so you can use
the same resource policy for all the log groups that you create for query
logging.
</li> </ul> </li> <li> Create a CloudWatch Logs resource policy, and give
it the permissions that Route 53 needs to create log streams and to send
query logs to log streams. For the value of `Resource`, specify the ARN for
the log group that you created in the previous step. To use the same
resource policy for all the CloudWatch Logs log groups that you created for
query logging configurations, replace the hosted zone name with `*`, for
example:
`arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/*`
<note> You can't use the CloudWatch console to create or edit a resource
policy. You must use the CloudWatch API, one of the AWS SDKs, or the AWS
CLI.
</note> </li> </ol> </dd> <dt>Log Streams and Edge Locations</dt> <dd> When
Route 53 finishes creating the configuration for DNS query logging, it does
the following:
<ul> <li> Creates a log stream for an edge location the first time that the
edge location responds to DNS queries for the specified hosted zone. That
log stream is used to log all queries that Route 53 responds to for that
edge location.
</li> <li> Begins to send query logs to the applicable log stream.
</li> </ul> The name of each log stream is in the following format:
` *hosted zone ID*/*edge location code* `
The edge location code is a three-letter code and an arbitrarily assigned
number, for example, DFW3. The three-letter code typically corresponds with
the International Air Transport Association airport code for an airport
near the edge location. (These abbreviations might change in the future.)
For a list of edge locations, see "The Route 53 Global Network" on the
[Route 53 Product Details](http://aws.amazon.com/route53/details/) page.
</dd> <dt>Queries That Are Logged</dt> <dd> Query logs contain only the
queries that DNS resolvers forward to Route 53. If a DNS resolver has
already cached the response to a query (such as the IP address for a load
balancer for example.com), the resolver will continue to return the cached
response. It doesn't forward another query to Route 53 until the TTL for
the corresponding resource record set expires. Depending on how many DNS
queries are submitted for a resource record set, and depending on the TTL
for that resource record set, query logs might contain information about
only one query out of every several thousand queries that are submitted to
DNS. For more information about how DNS works, see [Routing Internet
Traffic to Your Website or Web
Application](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/welcome-dns-service.html)
in the *Amazon Route 53 Developer Guide*.
</dd> <dt>Log File Format</dt> <dd> For a list of the values in each query
log and the format of each value, see [Logging DNS
Queries](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/query-logs.html)
in the *Amazon Route 53 Developer Guide*.
</dd> <dt>Pricing</dt> <dd> For information about charges for query logs,
see [Amazon CloudWatch Pricing](http://aws.amazon.com/cloudwatch/pricing/).
</dd> <dt>How to Stop Logging</dt> <dd> If you want Route 53 to stop
sending query logs to CloudWatch Logs, delete the query logging
configuration. For more information, see
[DeleteQueryLoggingConfig](https://docs.aws.amazon.com/Route53/latest/APIReference/API_DeleteQueryLoggingConfig.html).
</dd> </dl>
"""
def create_query_logging_config(client, input, options \\ []) do
path_ = "/2013-04-01/queryloggingconfig"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} ->
body =
[
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Creates a delegation set (a group of four name servers) that can be reused
by multiple hosted zones that were created by the same AWS account.
You can also create a reusable delegation set that uses the four name
servers that are associated with an existing hosted zone. Specify the
hosted zone ID in the `CreateReusableDelegationSet` request.
<note> You can't associate a reusable delegation set with a private hosted
zone.
</note> For information about using a reusable delegation set to configure
white label name servers, see [Configuring White Label Name
Servers](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/white-label-name-servers.html).
The process for migrating existing hosted zones to use a reusable
delegation set is comparable to the process for configuring white label
name servers. You need to perform the following steps:
<ol> <li> Create a reusable delegation set.
</li> <li> Recreate hosted zones, and reduce the TTL to 60 seconds or less.
</li> <li> Recreate resource record sets in the new hosted zones.
</li> <li> Change the registrar's name servers to use the name servers for
the new hosted zones.
</li> <li> Monitor traffic for the website or application.
</li> <li> Change TTLs back to their original values.
</li> </ol> If you want to migrate existing hosted zones to use a reusable
delegation set, the existing hosted zones can't use any of the name servers
that are assigned to the reusable delegation set. If one or more hosted
zones do use one or more name servers that are assigned to the reusable
delegation set, you can do one of the following:
<ul> <li> For small numbers of hosted zones (up to a few hundred), it's
relatively easy to create reusable delegation sets until you get one that
has four name servers that don't overlap with any of the name servers in
your hosted zones.
</li> <li> For larger numbers of hosted zones, the easiest solution is to
use more than one reusable delegation set.
</li> <li> For larger numbers of hosted zones, you can also migrate hosted
zones that have overlapping name servers to hosted zones that don't have
overlapping name servers, then migrate the hosted zones again to use the
reusable delegation set.
</li> </ul>
"""
def create_reusable_delegation_set(client, input, options \\ []) do
path_ = "/2013-04-01/delegationset"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} ->
body =
[
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Creates a traffic policy, which you use to create multiple DNS resource
record sets for one domain name (such as example.com) or one subdomain name
(such as www.example.com).
"""
def create_traffic_policy(client, input, options \\ []) do
path_ = "/2013-04-01/trafficpolicy"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} ->
body =
[
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Creates resource record sets in a specified hosted zone based on the
settings in a specified traffic policy version. In addition,
`CreateTrafficPolicyInstance` associates the resource record sets with a
specified domain name (such as example.com) or subdomain name (such as
www.example.com). Amazon Route 53 responds to DNS queries for the domain or
subdomain name by using the resource record sets that
`CreateTrafficPolicyInstance` created.
"""
def create_traffic_policy_instance(client, input, options \\ []) do
path_ = "/2013-04-01/trafficpolicyinstance"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} ->
body =
[
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Creates a new version of an existing traffic policy. When you create a new
version of a traffic policy, you specify the ID of the traffic policy that
you want to update and a JSON-formatted document that describes the new
version. You use traffic policies to create multiple DNS resource record
sets for one domain name (such as example.com) or one subdomain name (such
as www.example.com). You can create a maximum of 1000 versions of a traffic
policy. If you reach the limit and need to create another version, you'll
need to start a new traffic policy.
"""
def create_traffic_policy_version(client, id, input, options \\ []) do
path_ = "/2013-04-01/trafficpolicy/#{URI.encode(id)}"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} ->
body =
[
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Authorizes the AWS account that created a specified VPC to submit an
`AssociateVPCWithHostedZone` request to associate the VPC with a specified
hosted zone that was created by a different account. To submit a
`CreateVPCAssociationAuthorization` request, you must use the account that
created the hosted zone. After you authorize the association, use the
account that created the VPC to submit an `AssociateVPCWithHostedZone`
request.
<note> If you want to associate multiple VPCs that you created by using one
account with a hosted zone that you created by using a different account,
you must submit one authorization request for each VPC.
</note>
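
## Example

A sketch of the two-account flow described above. `zone_owner_client` and
`vpc_owner_client` are hypothetical clients for the two accounts, and
`associate_v_p_c_with_hosted_zone/3` is assumed to be the companion function
defined earlier in this module for the `AssociateVPCWithHostedZone` API:

```elixir
input = %{"VPC" => %{"VPCId" => "vpc-0123456789abcdef0", "VPCRegion" => "us-east-1"}}
# Using the account that created the hosted zone, authorize the association:
create_v_p_c_association_authorization(zone_owner_client, "Z1D633PJN98FT9", input)
# Then, using the account that created the VPC, complete it:
associate_v_p_c_with_hosted_zone(vpc_owner_client, "Z1D633PJN98FT9", input)
```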
"""
def create_v_p_c_association_authorization(client, hosted_zone_id, input, options \\ []) do
path_ = "/2013-04-01/hostedzone/#{URI.encode(hosted_zone_id)}/authorizevpcassociation"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Deletes a health check.
<important> Amazon Route 53 does not prevent you from deleting a health
check even if the health check is associated with one or more resource
record sets. If you delete a health check and you don't update the
associated resource record sets, the future status of the health check
can't be predicted and may change. This will affect the routing of DNS
queries for your DNS failover configuration. For more information, see
[Replacing and Deleting Health
Checks](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/health-checks-creating-deleting.html#health-checks-deleting)
in the *Amazon Route 53 Developer Guide*.
</important> If you're using AWS Cloud Map and you configured Cloud Map to
create a Route 53 health check when you register an instance, you can't use
the Route 53 `DeleteHealthCheck` command to delete the health check. The
health check is deleted automatically when you deregister the instance;
there can be a delay of several hours before the health check is deleted
from Route 53.
"""
def delete_health_check(client, health_check_id, input, options \\ []) do
path_ = "/2013-04-01/healthcheck/#{URI.encode(health_check_id)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Deletes a hosted zone.
If the hosted zone was created by another service, such as AWS Cloud Map,
see [Deleting Public Hosted Zones That Were Created by Another
Service](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DeleteHostedZone.html#delete-public-hosted-zone-created-by-another-service)
in the *Amazon Route 53 Developer Guide* for information about how to
delete it. (The process is the same for public and private hosted zones
that were created by another service.)
If you want to keep your domain registration but you want to stop routing
internet traffic to your website or web application, we recommend that you
delete resource record sets in the hosted zone instead of deleting the
hosted zone.
<important> If you delete a hosted zone, you can't undelete it. You must
create a new hosted zone and update the name servers for your domain
registration, which can require up to 48 hours to take effect. (If you
delegated responsibility for a subdomain to a hosted zone and you delete
the child hosted zone, you must update the name servers in the parent
hosted zone.) In addition, if you delete a hosted zone, someone could
hijack the domain and route traffic to their own resources using your
domain name.
</important> If you want to avoid the monthly charge for the hosted zone,
you can transfer DNS service for the domain to a free DNS service. When you
transfer DNS service, you have to update the name servers for the domain
registration. If the domain is registered with Route 53, see
[UpdateDomainNameservers](https://docs.aws.amazon.com/Route53/latest/APIReference/API_domains_UpdateDomainNameservers.html)
for information about how to replace Route 53 name servers with name
servers for the new DNS service. If the domain is registered with another
registrar, use the method provided by the registrar to update name servers
for the domain registration. For more information, perform an internet
search on "free DNS service."
You can delete a hosted zone only if it contains only the default SOA
record and NS resource record sets. If the hosted zone contains other
resource record sets, you must delete them before you can delete the hosted
zone. If you try to delete a hosted zone that contains other resource
record sets, the request fails, and Route 53 returns a `HostedZoneNotEmpty`
error. For information about deleting records from your hosted zone, see
[ChangeResourceRecordSets](https://docs.aws.amazon.com/Route53/latest/APIReference/API_ChangeResourceRecordSets.html).
To verify that the hosted zone has been deleted, do one of the following:
<ul> <li> Use the `GetHostedZone` action to request information about the
hosted zone.
</li> <li> Use the `ListHostedZones` action to get a list of the hosted
zones associated with the current AWS account.
</li> </ul>
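
## Example

A sketch of the first verification option; after a successful delete,
`get_hosted_zone/2` is expected to return a `NoSuchHostedZone` error for the
deleted zone ID (`client` and `zone_id` are assumed to be in scope):

```elixir
{:ok, _body, _resp} = delete_hosted_zone(client, zone_id, %{})

case get_hosted_zone(client, zone_id) do
  {:error, _no_such_hosted_zone} -> :deleted
  {:ok, _body, _resp} -> :still_exists
end
```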
"""
def delete_hosted_zone(client, id, input, options \\ []) do
path_ = "/2013-04-01/hostedzone/#{URI.encode(id)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Deletes a configuration for DNS query logging. If you delete a
configuration, Amazon Route 53 stops sending query logs to CloudWatch Logs.
Route 53 doesn't delete any logs that are already in CloudWatch Logs.
For more information about DNS query logs, see
[CreateQueryLoggingConfig](https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateQueryLoggingConfig.html).
"""
def delete_query_logging_config(client, id, input, options \\ []) do
path_ = "/2013-04-01/queryloggingconfig/#{URI.encode(id)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Deletes a reusable delegation set.
<important> You can delete a reusable delegation set only if it isn't
associated with any hosted zones.
</important> To verify that the reusable delegation set is not associated
with any hosted zones, submit a
[GetReusableDelegationSet](https://docs.aws.amazon.com/Route53/latest/APIReference/API_GetReusableDelegationSet.html)
request and specify the ID of the reusable delegation set that you want to
delete.
"""
def delete_reusable_delegation_set(client, id, input, options \\ []) do
path_ = "/2013-04-01/delegationset/#{URI.encode(id)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Deletes a traffic policy.
"""
def delete_traffic_policy(client, id, version, input, options \\ []) do
path_ = "/2013-04-01/trafficpolicy/#{URI.encode(id)}/#{URI.encode(version)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Deletes a traffic policy instance and all of the resource record sets that
Amazon Route 53 created when you created the instance.
<note> In the Route 53 console, traffic policy instances are known as
policy records.
</note>
"""
def delete_traffic_policy_instance(client, id, input, options \\ []) do
path_ = "/2013-04-01/trafficpolicyinstance/#{URI.encode(id)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Removes authorization to submit an `AssociateVPCWithHostedZone` request to
associate a specified VPC with a hosted zone that was created by a
different account. You must use the account that created the hosted zone to
submit a `DeleteVPCAssociationAuthorization` request.
<important> Sending this request only prevents the AWS account that created
the VPC from associating the VPC with the Amazon Route 53 hosted zone in
the future. If the VPC is already associated with the hosted zone,
`DeleteVPCAssociationAuthorization` won't disassociate the VPC from the
hosted zone. If you want to delete an existing association, use
`DisassociateVPCFromHostedZone`.
</important>
"""
def delete_v_p_c_association_authorization(client, hosted_zone_id, input, options \\ []) do
path_ = "/2013-04-01/hostedzone/#{URI.encode(hosted_zone_id)}/deauthorizevpcassociation"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Disassociates an Amazon Virtual Private Cloud (Amazon VPC) from an Amazon
Route 53 private hosted zone. Note the following:
<ul> <li> You can't disassociate the last Amazon VPC from a private hosted
zone.
</li> <li> You can't convert a private hosted zone into a public hosted
zone.
</li> <li> You can submit a `DisassociateVPCFromHostedZone` request using
either the account that created the hosted zone or the account that created
the Amazon VPC.
</li> <li> Some services, such as AWS Cloud Map and Amazon Elastic File
System (Amazon EFS) automatically create hosted zones and associate VPCs
with the hosted zones. A service can create a hosted zone using your
account or using its own account. You can disassociate a VPC from a hosted
zone only if the service created the hosted zone using your account.
When you run
[DisassociateVPCFromHostedZone](https://docs.aws.amazon.com/Route53/latest/APIReference/API_ListHostedZonesByVPC.html),
if the hosted zone has a value for `OwningAccount`, you can use
`DisassociateVPCFromHostedZone`. If the hosted zone has a value for
`OwningService`, you can't use `DisassociateVPCFromHostedZone`.
</li> </ul>
"""
def disassociate_v_p_c_from_hosted_zone(client, hosted_zone_id, input, options \\ []) do
path_ = "/2013-04-01/hostedzone/#{URI.encode(hosted_zone_id)}/disassociatevpc"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Gets the specified limit for the current account, for example, the maximum
number of health checks that you can create using the account.
For the default limit, see
[Limits](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html)
in the *Amazon Route 53 Developer Guide*. To request a higher limit, [open
a
case](https://console.aws.amazon.com/support/home#/case/create?issueType=service-limit-increase&limitType=service-code-route53).
<note> You can also view account limits in AWS Trusted Advisor. Sign in to
the AWS Management Console and open the Trusted Advisor console at
[https://console.aws.amazon.com/trustedadvisor/](https://console.aws.amazon.com/trustedadvisor).
Then choose **Service limits** in the navigation pane.
</note>
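
## Example

A minimal sketch; the limit type is one of the strings the API defines, such
as `"MAX_HEALTH_CHECKS_BY_OWNER"` or `"MAX_HOSTED_ZONES_BY_OWNER"`:

```elixir
{:ok, body, _resp} = get_account_limit(client, "MAX_HOSTED_ZONES_BY_OWNER")
```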
"""
def get_account_limit(client, type, options \\ []) do
path_ = "/2013-04-01/accountlimit/#{URI.encode(type)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns the current status of a change batch request. The status is one of
the following values:
<ul> <li> `PENDING` indicates that the changes in this request have not
propagated to all Amazon Route 53 DNS servers. This is the initial status
of all change batch requests.
</li> <li> `INSYNC` indicates that the changes have propagated to all Route
53 DNS servers.
</li> </ul>
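
## Example

A sketch that polls until a change batch is `INSYNC`. The path into the
decoded XML `body` is an assumption about the `AWS.Util.decode_xml/1` output
shape, and `client` is assumed to be in scope:

```elixir
wait_insync = fn wait, change_id ->
  {:ok, body, _resp} = get_change(client, change_id)

  case get_in(body, ["GetChangeResponse", "ChangeInfo", "Status"]) do
    "INSYNC" ->
      :ok
    _pending ->
      Process.sleep(5_000)
      wait.(wait, change_id)
  end
end

wait_insync.(wait_insync, "C2682N5HXP0BZ4")
```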
"""
def get_change(client, id, options \\ []) do
path_ = "/2013-04-01/change/#{URI.encode(id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
<important> `GetCheckerIpRanges` still works, but we recommend that you
download ip-ranges.json, which includes IP address ranges for all AWS
services. For more information, see [IP Address Ranges of Amazon Route 53
Servers](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/route-53-ip-addresses.html)
in the *Amazon Route 53 Developer Guide*.
</important>
"""
def get_checker_ip_ranges(client, options \\ []) do
path_ = "/2013-04-01/checkeripranges"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets information about whether a specified geographic location is supported
for Amazon Route 53 geolocation resource record sets.
Use the following syntax to determine whether a continent is supported for
geolocation:
`GET /2013-04-01/geolocation?continentcode=*two-letter abbreviation for a
continent* `
Use the following syntax to determine whether a country is supported for
geolocation:
`GET /2013-04-01/geolocation?countrycode=*two-character country code* `
Use the following syntax to determine whether a subdivision of a country is
supported for geolocation:
`GET /2013-04-01/geolocation?countrycode=*two-character country
code*&subdivisioncode=*subdivision code* `
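
## Example

The three query forms above, expressed as calls (skipped leading options are
passed as `nil`):

```elixir
get_geo_location(client, "AF")            # continent
get_geo_location(client, nil, "US")       # country
get_geo_location(client, nil, "US", "WA") # subdivision of a country
```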
"""
def get_geo_location(client, continent_code \\ nil, country_code \\ nil, subdivision_code \\ nil, options \\ []) do
path_ = "/2013-04-01/geolocation"
headers = []
query_ = []
query_ = if !is_nil(subdivision_code) do
[{"subdivisioncode", subdivision_code} | query_]
else
query_
end
query_ = if !is_nil(country_code) do
[{"countrycode", country_code} | query_]
else
query_
end
query_ = if !is_nil(continent_code) do
[{"continentcode", continent_code} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets information about a specified health check.
"""
def get_health_check(client, health_check_id, options \\ []) do
path_ = "/2013-04-01/healthcheck/#{URI.encode(health_check_id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieves the number of health checks that are associated with the current
AWS account.
"""
def get_health_check_count(client, options \\ []) do
path_ = "/2013-04-01/healthcheckcount"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets the reason that a specified health check failed most recently.
"""
def get_health_check_last_failure_reason(client, health_check_id, options \\ []) do
path_ = "/2013-04-01/healthcheck/#{URI.encode(health_check_id)}/lastfailurereason"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets the status of a specified health check.
"""
def get_health_check_status(client, health_check_id, options \\ []) do
path_ = "/2013-04-01/healthcheck/#{URI.encode(health_check_id)}/status"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets information about a specified hosted zone including the four name
servers assigned to the hosted zone.
"""
def get_hosted_zone(client, id, options \\ []) do
path_ = "/2013-04-01/hostedzone/#{URI.encode(id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieves the number of hosted zones that are associated with the current
AWS account.
"""
def get_hosted_zone_count(client, options \\ []) do
path_ = "/2013-04-01/hostedzonecount"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets the specified limit for a specified hosted zone, for example, the
maximum number of records that you can create in the hosted zone.
For the default limit, see
[Limits](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html)
in the *Amazon Route 53 Developer Guide*. To request a higher limit, [open
a
case](https://console.aws.amazon.com/support/home#/case/create?issueType=service-limit-increase&limitType=service-code-route53).
"""
def get_hosted_zone_limit(client, hosted_zone_id, type, options \\ []) do
path_ = "/2013-04-01/hostedzonelimit/#{URI.encode(hosted_zone_id)}/#{URI.encode(type)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets information about a specified configuration for DNS query logging.
For more information about DNS query logs, see
[CreateQueryLoggingConfig](https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateQueryLoggingConfig.html)
and [Logging DNS
Queries](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/query-logs.html).
"""
def get_query_logging_config(client, id, options \\ []) do
path_ = "/2013-04-01/queryloggingconfig/#{URI.encode(id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieves information about a specified reusable delegation set, including
the four name servers that are assigned to the delegation set.
"""
def get_reusable_delegation_set(client, id, options \\ []) do
path_ = "/2013-04-01/delegationset/#{URI.encode(id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets the maximum number of hosted zones that you can associate with the
specified reusable delegation set.
For the default limit, see
[Limits](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html)
in the *Amazon Route 53 Developer Guide*. To request a higher limit, [open
a
case](https://console.aws.amazon.com/support/home#/case/create?issueType=service-limit-increase&limitType=service-code-route53).
"""
def get_reusable_delegation_set_limit(client, delegation_set_id, type, options \\ []) do
path_ = "/2013-04-01/reusabledelegationsetlimit/#{URI.encode(delegation_set_id)}/#{URI.encode(type)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets information about a specific traffic policy version.
"""
def get_traffic_policy(client, id, version, options \\ []) do
path_ = "/2013-04-01/trafficpolicy/#{URI.encode(id)}/#{URI.encode(version)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets information about a specified traffic policy instance.
<note> After you submit a `CreateTrafficPolicyInstance` or an
`UpdateTrafficPolicyInstance` request, there's a brief delay while Amazon
Route 53 creates the resource record sets that are specified in the traffic
policy definition. For more information, see the `State` response element.
</note> <note> In the Route 53 console, traffic policy instances are known
as policy records.
</note>
"""
def get_traffic_policy_instance(client, id, options \\ []) do
path_ = "/2013-04-01/trafficpolicyinstance/#{URI.encode(id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets the number of traffic policy instances that are associated with the
current AWS account.
"""
def get_traffic_policy_instance_count(client, options \\ []) do
path_ = "/2013-04-01/trafficpolicyinstancecount"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieves a list of supported geographic locations.
Countries are listed first, and continents are listed last. If Amazon Route
53 supports subdivisions for a country (for example, states or provinces),
the subdivisions for that country are listed in alphabetical order
immediately after the corresponding country.
For a list of supported geolocation codes, see the
[GeoLocation](https://docs.aws.amazon.com/Route53/latest/APIReference/API_GeoLocation.html)
data type.
"""
def list_geo_locations(client, max_items \\ nil, start_continent_code \\ nil, start_country_code \\ nil, start_subdivision_code \\ nil, options \\ []) do
path_ = "/2013-04-01/geolocations"
headers = []
query_ = []
query_ = if !is_nil(start_subdivision_code) do
[{"startsubdivisioncode", start_subdivision_code} | query_]
else
query_
end
query_ = if !is_nil(start_country_code) do
[{"startcountrycode", start_country_code} | query_]
else
query_
end
query_ = if !is_nil(start_continent_code) do
[{"startcontinentcode", start_continent_code} | query_]
else
query_
end
query_ = if !is_nil(max_items) do
[{"maxitems", max_items} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieve a list of the health checks that are associated with the current
AWS account.
"""
def list_health_checks(client, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2013-04-01/healthcheck"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"maxitems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieves a list of the public and private hosted zones that are associated
with the current AWS account. The response includes a `HostedZones` child
element for each hosted zone.
Amazon Route 53 returns a maximum of 100 items in each response. If you
have a lot of hosted zones, you can use the `maxitems` parameter to list
them in groups of up to 100.
"""
def list_hosted_zones(client, delegation_set_id \\ nil, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2013-04-01/hostedzone"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"maxitems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"marker", marker} | query_]
else
query_
end
query_ = if !is_nil(delegation_set_id) do
[{"delegationsetid", delegation_set_id} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieves a list of your hosted zones in lexicographic order. The response
includes a `HostedZones` child element for each hosted zone created by the
current AWS account.
`ListHostedZonesByName` sorts hosted zones by name with the labels
reversed. For example:
`com.example.www.`
Note the trailing dot, which can change the sort order in some
circumstances.
If the domain name includes escape characters or Punycode,
`ListHostedZonesByName` alphabetizes the domain name using the escaped or
Punycoded value, which is the format that Amazon Route 53 saves in its
database. For example, to create a hosted zone for exämple.com, you specify
ex\344mple.com for the domain name. `ListHostedZonesByName` alphabetizes it
as:
`com.ex\344mple.`
The labels are reversed and alphabetized using the escaped value. For more
information about valid domain name formats, including internationalized
domain names, see [DNS Domain Name
Format](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html)
in the *Amazon Route 53 Developer Guide*.
Route 53 returns up to 100 items in each response. If you have a lot of
hosted zones, use the `MaxItems` parameter to list them in groups of up to
100. The response includes values that help navigate from one group of
`MaxItems` hosted zones to the next:
<ul> <li> The `DNSName` and `HostedZoneId` elements in the response contain
the values, if any, specified for the `dnsname` and `hostedzoneid`
parameters in the request that produced the current response.
</li> <li> The `MaxItems` element in the response contains the value, if
any, that you specified for the `maxitems` parameter in the request that
produced the current response.
</li> <li> If the value of `IsTruncated` in the response is true, there are
more hosted zones associated with the current AWS account.
If `IsTruncated` is false, this response includes the last hosted zone that
is associated with the current account. The `NextDNSName` element and
`NextHostedZoneId` elements are omitted from the response.
</li> <li> The `NextDNSName` and `NextHostedZoneId` elements in the
response contain the domain name and the hosted zone ID of the next hosted
zone that is associated with the current AWS account. If you want to list
more hosted zones, make another call to `ListHostedZonesByName`, and
specify the value of `NextDNSName` and `NextHostedZoneId` in the `dnsname`
and `hostedzoneid` parameters, respectively.
</li> </ul>
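
## Example

A paging sketch that follows `NextDNSName` and `NextHostedZoneId` until
`IsTruncated` is no longer `"true"`. The field access into the decoded XML
`body` is an assumption about the `AWS.Util.decode_xml/1` output shape:

```elixir
list_all = fn list_all, dnsname, zone_id, acc ->
  {:ok, body, _resp} = list_hosted_zones_by_name(client, dnsname, zone_id)
  resp = body["ListHostedZonesByNameResponse"]
  acc = acc ++ List.wrap(get_in(resp, ["HostedZones", "HostedZone"]))

  if resp["IsTruncated"] == "true" do
    list_all.(list_all, resp["NextDNSName"], resp["NextHostedZoneId"], acc)
  else
    acc
  end
end

list_all.(list_all, nil, nil, [])
```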
"""
def list_hosted_zones_by_name(client, d_n_s_name \\ nil, hosted_zone_id \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2013-04-01/hostedzonesbyname"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"maxitems", max_items} | query_]
else
query_
end
query_ = if !is_nil(hosted_zone_id) do
[{"hostedzoneid", hosted_zone_id} | query_]
else
query_
end
query_ = if !is_nil(d_n_s_name) do
[{"dnsname", d_n_s_name} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Lists all the private hosted zones that a specified VPC is associated with,
regardless of which AWS account or AWS service owns the hosted zones. The
`HostedZoneOwner` structure in the response contains one of the following
values:
<ul> <li> An `OwningAccount` element, which contains the account number of
either the current AWS account or another AWS account. Some services, such
as AWS Cloud Map, create hosted zones using the current account.
</li> <li> An `OwningService` element, which identifies the AWS service
that created and owns the hosted zone. For example, if a hosted zone was
created by Amazon Elastic File System (Amazon EFS), the value of `Owner` is
`efs.amazonaws.com`.
</li> </ul>
"""
def list_hosted_zones_by_v_p_c(client, max_items \\ nil, next_token \\ nil, v_p_c_id, v_p_c_region, options \\ []) do
path_ = "/2013-04-01/hostedzonesbyvpc"
headers = []
query_ = []
query_ = if !is_nil(v_p_c_region) do
[{"vpcregion", v_p_c_region} | query_]
else
query_
end
query_ = if !is_nil(v_p_c_id) do
[{"vpcid", v_p_c_id} | query_]
else
query_
end
query_ = if !is_nil(next_token) do
[{"nexttoken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_items) do
[{"maxitems", max_items} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Lists the configurations for DNS query logging that are associated with the
current AWS account or the configuration that is associated with a
specified hosted zone.
For more information about DNS query logs, see
[CreateQueryLoggingConfig](https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateQueryLoggingConfig.html).
Additional information, including the format of DNS query logs, appears in
[Logging DNS
Queries](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/query-logs.html)
in the *Amazon Route 53 Developer Guide*.
"""
def list_query_logging_configs(client, hosted_zone_id \\ nil, max_results \\ nil, next_token \\ nil, options \\ []) do
path_ = "/2013-04-01/queryloggingconfig"
headers = []
query_ = []
query_ = if !is_nil(next_token) do
[{"nexttoken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"maxresults", max_results} | query_]
else
query_
end
query_ = if !is_nil(hosted_zone_id) do
[{"hostedzoneid", hosted_zone_id} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Lists the resource record sets in a specified hosted zone.
`ListResourceRecordSets` returns up to 100 resource record sets at a time
in ASCII order, beginning at a position specified by the `name` and `type`
elements.
**Sort order**
`ListResourceRecordSets` sorts results first by DNS name with the labels
reversed, for example:
`com.example.www.`
Note the trailing dot, which can change the sort order when the record name
contains characters that appear before `.` (decimal 46) in the ASCII table.
These characters include the following: `! " # $ % & ' ( ) * + , -`
When multiple records have the same DNS name, `ListResourceRecordSets`
sorts results by the record type.
**Specifying where to start listing records**
You can use the name and type elements to specify the resource record set
that the list begins with:
<dl> <dt>If you do not specify Name or Type</dt> <dd> The results begin
with the first resource record set that the hosted zone contains.
</dd> <dt>If you specify Name but not Type</dt> <dd> The results begin with
the first resource record set in the list whose name is greater than or
equal to `Name`.
</dd> <dt>If you specify Type but not Name</dt> <dd> Amazon Route 53
returns the `InvalidInput` error.
</dd> <dt>If you specify both Name and Type</dt> <dd> The results begin
with the first resource record set in the list whose name is greater than
or equal to `Name`, and whose type is greater than or equal to `Type`.
</dd> </dl> **Resource record sets that are PENDING**
This action returns the most current version of the records. This includes
records that are `PENDING`, and that are not yet available on all Route 53
DNS servers.
**Changing resource record sets**
To ensure that you get an accurate listing of the resource record sets for
a hosted zone at a point in time, do not submit a
`ChangeResourceRecordSets` request while you're paging through the results
of a `ListResourceRecordSets` request. If you do, some pages may display
results without the latest changes while other pages display results with
the latest changes.
**Displaying the next page of results**
If a `ListResourceRecordSets` command returns more than one page of
results, the value of `IsTruncated` is `true`. To display the next page of
results, get the values of `NextRecordName`, `NextRecordType`, and
`NextRecordIdentifier` (if any) from the response. Then submit another
`ListResourceRecordSets` request, and specify those values for
`StartRecordName`, `StartRecordType`, and `StartRecordIdentifier`.
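
## Example

A paging sketch that follows `NextRecordName`, `NextRecordType`, and
`NextRecordIdentifier`. As in the other paging examples, the field access
into the decoded XML `body` is an assumption about `AWS.Util.decode_xml/1`:

```elixir
page = fn page, name, type, ident, acc ->
  {:ok, body, _resp} =
    list_resource_record_sets(client, zone_id, nil, ident, name, type)

  resp = body["ListResourceRecordSetsResponse"]
  acc = acc ++ List.wrap(get_in(resp, ["ResourceRecordSets", "ResourceRecordSet"]))

  if resp["IsTruncated"] == "true" do
    page.(page, resp["NextRecordName"], resp["NextRecordType"],
          resp["NextRecordIdentifier"], acc)
  else
    acc
  end
end

page.(page, nil, nil, nil, [])
```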
"""
def list_resource_record_sets(client, hosted_zone_id, max_items \\ nil, start_record_identifier \\ nil, start_record_name \\ nil, start_record_type \\ nil, options \\ []) do
path_ = "/2013-04-01/hostedzone/#{URI.encode(hosted_zone_id)}/rrset"
headers = []
query_ = []
query_ = if !is_nil(start_record_type) do
[{"type", start_record_type} | query_]
else
query_
end
query_ = if !is_nil(start_record_name) do
[{"name", start_record_name} | query_]
else
query_
end
query_ = if !is_nil(start_record_identifier) do
[{"identifier", start_record_identifier} | query_]
else
query_
end
query_ = if !is_nil(max_items) do
[{"maxitems", max_items} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieves a list of the reusable delegation sets that are associated with
the current AWS account.
"""
def list_reusable_delegation_sets(client, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2013-04-01/delegationset"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"maxitems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Lists tags for one health check or hosted zone.
For information about using tags for cost allocation, see [Using Cost
Allocation
Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)
in the *AWS Billing and Cost Management User Guide*.
"""
def list_tags_for_resource(client, resource_id, resource_type, options \\ []) do
path_ = "/2013-04-01/tags/#{URI.encode(resource_type)}/#{URI.encode(resource_id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Lists tags for up to 10 health checks or hosted zones.
For information about using tags for cost allocation, see [Using Cost
Allocation
Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)
in the *AWS Billing and Cost Management User Guide*.
"""
def list_tags_for_resources(client, resource_type, input, options \\ []) do
path_ = "/2013-04-01/tags/#{URI.encode(resource_type)}"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Gets information about the latest version for every traffic policy that is
associated with the current AWS account. Policies are listed in the order
in which they were created.
"""
def list_traffic_policies(client, max_items \\ nil, traffic_policy_id_marker \\ nil, options \\ []) do
path_ = "/2013-04-01/trafficpolicies"
headers = []
query_ = []
query_ = if !is_nil(traffic_policy_id_marker) do
[{"trafficpolicyid", traffic_policy_id_marker} | query_]
else
query_
end
query_ = if !is_nil(max_items) do
[{"maxitems", max_items} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets information about the traffic policy instances that you created by
using the current AWS account.
<note> After you submit an `UpdateTrafficPolicyInstance` request, there's a
brief delay while Amazon Route 53 creates the resource record sets that are
specified in the traffic policy definition. For more information, see the
`State` response element.
</note> Route 53 returns a maximum of 100 items in each response. If you
have a lot of traffic policy instances, you can use the `MaxItems`
parameter to list them in groups of up to 100.
"""
def list_traffic_policy_instances(client, hosted_zone_id_marker \\ nil, max_items \\ nil, traffic_policy_instance_name_marker \\ nil, traffic_policy_instance_type_marker \\ nil, options \\ []) do
path_ = "/2013-04-01/trafficpolicyinstances"
headers = []
query_ = []
query_ = if !is_nil(traffic_policy_instance_type_marker) do
[{"trafficpolicyinstancetype", traffic_policy_instance_type_marker} | query_]
else
query_
end
query_ = if !is_nil(traffic_policy_instance_name_marker) do
[{"trafficpolicyinstancename", traffic_policy_instance_name_marker} | query_]
else
query_
end
query_ = if !is_nil(max_items) do
[{"maxitems", max_items} | query_]
else
query_
end
query_ = if !is_nil(hosted_zone_id_marker) do
[{"hostedzoneid", hosted_zone_id_marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets information about the traffic policy instances that you created in a
specified hosted zone.
<note> After you submit a `CreateTrafficPolicyInstance` or an
`UpdateTrafficPolicyInstance` request, there's a brief delay while Amazon
Route 53 creates the resource record sets that are specified in the traffic
policy definition. For more information, see the `State` response element.
</note> Route 53 returns a maximum of 100 items in each response. If you
have a lot of traffic policy instances, you can use the `MaxItems`
parameter to list them in groups of up to 100.
"""
def list_traffic_policy_instances_by_hosted_zone(client, hosted_zone_id, max_items \\ nil, traffic_policy_instance_name_marker \\ nil, traffic_policy_instance_type_marker \\ nil, options \\ []) do
path_ = "/2013-04-01/trafficpolicyinstances/hostedzone"
headers = []
query_ = []
query_ = if !is_nil(traffic_policy_instance_type_marker) do
[{"trafficpolicyinstancetype", traffic_policy_instance_type_marker} | query_]
else
query_
end
query_ = if !is_nil(traffic_policy_instance_name_marker) do
[{"trafficpolicyinstancename", traffic_policy_instance_name_marker} | query_]
else
query_
end
query_ = if !is_nil(max_items) do
[{"maxitems", max_items} | query_]
else
query_
end
query_ = if !is_nil(hosted_zone_id) do
[{"id", hosted_zone_id} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets information about the traffic policy instances that you created by
using a specified traffic policy version.
<note> After you submit a `CreateTrafficPolicyInstance` or an
`UpdateTrafficPolicyInstance` request, there's a brief delay while Amazon
Route 53 creates the resource record sets that are specified in the traffic
policy definition. For more information, see the `State` response element.
</note> Route 53 returns a maximum of 100 items in each response. If you
have a lot of traffic policy instances, you can use the `MaxItems`
parameter to list them in groups of up to 100.
"""
def list_traffic_policy_instances_by_policy(client, hosted_zone_id_marker \\ nil, max_items \\ nil, traffic_policy_id, traffic_policy_instance_name_marker \\ nil, traffic_policy_instance_type_marker \\ nil, traffic_policy_version, options \\ []) do
path_ = "/2013-04-01/trafficpolicyinstances/trafficpolicy"
headers = []
query_ = []
query_ = if !is_nil(traffic_policy_version) do
[{"version", traffic_policy_version} | query_]
else
query_
end
query_ = if !is_nil(traffic_policy_instance_type_marker) do
[{"trafficpolicyinstancetype", traffic_policy_instance_type_marker} | query_]
else
query_
end
query_ = if !is_nil(traffic_policy_instance_name_marker) do
[{"trafficpolicyinstancename", traffic_policy_instance_name_marker} | query_]
else
query_
end
query_ = if !is_nil(traffic_policy_id) do
[{"id", traffic_policy_id} | query_]
else
query_
end
query_ = if !is_nil(max_items) do
[{"maxitems", max_items} | query_]
else
query_
end
query_ = if !is_nil(hosted_zone_id_marker) do
[{"hostedzoneid", hosted_zone_id_marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets information about all of the versions for a specified traffic policy.
Traffic policy versions are listed in numerical order by `VersionNumber`.
"""
def list_traffic_policy_versions(client, id, max_items \\ nil, traffic_policy_version_marker \\ nil, options \\ []) do
path_ = "/2013-04-01/trafficpolicies/#{URI.encode(id)}/versions"
headers = []
query_ = []
query_ = if !is_nil(traffic_policy_version_marker) do
[{"trafficpolicyversion", traffic_policy_version_marker} | query_]
else
query_
end
query_ = if !is_nil(max_items) do
[{"maxitems", max_items} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets a list of the VPCs that were created by other accounts and that can be
associated with a specified hosted zone because you've submitted one or
more `CreateVPCAssociationAuthorization` requests.
The response includes a `VPCs` element with a `VPC` child element for each
VPC that can be associated with the hosted zone.
"""
def list_v_p_c_association_authorizations(client, hosted_zone_id, max_results \\ nil, next_token \\ nil, options \\ []) do
path_ = "/2013-04-01/hostedzone/#{URI.encode(hosted_zone_id)}/authorizevpcassociation"
headers = []
query_ = []
query_ = if !is_nil(next_token) do
[{"nexttoken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"maxresults", max_results} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets the value that Amazon Route 53 returns in response to a DNS request
for a specified record name and type. You can optionally specify the IP
address of a DNS resolver, an EDNS0 client subnet IP address, and a subnet
mask.
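
## Example

A minimal sketch (resolver and subnet options omitted; Elixir fills the
defaulted leading parameters, so the four required arguments can be passed
directly):

```elixir
test_d_n_s_answer(client, "Z1D633PJN98FT9", "www.example.com", "A")
```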
"""
def test_d_n_s_answer(client, e_d_n_s0_client_subnet_i_p \\ nil, e_d_n_s0_client_subnet_mask \\ nil, hosted_zone_id, record_name, record_type, resolver_i_p \\ nil, options \\ []) do
path_ = "/2013-04-01/testdnsanswer"
headers = []
query_ = []
query_ = if !is_nil(resolver_i_p) do
[{"resolverip", resolver_i_p} | query_]
else
query_
end
query_ = if !is_nil(record_type) do
[{"recordtype", record_type} | query_]
else
query_
end
query_ = if !is_nil(record_name) do
[{"recordname", record_name} | query_]
else
query_
end
query_ = if !is_nil(hosted_zone_id) do
[{"hostedzoneid", hosted_zone_id} | query_]
else
query_
end
query_ = if !is_nil(e_d_n_s0_client_subnet_mask) do
[{"edns0clientsubnetmask", e_d_n_s0_client_subnet_mask} | query_]
else
query_
end
query_ = if !is_nil(e_d_n_s0_client_subnet_i_p) do
[{"edns0clientsubnetip", e_d_n_s0_client_subnet_i_p} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Updates an existing health check. Note that some values can't be updated.
For more information about updating health checks, see [Creating, Updating,
and Deleting Health
Checks](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/health-checks-creating-deleting.html)
in the *Amazon Route 53 Developer Guide*.
"""
def update_health_check(client, health_check_id, input, options \\ []) do
path_ = "/2013-04-01/healthcheck/#{URI.encode(health_check_id)}"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Updates the comment for a specified hosted zone.
"""
def update_hosted_zone_comment(client, id, input, options \\ []) do
path_ = "/2013-04-01/hostedzone/#{URI.encode(id)}"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Updates the comment for a specified traffic policy version.
"""
def update_traffic_policy_comment(client, id, version, input, options \\ []) do
path_ = "/2013-04-01/trafficpolicy/#{URI.encode(id)}/#{URI.encode(version)}"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Updates the resource record sets in a specified hosted zone that were
created based on the settings in a specified traffic policy version.
When you update a traffic policy instance, Amazon Route 53 continues to
respond to DNS queries for the root resource record set name (such as
example.com) while it replaces one group of resource record sets with
another. Route 53 performs the following operations:
<ol> <li> Route 53 creates a new group of resource record sets based on the
specified traffic policy. This is true regardless of how significant the
differences are between the existing resource record sets and the new
resource record sets.
</li> <li> When all of the new resource record sets have been created,
Route 53 starts to respond to DNS queries for the root resource record set
name (such as example.com) by using the new resource record sets.
</li> <li> Route 53 deletes the old group of resource record sets that are
associated with the root resource record set name.
</li> </ol>
"""
def update_traffic_policy_instance(client, id, input, options \\ []) do
path_ = "/2013-04-01/trafficpolicyinstance/#{URI.encode(id)}"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
  @spec request(AWS.Client.t(), atom(), binary(), list(), list(), map() | nil, list(), pos_integer() | nil) ::
          {:ok, map(), HTTPoison.Response.t()}
          | {:ok, HTTPoison.Response.t()}
          | {:error, map()}
          | {:error, HTTPoison.Error.t()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
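    # Route 53 is a global service: every request is pinned to the us-east-1
    # endpoint and signed for that region, regardless of the region
    # configured on the client.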
client = %{client | service: "route53",
region: "us-east-1"}
host = build_host("route53", client)
url = host
|> build_url(path, client)
|> add_query(query)
additional_headers = [{"Host", host}, {"Content-Type", "text/xml"}]
headers = AWS.Request.add_headers(additional_headers, headers)
payload = encode_payload(input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(method, url, payload, headers, options, success_status_code)
end
defp perform_request(method, url, payload, headers, options, nil) do
case HTTPoison.request(method, url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, response}
{:ok, %HTTPoison.Response{status_code: status_code, body: body} = response}
when status_code == 200 or status_code == 202 or status_code == 204 ->
{:ok, AWS.Util.decode_xml(body), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = AWS.Util.decode_xml(body)
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp perform_request(method, url, payload, headers, options, success_status_code) do
case HTTPoison.request(method, url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: ^success_status_code, body: ""} = response} ->
{:ok, %{}, response}
{:ok, %HTTPoison.Response{status_code: ^success_status_code, body: body} = response} ->
{:ok, AWS.Util.decode_xml(body), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = AWS.Util.decode_xml(body)
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{endpoint: endpoint}) do
"#{endpoint_prefix}.#{endpoint}"
end
defp build_url(host, path, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}#{path}"
end
defp add_query(url, []) do
url
end
defp add_query(url, query) do
querystring = AWS.Util.encode_query(query)
"#{url}?#{querystring}"
end
defp encode_payload(input) do
if input != nil, do: AWS.Util.encode_xml(input), else: ""
end
end
|
lib/aws/route53.ex
| 0.899583 | 0.471892 |
route53.ex
|
starcoder
|
defmodule OT.Server.Impl do
@moduledoc """
Implements the business logic of interacting with data in an OT system.
"""
@adapter Application.get_env(:ot_server, :adapter, OT.Server.ETSAdapter)
  # Fall back to a finite cap (5 is an arbitrary default) so the
  # `retries > @max_retries` guard in `submit_operation/4` never compares
  # against nil, which would make the guard always fail.
  @max_retries Application.get_env(:ot_server, :max_retries, 5)
@doc """
Get a datum using the configured `OT.Server.Adapter`.
"""
@spec get_datum(OT.Server.datum_id) ::
{:ok, OT.Server.datum} | {:error, any}
def get_datum(id) do
@adapter.get_datum(id)
end
@doc """
Submit an operation using the configured `OT.Server.Adapter`, transforming it
against concurrent operations, if necessary.
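
## Example

A minimal sketch, assuming an OT type registered under the datum's `:type`
key and a client-produced `{operation, version}` pair (all literals are
hypothetical):

```elixir
OT.Server.Impl.submit_operation("doc-1", {[%{i: "Hello"}], 2}, %{user: "alice"})
```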
"""
@spec submit_operation(OT.Server.datum_id,
OT.Server.operation_info, any, non_neg_integer) ::
{:ok, OT.Server.operation} | {:error, any}
def submit_operation(datum_id, op_vsn, op_meta, retries \\ 0)
def submit_operation(_, _, _, retries) when retries > @max_retries do
{:error, :max_retries_exceeded}
end
def submit_operation(datum_id, {op, vsn}, op_meta, retries) do
txn_result =
@adapter.transact(datum_id, fn ->
case attempt_submit_operation(datum_id, {op, vsn}, op_meta) do
{:ok, new_op} -> new_op
{:error, err} -> @adapter.rollback(err)
end
end)
case txn_result do
{:ok, new_op} ->
{:ok, new_op}
{:error, err} ->
case @adapter.handle_submit_error(err, datum_id, {op, vsn}) do
          :retry -> submit_operation(datum_id, {op, vsn}, op_meta, retries + 1)
err -> err
end
end
end
defp attempt_submit_operation(datum_id, {op, vsn}, op_meta) do
with {:ok, datum} <- @adapter.get_datum(datum_id),
{:ok, type} <- lookup_type(Map.get(datum, :type)),
{:ok, vsn} <- check_datum_version(Map.get(datum, :version), vsn),
{op, vsn} = get_new_operation(datum, {op, vsn}, type),
{:ok, datum} <- update_datum(datum, op, type) do
@adapter.insert_operation(datum, {op, vsn}, op_meta)
end
end
defp lookup_type(type_key) do
case Application.get_env(:ot_server, :ot_types, %{})[type_key] do
type when not is_nil(type) -> {:ok, type}
_ -> {:error, :type_not_found}
end
end
defp check_datum_version(datum_vsn, op_vsn) do
if op_vsn > datum_vsn + 1 do
{:error, {:version_mismatch, op_vsn, datum_vsn}}
else
{:ok, op_vsn}
end
end
defp get_new_operation(datum, {op, vsn}, type) do
case @adapter.get_conflicting_operations(datum, vsn) do
[] ->
{op, vsn}
conflicting_ops ->
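        # The op was made against a stale version: advance its version past
        # the newest conflicting op, then rebase it over each conflicting op
        # in order, letting the already-committed side win ties (:left).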
new_vsn =
conflicting_ops
|> Enum.max_by(&(elem(&1, 1)))
|> elem(1)
|> Kernel.+(1)
new_op =
conflicting_ops
|> Enum.reduce(op, &type.transform(&2, elem(&1, 0), :left))
{new_op, new_vsn}
end
end
defp update_datum(datum, op, type) do
case type.apply(Map.get(datum, :content), op) do
{:ok, content} ->
@adapter.update_datum(datum, content)
err ->
err
end
end
end
|
lib/ot/server/impl.ex
| 0.780537 | 0.476945 |
impl.ex
|
starcoder
|
defmodule AWS.PinpointEmail do
@moduledoc """
Amazon Pinpoint Email Service
Welcome to the *Amazon Pinpoint Email API Reference*. This guide provides
information about the Amazon Pinpoint Email API (version 1.0), including
supported operations, data types, parameters, and schemas.
[Amazon Pinpoint](https://aws.amazon.com/pinpoint) is an AWS service that
you can use to engage with your customers across multiple messaging
channels. You can use Amazon Pinpoint to send email, SMS text messages,
voice messages, and push notifications. The Amazon Pinpoint Email API
provides programmatic access to options that are unique to the email
channel and supplement the options provided by the Amazon Pinpoint API.
If you're new to Amazon Pinpoint, you might find it helpful to also review
the [Amazon Pinpoint Developer
Guide](https://docs.aws.amazon.com/pinpoint/latest/developerguide/welcome.html).
The *Amazon Pinpoint Developer Guide* provides tutorials, code samples, and
procedures that demonstrate how to use Amazon Pinpoint features
programmatically and how to integrate Amazon Pinpoint functionality into
mobile apps and other types of applications. The guide also provides
information about key topics such as Amazon Pinpoint integration with other
AWS services and the limits that apply to using the service.
The Amazon Pinpoint Email API is available in several AWS Regions and it
provides an endpoint for each of these Regions. For a list of all the
Regions and endpoints where the API is currently available, see [AWS
Service
Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#pinpoint_region)
in the *Amazon Web Services General Reference*. To learn more about AWS
Regions, see [Managing AWS
Regions](https://docs.aws.amazon.com/general/latest/gr/rande-manage.html)
in the *Amazon Web Services General Reference*.
In each Region, AWS maintains multiple Availability Zones. These
Availability Zones are physically isolated from each other, but are united
by private, low-latency, high-throughput, and highly redundant network
connections. These Availability Zones enable us to provide very high levels
of availability and redundancy, while also minimizing latency. To learn
more about the number of Availability Zones that are available in each
Region, see [AWS Global
Infrastructure](http://aws.amazon.com/about-aws/global-infrastructure/).
"""
@doc """
Create a configuration set. *Configuration sets* are groups of rules that
you can apply to the emails you send using Amazon Pinpoint. You apply a
configuration set to an email by including a reference to the configuration
set in the headers of the email. When you apply a configuration set to an
email, all of the rules in that configuration set are applied to the email.
"""
def create_configuration_set(client, input, options \\ []) do
path_ = "/v1/email/configuration-sets"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Create an event destination. In Amazon Pinpoint, *events* include message
sends, deliveries, opens, clicks, bounces, and complaints. *Event
destinations* are places that you can send information about these events
to. For example, you can send event data to Amazon SNS to receive
notifications when you receive bounces or complaints, or you can use Amazon
Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
A single configuration set can include more than one event destination.
"""
def create_configuration_set_event_destination(client, configuration_set_name, input, options \\ []) do
path_ = "/v1/email/configuration-sets/#{URI.encode(configuration_set_name)}/event-destinations"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Create a new pool of dedicated IP addresses. A pool can include one or more
dedicated IP addresses that are associated with your Amazon Pinpoint
account. You can associate a pool with a configuration set. When you send
an email that uses that configuration set, Amazon Pinpoint sends it using
only the IP addresses in the associated pool.
"""
def create_dedicated_ip_pool(client, input, options \\ []) do
path_ = "/v1/email/dedicated-ip-pools"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Create a new predictive inbox placement test. Predictive inbox placement
tests can help you predict how your messages will be handled by various
email providers around the world. When you perform a predictive inbox
placement test, you provide a sample message that contains the content that
you plan to send to your customers. Amazon Pinpoint then sends that message
to special email addresses spread across several major email providers.
After about 24 hours, the test is complete, and you can use the
`GetDeliverabilityTestReport` operation to view the results of the test.
"""
def create_deliverability_test_report(client, input, options \\ []) do
path_ = "/v1/email/deliverability-dashboard/test"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Verifies an email identity for use with Amazon Pinpoint. In Amazon
Pinpoint, an identity is an email address or domain that you use when you
send email. Before you can use an identity to send email with Amazon
Pinpoint, you first have to verify it. By verifying an address, you
demonstrate that you're the owner of the address, and that you've given
Amazon Pinpoint permission to send email from the address.
When you verify an email address, Amazon Pinpoint sends an email to the
address. Your email address is verified as soon as you follow the link in
the verification email.
When you verify a domain, this operation provides a set of DKIM tokens,
which you can convert into CNAME tokens. You add these CNAME tokens to the
DNS configuration for your domain. Your domain is verified when Amazon
Pinpoint detects these records in the DNS configuration for your domain. It
usually takes around 72 hours to complete the domain verification process.
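
## Example

A minimal sketch for verifying a sending domain; the decoded response is
expected to carry the DKIM tokens described above:

```elixir
{:ok, body, _resp} =
  create_email_identity(client, %{"EmailIdentity" => "example.com"})
```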
"""
def create_email_identity(client, input, options \\ []) do
path_ = "/v1/email/identities"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Delete an existing configuration set.
In Amazon Pinpoint, *configuration sets* are groups of rules that you can
apply to the emails you send. You apply a configuration set to an email by
including a reference to the configuration set in the headers of the email.
When you apply a configuration set to an email, all of the rules in that
configuration set are applied to the email.
"""
def delete_configuration_set(client, configuration_set_name, input, options \\ []) do
path_ = "/v1/email/configuration-sets/#{URI.encode(configuration_set_name)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Delete an event destination.
In Amazon Pinpoint, *events* include message sends, deliveries, opens,
clicks, bounces, and complaints. *Event destinations* are places that you
can send information about these events to. For example, you can send event
data to Amazon SNS to receive notifications when you receive bounces or
complaints, or you can use Amazon Kinesis Data Firehose to stream data to
Amazon S3 for long-term storage.
"""
def delete_configuration_set_event_destination(client, configuration_set_name, event_destination_name, input, options \\ []) do
path_ = "/v1/email/configuration-sets/#{URI.encode(configuration_set_name)}/event-destinations/#{URI.encode(event_destination_name)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Delete a dedicated IP pool.
"""
def delete_dedicated_ip_pool(client, pool_name, input, options \\ []) do
path_ = "/v1/email/dedicated-ip-pools/#{URI.encode(pool_name)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Deletes an email identity that you previously verified for use with Amazon
Pinpoint. An identity can be either an email address or a domain name.
"""
def delete_email_identity(client, email_identity, input, options \\ []) do
path_ = "/v1/email/identities/#{URI.encode(email_identity)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Obtain information about the email-sending status and capabilities of your
Amazon Pinpoint account in the current AWS Region.
"""
def get_account(client, options \\ []) do
path_ = "/v1/email/account"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieve a list of the blacklists that your dedicated IP addresses appear
on.
"""
def get_blacklist_reports(client, blacklist_item_names, options \\ []) do
path_ = "/v1/email/deliverability-dashboard/blacklist-report"
headers = []
query_ = []
query_ = if !is_nil(blacklist_item_names) do
[{"BlacklistItemNames", blacklist_item_names} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Get information about an existing configuration set, including the
dedicated IP pool that it's associated with, whether or not it's enabled
for sending email, and more.
In Amazon Pinpoint, *configuration sets* are groups of rules that you can
apply to the emails you send. You apply a configuration set to an email by
including a reference to the configuration set in the headers of the email.
When you apply a configuration set to an email, all of the rules in that
configuration set are applied to the email.
"""
def get_configuration_set(client, configuration_set_name, options \\ []) do
path_ = "/v1/email/configuration-sets/#{URI.encode(configuration_set_name)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieve a list of event destinations that are associated with a
configuration set.
In Amazon Pinpoint, *events* include message sends, deliveries, opens,
clicks, bounces, and complaints. *Event destinations* are places that you
can send information about these events to. For example, you can send event
data to Amazon SNS to receive notifications when you receive bounces or
complaints, or you can use Amazon Kinesis Data Firehose to stream data to
Amazon S3 for long-term storage.
"""
def get_configuration_set_event_destinations(client, configuration_set_name, options \\ []) do
path_ = "/v1/email/configuration-sets/#{URI.encode(configuration_set_name)}/event-destinations"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Get information about a dedicated IP address, including the name of the
dedicated IP pool that it's associated with, as well information about the
automatic warm-up process for the address.
"""
def get_dedicated_ip(client, ip, options \\ []) do
path_ = "/v1/email/dedicated-ips/#{URI.encode(ip)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
List the dedicated IP addresses that are associated with your Amazon
Pinpoint account.
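## Example

A hypothetical call fetching up to 20 addresses from a pool named "transactional"
(the pool name and page size here are illustrative only):

    get_dedicated_ips(client, nil, 20, "transactional")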
"""
def get_dedicated_ips(client, next_token \\ nil, page_size \\ nil, pool_name \\ nil, options \\ []) do
path_ = "/v1/email/dedicated-ips"
headers = []
query_ = []
query_ = if !is_nil(pool_name) do
[{"PoolName", pool_name} | query_]
else
query_
end
query_ = if !is_nil(page_size) do
[{"PageSize", page_size} | query_]
else
query_
end
query_ = if !is_nil(next_token) do
[{"NextToken", next_token} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieve information about the status of the Deliverability dashboard for
your Amazon Pinpoint account. When the Deliverability dashboard is enabled,
you gain access to reputation, deliverability, and other metrics for the
domains that you use to send email using Amazon Pinpoint. You also gain the
ability to perform predictive inbox placement tests.
When you use the Deliverability dashboard, you pay a monthly subscription
charge, in addition to any other fees that you accrue by using Amazon
Pinpoint. For more information about the features and cost of a
Deliverability dashboard subscription, see [Amazon Pinpoint
Pricing](http://aws.amazon.com/pinpoint/pricing/).
"""
def get_deliverability_dashboard_options(client, options \\ []) do
path_ = "/v1/email/deliverability-dashboard"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieve the results of a predictive inbox placement test.
"""
def get_deliverability_test_report(client, report_id, options \\ []) do
path_ = "/v1/email/deliverability-dashboard/test-reports/#{URI.encode(report_id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieve all the deliverability data for a specific campaign. This data is
available for a campaign only if the campaign sent email by using a domain
that the Deliverability dashboard is enabled for
(`PutDeliverabilityDashboardOption` operation).
"""
def get_domain_deliverability_campaign(client, campaign_id, options \\ []) do
path_ = "/v1/email/deliverability-dashboard/campaigns/#{URI.encode(campaign_id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieve inbox placement and engagement rates for the domains that you use
to send email.
"""
def get_domain_statistics_report(client, domain, end_date, start_date, options \\ []) do
path_ = "/v1/email/deliverability-dashboard/statistics-report/#{URI.encode(domain)}"
headers = []
query_ = []
query_ = if !is_nil(start_date) do
[{"StartDate", start_date} | query_]
else
query_
end
query_ = if !is_nil(end_date) do
[{"EndDate", end_date} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Provides information about a specific identity associated with your Amazon
Pinpoint account, including the identity's verification status, its DKIM
authentication status, and its custom Mail-From settings.
"""
def get_email_identity(client, email_identity, options \\ []) do
path_ = "/v1/email/identities/#{URI.encode(email_identity)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
List all of the configuration sets associated with your Amazon Pinpoint
account in the current region.
In Amazon Pinpoint, *configuration sets* are groups of rules that you can
apply to the emails you send. You apply a configuration set to an email by
including a reference to the configuration set in the headers of the email.
When you apply a configuration set to an email, all of the rules in that
configuration set are applied to the email.
"""
def list_configuration_sets(client, next_token \\ nil, page_size \\ nil, options \\ []) do
path_ = "/v1/email/configuration-sets"
headers = []
query_ = []
query_ = if !is_nil(page_size) do
[{"PageSize", page_size} | query_]
else
query_
end
query_ = if !is_nil(next_token) do
[{"NextToken", next_token} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
List all of the dedicated IP pools that exist in your Amazon Pinpoint
account in the current AWS Region.
"""
def list_dedicated_ip_pools(client, next_token \\ nil, page_size \\ nil, options \\ []) do
path_ = "/v1/email/dedicated-ip-pools"
headers = []
query_ = []
query_ = if !is_nil(page_size) do
[{"PageSize", page_size} | query_]
else
query_
end
query_ = if !is_nil(next_token) do
[{"NextToken", next_token} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Show a list of the predictive inbox placement tests that you've performed,
regardless of their statuses. For predictive inbox placement tests that are
complete, you can use the `GetDeliverabilityTestReport` operation to view
the results.
"""
def list_deliverability_test_reports(client, next_token \\ nil, page_size \\ nil, options \\ []) do
path_ = "/v1/email/deliverability-dashboard/test-reports"
headers = []
query_ = []
query_ = if !is_nil(page_size) do
[{"PageSize", page_size} | query_]
else
query_
end
query_ = if !is_nil(next_token) do
[{"NextToken", next_token} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieve deliverability data for all the campaigns that used a specific
domain to send email during a specified time range. This data is available
for a domain only if you enabled the Deliverability dashboard
(`PutDeliverabilityDashboardOption` operation) for the domain.
"""
def list_domain_deliverability_campaigns(client, subscribed_domain, end_date, next_token \\ nil, page_size \\ nil, start_date, options \\ []) do
path_ = "/v1/email/deliverability-dashboard/domains/#{URI.encode(subscribed_domain)}/campaigns"
headers = []
query_ = []
query_ = if !is_nil(start_date) do
[{"StartDate", start_date} | query_]
else
query_
end
query_ = if !is_nil(page_size) do
[{"PageSize", page_size} | query_]
else
query_
end
query_ = if !is_nil(next_token) do
[{"NextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(end_date) do
[{"EndDate", end_date} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns a list of all of the email identities that are associated with your
Amazon Pinpoint account. An identity can be either an email address or a
domain. This operation returns identities that are verified as well as
those that aren't.
"""
def list_email_identities(client, next_token \\ nil, page_size \\ nil, options \\ []) do
path_ = "/v1/email/identities"
headers = []
query_ = []
query_ = if !is_nil(page_size) do
[{"PageSize", page_size} | query_]
else
query_
end
query_ = if !is_nil(next_token) do
[{"NextToken", next_token} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieve a list of the tags (keys and values) that are associated with a
specified resource. A *tag* is a label that you optionally define and
associate with a resource in Amazon Pinpoint. Each tag consists of a
required *tag key* and an optional associated *tag value*. A tag key is a
general label that acts as a category for more specific tag values. A tag
value acts as a descriptor within a tag key.
"""
def list_tags_for_resource(client, resource_arn, options \\ []) do
path_ = "/v1/email/tags"
headers = []
query_ = []
query_ = if !is_nil(resource_arn) do
[{"ResourceArn", resource_arn} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Enable or disable the automatic warm-up feature for dedicated IP addresses.
"""
def put_account_dedicated_ip_warmup_attributes(client, input, options \\ []) do
path_ = "/v1/email/account/dedicated-ips/warmup"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Enable or disable the ability of your account to send email.
"""
def put_account_sending_attributes(client, input, options \\ []) do
path_ = "/v1/email/account/sending"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Associate a configuration set with a dedicated IP pool. You can use
dedicated IP pools to create groups of dedicated IP addresses for sending
specific types of email.
"""
def put_configuration_set_delivery_options(client, configuration_set_name, input, options \\ []) do
path_ = "/v1/email/configuration-sets/#{URI.encode(configuration_set_name)}/delivery-options"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Enable or disable collection of reputation metrics for emails that you send
using a particular configuration set in a specific AWS Region.
"""
def put_configuration_set_reputation_options(client, configuration_set_name, input, options \\ []) do
path_ = "/v1/email/configuration-sets/#{URI.encode(configuration_set_name)}/reputation-options"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Enable or disable email sending for messages that use a particular
configuration set in a specific AWS Region.
"""
def put_configuration_set_sending_options(client, configuration_set_name, input, options \\ []) do
path_ = "/v1/email/configuration-sets/#{URI.encode(configuration_set_name)}/sending"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Specify a custom domain to use for open and click tracking elements in
email that you send using Amazon Pinpoint.
"""
def put_configuration_set_tracking_options(client, configuration_set_name, input, options \\ []) do
path_ = "/v1/email/configuration-sets/#{URI.encode(configuration_set_name)}/tracking-options"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Move a dedicated IP address to an existing dedicated IP pool.
Note: The dedicated IP address that you specify must already exist, and
must be associated with your Amazon Pinpoint account.
The dedicated IP pool you specify must already exist. You can create a new
pool by using the `CreateDedicatedIpPool` operation.
"""
def put_dedicated_ip_in_pool(client, ip, input, options \\ []) do
path_ = "/v1/email/dedicated-ips/#{URI.encode(ip)}/pool"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
<p/>
"""
def put_dedicated_ip_warmup_attributes(client, ip, input, options \\ []) do
path_ = "/v1/email/dedicated-ips/#{URI.encode(ip)}/warmup"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Enable or disable the Deliverability dashboard for your Amazon Pinpoint
account. When you enable the Deliverability dashboard, you gain access to
reputation, deliverability, and other metrics for the domains that you use
to send email using Amazon Pinpoint. You also gain the ability to perform
predictive inbox placement tests.
When you use the Deliverability dashboard, you pay a monthly subscription
charge, in addition to any other fees that you accrue by using Amazon
Pinpoint. For more information about the features and cost of a
Deliverability dashboard subscription, see [Amazon Pinpoint
Pricing](http://aws.amazon.com/pinpoint/pricing/).
"""
def put_deliverability_dashboard_option(client, input, options \\ []) do
path_ = "/v1/email/deliverability-dashboard"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Used to enable or disable DKIM authentication for an email identity.
"""
def put_email_identity_dkim_attributes(client, email_identity, input, options \\ []) do
path_ = "/v1/email/identities/#{URI.encode(email_identity)}/dkim"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Used to enable or disable feedback forwarding for an identity. This setting
determines what happens when an identity is used to send an email that
results in a bounce or complaint event.
When you enable feedback forwarding, Amazon Pinpoint sends you email
notifications when bounce or complaint events occur. Amazon Pinpoint sends
this notification to the address that you specified in the Return-Path
header of the original email.
When you disable feedback forwarding, Amazon Pinpoint sends notifications
through other mechanisms, such as by notifying an Amazon SNS topic. You're
required to have a method of tracking bounces and complaints. If you
haven't set up another mechanism for receiving bounce or complaint
notifications, Amazon Pinpoint sends an email notification when these
events occur (even if this setting is disabled).
"""
def put_email_identity_feedback_attributes(client, email_identity, input, options \\ []) do
path_ = "/v1/email/identities/#{URI.encode(email_identity)}/feedback"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Used to enable or disable the custom Mail-From domain configuration for an
email identity.
"""
def put_email_identity_mail_from_attributes(client, email_identity, input, options \\ []) do
path_ = "/v1/email/identities/#{URI.encode(email_identity)}/mail-from"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Sends an email message. You can use the Amazon Pinpoint Email API to send
two types of messages:
* **Simple** - A standard email message. When you create this type
of message, you specify the sender, the recipient, and the message body,
and Amazon Pinpoint assembles the message for you.
* **Raw** - A raw, MIME-formatted email message. When you send
this type of email, you have to specify all of the message headers, as well
as the message body. You can use this message type to send messages that
contain attachments. The message that you specify has to be a valid MIME
message.
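## Example

A sketch of sending a simple message; the addresses are hypothetical and the
map keys follow the shape of the Pinpoint Email REST API:

    send_email(client, %{
      "FromEmailAddress" => "sender@example.com",
      "Destination" => %{"ToAddresses" => ["recipient@example.com"]},
      "Content" => %{
        "Simple" => %{
          "Subject" => %{"Data" => "Hello"},
          "Body" => %{"Text" => %{"Data" => "Hello from Amazon Pinpoint."}}
        }
      }
    })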
"""
def send_email(client, input, options \\ []) do
path_ = "/v1/email/outbound-emails"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Add one or more tags (keys and values) to a specified resource. A *tag* is
a label that you optionally define and associate with a resource in Amazon
Pinpoint. Tags can help you categorize and manage resources in different
ways, such as by purpose, owner, environment, or other criteria. A resource
can have as many as 50 tags.
Each tag consists of a required *tag key* and an associated *tag value*,
both of which you define. A tag key is a general label that acts as a
category for more specific tag values. A tag value acts as a descriptor
within a tag key.
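## Example

A minimal sketch; the ARN is elided and the tag is hypothetical:

    tag_resource(client, %{
      "ResourceArn" => "arn:aws:...",
      "Tags" => [%{"Key" => "environment", "Value" => "staging"}]
    })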
"""
def tag_resource(client, input, options \\ []) do
path_ = "/v1/email/tags"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Remove one or more tags (keys and values) from a specified resource.
"""
def untag_resource(client, input, options \\ []) do
path_ = "/v1/email/tags"
headers = []
{query_, input} =
[
{"ResourceArn", "ResourceArn"},
{"TagKeys", "TagKeys"},
]
|> AWS.Request.build_params(input)
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Update the configuration of an event destination for a configuration set.
In Amazon Pinpoint, *events* include message sends, deliveries, opens,
clicks, bounces, and complaints. *Event destinations* are places that you
can send information about these events to. For example, you can send event
data to Amazon SNS to receive notifications when you receive bounces or
complaints, or you can use Amazon Kinesis Data Firehose to stream data to
Amazon S3 for long-term storage.
"""
def update_configuration_set_event_destination(client, configuration_set_name, event_destination_name, input, options \\ []) do
path_ = "/v1/email/configuration-sets/#{URI.encode(configuration_set_name)}/event-destinations/#{URI.encode(event_destination_name)}"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) ::
{:ok, Poison.Parser.t(), Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
client = %{client | service: "ses"}
host = build_host("email", client)
url = host
|> build_url(path, client)
|> add_query(query)
additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}]
headers = AWS.Request.add_headers(additional_headers, headers)
payload = encode_payload(input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(method, url, payload, headers, options, success_status_code)
end
defp perform_request(method, url, payload, headers, options, nil) do
case HTTPoison.request(method, url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, response}
{:ok, %HTTPoison.Response{status_code: status_code, body: body} = response}
when status_code == 200 or status_code == 202 or status_code == 204 ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp perform_request(method, url, payload, headers, options, success_status_code) do
case HTTPoison.request(method, url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: ^success_status_code, body: ""} = response} ->
{:ok, %{}, response}
{:ok, %HTTPoison.Response{status_code: ^success_status_code, body: body} = response} ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, path, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}#{path}"
end
defp add_query(url, []) do
url
end
defp add_query(url, query) do
querystring = AWS.Util.encode_query(query)
"#{url}?#{querystring}"
end
defp encode_payload(input) do
if input != nil, do: Poison.Encoder.encode(input, %{}), else: ""
end
end
|
lib/aws/pinpoint_email.ex
| 0.863837 | 0.630799 |
pinpoint_email.ex
|
starcoder
|
defmodule RDF.XSD.Datatype do
@moduledoc """
A behaviour for XSD datatypes.
A XSD datatype has three properties:
- A _value space_, which is a set of values.
- A _lexical space_, which is a set of _literals_ used to denote the values.
- A collection of functions associated with the datatype.
### Builtin XSD datatypes
RDF.ex comes with the following builtin implementations of XSD datatypes:
| XSD datatype | RDF.ex datatype |
| :--- | :--- |
| `xsd:boolean` | `RDF.XSD.Boolean` |
| `xsd:float` | `RDF.XSD.Float` |
| `xsd:double` | `RDF.XSD.Double` |
| `xsd:decimal` | `RDF.XSD.Decimal` |
| `xsd:integer` | `RDF.XSD.Integer` |
| `xsd:long` | `RDF.XSD.Long` |
| `xsd:int` | `RDF.XSD.Int` |
| `xsd:short` | `RDF.XSD.Short` |
| `xsd:byte` | `RDF.XSD.Byte` |
| `xsd:nonPositiveInteger` | `RDF.XSD.NonPositiveInteger` |
| `xsd:negativeInteger` | `RDF.XSD.NegativeInteger` |
| `xsd:nonNegativeInteger` | `RDF.XSD.NonNegativeInteger` |
| `xsd:positiveInteger` | `RDF.XSD.PositiveInteger` |
| `xsd:unsignedLong` | `RDF.XSD.UnsignedLong` |
| `xsd:unsignedInt` | `RDF.XSD.UnsignedInt` |
| `xsd:unsignedShort` | `RDF.XSD.UnsignedShort` |
| `xsd:unsignedByte` | `RDF.XSD.UnsignedByte` |
| `xsd:string` | `RDF.XSD.String` |
| `xsd:normalizedString` | β |
| `xsd:token` | β |
| `xsd:language` | β |
| `xsd:Name` | β |
| `xsd:NCName` | β |
| `xsd:ID` | β |
| `xsd:IDREF` | β |
| `xsd:ENTITY` | β |
| `xsd:NMTOKEN` | β |
| `xsd:dateTime` | `RDF.XSD.DateTime` |
| `xsd:dateTimeStamp` | β |
| `xsd:date` | `RDF.XSD.Date` |
| `xsd:time` | `RDF.XSD.Time` |
| `xsd:duration` | β |
| `xsd:dayTimeDuration` | β |
| `xsd:yearMonthDuration` | β |
| `xsd:gYearMonth` | β |
| `xsd:gYear` | β |
| `xsd:gMonthDay` | β |
| `xsd:gDay` | β |
| `xsd:gMonth` | β |
| `xsd:base64Binary` | `RDF.XSD.Base64Binary` |
| `xsd:hexBinary` | β |
| `xsd:anyURI` | `RDF.XSD.AnyURI` |
| `xsd:QName` | β |
| `xsd:NOTATION` | β |
There are some notable differences in the implementations of some datatypes compared to
the original spec:
- `RDF.XSD.Integer` is not derived from `RDF.XSD.Decimal`, but implemented as a primitive datatype
- `RDF.XSD.Float` is not implemented as a primitive datatype, but derived from `RDF.XSD.Double`
without further restrictions, since Erlang doesn't have a corresponding datatype
See <https://www.w3.org/TR/xmlschema11-2/#built-in-datatypes>
"""
@type t :: module
@type uncanonical_lexical :: String.t() | nil
@type literal :: %{
:__struct__ => t(),
:value => any(),
:uncanonical_lexical => uncanonical_lexical()
}
import RDF.Utils.Guards
@doc """
Returns if the `RDF.XSD.Datatype` is a primitive datatype.
"""
@callback primitive?() :: boolean
@doc """
The base datatype from which a `RDF.XSD.Datatype` is derived.
Note: Since this library focuses on atomic types and the special `xsd:anyAtomicType`
specified as the base type of all primitive types in the W3C spec wouldn't serve any
purpose here, all primitive datatypes just return `nil` instead.
"""
@callback base :: t() | nil
@doc """
The primitive `RDF.XSD.Datatype` from which a `RDF.XSD.Datatype` is derived.
In case of a primitive `RDF.XSD.Datatype` this function returns this `RDF.XSD.Datatype` itself.
"""
@callback base_primitive :: t()
@doc """
Checks if the `RDF.XSD.Datatype` is directly or indirectly derived from the given `RDF.XSD.Datatype`.
Note that this is just a basic datatype reflection function on the module level
and does not work with `RDF.Literal`s. See `c:RDF.Literal.Datatype.datatype?/1` instead.
"""
@callback derived_from?(t()) :: boolean
@doc """
The set of applicable facets of a `RDF.XSD.Datatype`.
"""
@callback applicable_facets :: [RDF.XSD.Facet.t()]
@doc """
A mapping from the lexical space of a `RDF.XSD.Datatype` into its value space.
"""
@callback lexical_mapping(String.t(), Keyword.t()) :: any
@doc """
A mapping from Elixir values into the value space of a `RDF.XSD.Datatype`.
If the given Elixir value cannot be mapped into the value space of
the XSD datatype, an implementation should return `@invalid_value`
(which is just `nil` at the moment, so `nil` is never a valid value of a value space).
Otherwise a tuple `{value, lexical}` with `value` being the internal representation
of the mapped value from the value space and `lexical` being the lexical representation
to be used for the Elixir value or `nil` if `c:init_valid_lexical/3` should be used
to determine the lexical form in general (i.e. also when initialized with a string
via the `c:lexical_mapping/2`). Since the latter case is most often what you want,
you can also return `value` directly, as long as it is not a two-element tuple.
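For example, a hypothetical integer datatype could implement it as:

    def elixir_mapping(value, _opts) when is_integer(value), do: value
    def elixir_mapping(_value, _opts), do: @invalid_value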
"""
@callback elixir_mapping(any, Keyword.t()) :: any | {any, uncanonical_lexical}
@doc """
Returns the standard lexical representation for a value of the value space of a `RDF.XSD.Datatype`.
"""
@callback canonical_mapping(any) :: String.t()
@doc """
Produces the lexical representation to be used for a `RDF.XSD.Datatype` literal.
By default, the lexical representation of a `RDF.XSD.Datatype` is either the
canonical form, in case it is created from a non-string Elixir value, or, if it
is created from a string, just that string as the lexical form.
But there can be various reasons for why this should be different for certain
datatypes. For example, for `RDF.XSD.Double`s given as Elixir floats, we want the
default lexical representation to be the decimal and not the canonical
exponential form. Another reason might be that additional options are given
which should be taken into account in the lexical form.
If the lexical representation for a given `value` and `lexical` should be the
canonical one, an implementation should return `nil`.
"""
@callback init_valid_lexical(any, uncanonical_lexical, Keyword.t()) :: uncanonical_lexical
@doc """
Produces the lexical representation of an invalid value.
The default implementation of the `__using__` macro just returns the `to_string/1`
representation of the value.
"""
@callback init_invalid_lexical(any, Keyword.t()) :: String.t()
@doc """
Returns the `RDF.XSD.Datatype` for a datatype IRI.
"""
defdelegate get(id), to: RDF.Literal.Datatype.Registry, as: :xsd_datatype
@doc false
def most_specific(left, right)
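# For example, most_specific(RDF.XSD.Integer, RDF.XSD.Byte) returns
# RDF.XSD.Byte, since xsd:byte is derived from xsd:integer.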
def most_specific(datatype, datatype), do: datatype
def most_specific(left, right) do
cond do
left.datatype?(right) -> right
right.datatype?(left) -> left
true -> nil
end
end
defmacro __using__(opts) do
quote do
defstruct [:value, :uncanonical_lexical]
@behaviour unquote(__MODULE__)
use RDF.Literal.Datatype, unquote(opts)
@invalid_value nil
@type invalid_value :: nil
@type value :: valid_value | invalid_value
@type t :: %__MODULE__{
value: value,
uncanonical_lexical: RDF.XSD.Datatype.uncanonical_lexical()
}
@doc !"""
This function is just used to check if a module is a RDF.XSD.Datatype.
See `RDF.Literal.Datatype.Registry.is_xsd_datatype?/1`.
"""
def __xsd_datatype_indicator__, do: true
@doc """
Checks if the given literal has datatype this or a datatype that is derived of it.
"""
@impl RDF.Literal.Datatype
def datatype?(%RDF.Literal{literal: literal}), do: datatype?(literal)
def datatype?(%datatype{}), do: datatype?(datatype)
def datatype?(__MODULE__), do: true
def datatype?(datatype) when maybe_module(datatype) do
RDF.XSD.datatype?(datatype) and datatype.derived_from?(__MODULE__)
end
def datatype?(_), do: false
@doc false
def datatype!(%__MODULE__{}), do: true
def datatype!(%datatype{} = literal) do
datatype?(datatype) ||
raise RDF.XSD.Datatype.Mismatch, value: literal, expected_type: __MODULE__
end
def datatype!(value),
do: raise(RDF.XSD.Datatype.Mismatch, value: value, expected_type: __MODULE__)
@doc """
Creates a new `RDF.Literal` with this datatype and the given `value`.
"""
@impl RDF.Literal.Datatype
def new(value, opts \\ [])
def new(lexical, opts) when is_binary(lexical) do
if Keyword.get(opts, :as_value) do
from_value(lexical, opts)
else
from_lexical(lexical, opts)
end
end
def new(value, opts) do
from_value(value, opts)
end
@doc """
Creates a new `RDF.Literal` with this datatype and the given `value` or fails when it is not valid.
"""
@impl RDF.Literal.Datatype
def new!(value, opts \\ []) do
literal = new(value, opts)
if valid?(literal) do
literal
else
raise ArgumentError, "#{inspect(value)} is not a valid #{inspect(__MODULE__)}"
end
end
@doc false
# Dialyzer causes a warning on all primitives since the facet_conform?/2 call
# always returns true there, so the other branch is unnecessary. This could
# be fixed by generating a special version for primitives, but it's not worth
# maintaining different versions of this function which must be kept in-sync.
@dialyzer {:nowarn_function, from_lexical: 2}
def from_lexical(lexical, opts \\ []) when is_binary(lexical) do
case lexical_mapping(lexical, opts) do
@invalid_value ->
build_invalid(lexical, opts)
value ->
if facet_conform?(value, lexical) do
build_valid(value, lexical, opts)
else
build_invalid(lexical, opts)
end
end
end
@doc false
# Dialyzer causes a warning on all primitives since the facet_conform?/2 call
# always returns true there, so the other branch is unnecessary. This could
# be fixed by generating a special version for primitives, but it's not worth
# maintaining different versions of this function which must be kept in-sync.
@dialyzer {:nowarn_function, from_value: 2}
def from_value(value, opts \\ []) do
case elixir_mapping(value, opts) do
@invalid_value ->
build_invalid(value, opts)
value ->
{value, lexical} =
case value do
{value, lexical} -> {value, lexical}
value -> {value, nil}
end
if facet_conform?(value, lexical) do
build_valid(value, lexical, opts)
else
build_invalid(value, opts)
end
end
end
@doc false
@spec build_valid(any, RDF.XSD.Datatype.uncanonical_lexical(), Keyword.t()) ::
RDF.Literal.t()
def build_valid(value, lexical, opts) do
if Keyword.get(opts, :canonicalize) do
literal(%__MODULE__{value: value})
else
initial_lexical = init_valid_lexical(value, lexical, opts)
literal(%__MODULE__{
value: value,
uncanonical_lexical:
if(initial_lexical && initial_lexical != canonical_mapping(value),
do: initial_lexical
)
})
end
end
@dialyzer {:nowarn_function, build_invalid: 2}
defp build_invalid(lexical, opts) do
literal(%__MODULE__{uncanonical_lexical: init_invalid_lexical(lexical, opts)})
end
@doc """
Returns the value of a `RDF.Literal` of this or a derived datatype.
"""
@impl RDF.Literal.Datatype
def value(%RDF.Literal{literal: literal}), do: value(literal)
def value(%__MODULE__{} = literal), do: literal.value
def value(literal) do
datatype!(literal)
literal.value
end
@doc """
Returns the lexical form of a `RDF.Literal` of this datatype.
"""
@impl RDF.Literal.Datatype
def lexical(lexical)
def lexical(%RDF.Literal{literal: literal}), do: lexical(literal)
def lexical(%__MODULE__{value: value, uncanonical_lexical: nil}),
do: canonical_mapping(value)
def lexical(%__MODULE__{uncanonical_lexical: lexical}), do: lexical
@doc """
Returns the canonical lexical form of a `RDF.Literal` of this datatype.
"""
@impl RDF.Literal.Datatype
def canonical_lexical(%RDF.Literal{literal: literal}), do: canonical_lexical(literal)
def canonical_lexical(%__MODULE__{value: value}) when not is_nil(value),
do: canonical_mapping(value)
def canonical_lexical(_), do: nil
@doc """
Produces the canonical representation of a `RDF.Literal` of this datatype.
"""
@impl RDF.Literal.Datatype
def canonical(literal)
def canonical(%RDF.Literal{literal: %__MODULE__{uncanonical_lexical: nil}} = literal),
do: literal
def canonical(%RDF.Literal{literal: %__MODULE__{value: @invalid_value}} = literal),
do: literal
def canonical(%RDF.Literal{literal: %__MODULE__{} = literal}),
do: canonical(literal)
def canonical(%__MODULE__{} = literal),
do: literal(%__MODULE__{literal | uncanonical_lexical: nil})
@doc """
Determines if the lexical form of a `RDF.Literal` of this datatype is the canonical form.
"""
@impl RDF.Literal.Datatype
def canonical?(literal)
def canonical?(%RDF.Literal{literal: literal}), do: canonical?(literal)
def canonical?(%__MODULE__{uncanonical_lexical: nil}), do: true
def canonical?(%__MODULE__{}), do: false
@doc """
Determines if a `RDF.Literal` of this or a derived datatype has a proper value of its value space.
"""
@impl RDF.Literal.Datatype
def valid?(literal)
def valid?(%RDF.Literal{literal: literal}), do: valid?(literal)
def valid?(%__MODULE__{value: @invalid_value}), do: false
def valid?(%__MODULE__{}), do: true
def valid?(%datatype{} = literal),
do: datatype?(datatype) and datatype.valid?(literal)
def valid?(_), do: false
@doc false
defp equality_path(left_datatype, right_datatype)
defp equality_path(datatype, datatype), do: {:same_or_derived, datatype}
defp equality_path(left_datatype, right_datatype) do
if RDF.XSD.datatype?(left_datatype) and RDF.XSD.datatype?(right_datatype) do
if datatype = RDF.XSD.Datatype.most_specific(left_datatype, right_datatype) do
{:same_or_derived, datatype}
else
{:different, left_datatype}
end
else
{:different, left_datatype}
end
end
@doc """
Compares two `RDF.Literal`s.
If the first literal is greater than the second `:gt` is returned, if less than `:lt` is returned.
If both literals are equal `:eq` is returned.
If the literals can not be compared, either `nil` is returned, when they generally can not
be compared due to their datatypes, or `:indeterminate` is returned, when the order of the
given values is not defined on an only partially ordered datatype.
"""
@spec compare(RDF.Literal.t() | any, RDF.Literal.t() | any) ::
RDF.Literal.Datatype.comparison_result() | :indeterminate | nil
def compare(left, right)
def compare(left, %RDF.Literal{literal: right}), do: compare(left, right)
def compare(%RDF.Literal{literal: left}, right), do: compare(left, right)
def compare(left, right) do
if RDF.XSD.datatype?(left) and RDF.XSD.datatype?(right) and
RDF.Literal.Datatype.valid?(left) and RDF.Literal.Datatype.valid?(right) do
do_compare(left, right)
end
end
defimpl Inspect do
"Elixir.Inspect." <> datatype_name = to_string(__MODULE__)
@datatype_name datatype_name
def inspect(literal, _opts) do
"%#{@datatype_name}{value: #{inspect(literal.value)}, lexical: #{literal |> literal.__struct__.lexical() |> inspect()}}"
end
end
end
end
end
|
lib/rdf/xsd/datatype.ex
| 0.901531 | 0.916596 |
datatype.ex
|
starcoder
|
defmodule Rummage.Phoenix.SortController do
@moduledoc """
`SortController` is a controller helper in `Rummage.Phoenix` which stores
helpers for the Sort hook in `Rummage`. It formats params before the `index`
action into the format expected by `Rummage.Ecto`'s default sort
hook: `Rummage.Ecto.Sort`.
"""
@doc """
This function formats params into `rumamge` params, that are expected by
`Rummage.Ecto`'s default sort hook:
## Examples
When `rummage` passed is an empty `Map`, it returns
and empty `Map`:
iex> alias Rummage.Phoenix.SortController
iex> rummage = %{}
iex> SortController.rummage(rummage)
%{}
When `rummage` passed is not an empty `Map`, but
doesn't have a `"sort"` key, it returns
and empty `Map`:
iex> alias Rummage.Phoenix.SortController
iex> rummage = %{"pizza" => "eat"}
iex> SortController.rummage(rummage)
%{}
When `rummage` passed is not an empty `Map`, but
the value corresponding to `"sort"` key is an empty `String`,
it returns and empty `Map`:
iex> alias Rummage.Phoenix.SortController
iex> rummage = %{"sort" => ""}
iex> SortController.rummage(rummage)
%{}
When `rummage` passed is not an empty `Map`, but
the value corresponding to `"sort"` key is a non-empty `String`,
it decodes the value returns it:
iex> alias Rummage.Phoenix.SortController
iex> rummage = %{"sort" => "1"}
iex> SortController.rummage(rummage)
1
When `rummage` passed is not an empty `Map`, but
the value corresponding to `"sort"` key is a `Map`,
it returns the `Map` itself:
iex> alias Rummage.Phoenix.SortController
iex> rummage = %{"sort" => %{"h" => "i"}}
iex> SortController.rummage(rummage)
%{"h" => "i"}
"""
def rummage(rummage) do
sort_params = Map.get(rummage, "sort")
case sort_params do
s when s in ["", nil] -> %{}
s when is_binary(s) ->
sort_params
|> Poison.decode!
_ -> sort_params
end
end
end
|
lib/rummage_phoenix/hooks/controllers/sort_controller.ex
| 0.792022 | 0.760295 |
sort_controller.ex
|
starcoder
|
defmodule EZCalendar.HTML do
@moduledoc """
Functions for rendering the calendars with HTML.
For easy access to the HTML render functions
add `EZCalendar` to your view.
defmodule MyApp.ShiftView do
use MyApp.Web, :view
import EZCalendar.HTML
end
This will import the functions in `EZCalendar.HTML` and `EZCalendar.HTML.Navigation`
View example:
<%= calendar_prev @calendar, "/shifts/:year/:month" %>
<%= @calendar.title %>
<%= calendar_next @calendar, "/shifts/:year/:month" %>
<%= render_calendar @calendar, fn(date)-> %>
<!-- calendar date -->
<%= for shift <- date.data do %>
<!-- query results for date -->
<% end %>
<% end %>
"""
defmacro __using__(_opts \\ []) do
quote do
import EZCalendar.HTML
import EZCalendar.HTML.Navigation
end
end
@doc """
Renders a calendar struct for a given module.
Useful for using an HTML module that isn't the calendars default.
Takes a HTML calendar module, a calendar struct and a function as arguments.
The provided function will be called with each calendar date to render its contents.
<%= render_calendar MyApp.CustomMonthCalendar, @calendar, fn(date)-> %>
<!-- calendar date -->
<%= for shift <- date.data do %>
<!-- query results for date -->
<% end %>
<% end %>
"""
def render_calendar(html_module, calendar_struct, render_func) do
html_module.build(calendar_struct, render_func)
end
@doc """
Renders a calendar, the HTML module used will be inferred from the calendar type.
Takes a calendar struct and a function as arguments.
The provided function will be called with each calendar date to render its contents.
<%= render_calendar @calendar, fn(date)-> %>
<!-- calendar date -->
<%= for shift <- date.data do %>
<!-- query results for date -->
<% end %>
<% end %>
"""
def render_calendar(calendar_struct, render_func) do
calendar_struct
|> get_html_module
|> render_calendar(calendar_struct, render_func)
end
# Look up the calendar struct's module and ask it for its HTML module.
defp get_html_module(calendar_struct) do
calendar_struct
|> Map.get(:__struct__)
|> apply(:html_module, [])
end
end
|
lib/ez_calendar/html.ex
| 0.753829 | 0.415936 |
html.ex
|
starcoder
|
defmodule Etso.Adapter.TableRegistry do
@moduledoc """
Provides convenience function to spin up a Registry, which is used to hold the Table Servers
(registered by GenServer when starting up), alongside their ETS tables (registered when the
Table Server starts).
"""
@spec child_spec(Etso.repo()) :: Supervisor.child_spec()
@spec get_table(Etso.repo(), Etso.schema()) :: {:ok, Etso.table()} | {:error, term()}
@spec register_table(Etso.repo(), Etso.schema(), Etso.table()) :: :ok | {:error, term()}
alias Etso.Adapter.TableServer
alias Etso.Adapter.TableSupervisor
@doc """
Returns Child Specification for the Table Registry that will be associated with the `repo`.
"""
def child_spec(repo) do
Registry.child_spec(keys: :unique, name: build_name(repo))
end
@doc """
Returns the ETS table associated with the given `repo` which is used to hold data for `schema`.
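## Example

A hypothetical call; `MyApp.Repo` and `MyApp.Post` are placeholder names:

    {:ok, table} = get_table(MyApp.Repo, MyApp.Post)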
"""
def get_table(repo, schema) do
case lookup_table(repo, schema) do
{:ok, table_reference} -> {:ok, table_reference}
{:error, :not_found} -> start_table(repo, schema)
end
end
@doc """
Registers the ETS table associated with the given `repo` which is used to hold data for `schema`.
"""
def register_table(repo, schema, table_reference) do
with {:ok, _} <- Registry.register(build_name(repo), {schema, :ets_table}, table_reference) do
:ok
end
end
defp lookup_table(repo, schema) do
case Registry.lookup(build_name(repo), {schema, :ets_table}) do
[{_, table_reference}] -> {:ok, table_reference}
[] -> {:error, :not_found}
end
end
defp start_table(repo, schema) do
with {:ok, _} <- ensure_server_started(repo, schema) do
lookup_table(repo, schema)
end
end
defp ensure_server_started(repo, schema) do
case start_server(repo, schema) do
{:ok, pid} -> {:ok, pid}
{:ok, pid, _} -> {:ok, pid}
{:error, {:already_started, pid}} -> {:ok, pid}
_ -> :error
end
end
defp start_server(repo, schema) do
name = {:via, Registry, {build_name(repo), schema}}
child_spec = {TableServer, {repo, schema, name}}
TableSupervisor.start_child(repo, child_spec)
end
defp build_name(repo) do
Module.concat([repo, Enum.at(Module.split(__MODULE__), -1)])
end
end
|
lib/etso/adapter/table_registry.ex
| 0.778186 | 0.474449 |
table_registry.ex
|
starcoder
|
defmodule Indicado.WR do
@moduledoc """
This is the WR module used for calculating Williams %R.
"""
@doc """
Calculates WR for the list.
Returns `{:ok, rs_list}` or `{:error, reason}`
## Examples
iex> Indicado.WR.eval([1, 3, 4, 3, 1, 5], 4)
{:ok, [0.3333333333333333, 1.0, 0.0]}
iex> Indicado.WR.eval([1, 10, 5, 3, 9, 12, 6, 3, 4], 5)
{:ok, [0.1111111111111111, 0.0, 0.6666666666666666, 1.0, 0.8888888888888888]}
iex> Indicado.WR.eval([1, 3], 3)
{:error, :not_enough_data}
iex> Indicado.WR.eval([1, 5], 0)
{:error, :bad_period}
"""
@spec eval(nonempty_list(list), pos_integer) :: {:ok, nonempty_list(float)} | {:error, atom}
def eval(list, period), do: calc(list, period)
@doc """
Calculates WR for the list. Raises exceptions when arguments does not satisfy needed conditions to calculate WR.
Raises `NotEnoughDataError` if the given list is not long enough for calculating WR.
Raises `BadPeriodError` if period is an unacceptable number.
## Examples
iex> Indicado.WR.eval!([1, 3, 4, 3, 1, 5], 4)
[0.3333333333333333, 1.0, 0.0]
iex> Indicado.WR.eval!([1, 3], 3)
** (NotEnoughDataError) not enough data
iex> Indicado.WR.eval!([1, 5], 0)
** (BadPeriodError) bad period
"""
@spec eval!(nonempty_list(list), pos_integer) :: nonempty_list(float) | no_return
def eval!(list, period) do
case calc(list, period) do
{:ok, result} -> result
{:error, :not_enough_data} -> raise NotEnoughDataError
{:error, :bad_period} -> raise BadPeriodError
end
end
defp calc(list, period, results \\ [])
defp calc([], _period, []), do: {:error, :not_enough_data}
defp calc(_list, period, _results) when period < 1, do: {:error, :bad_period}
defp calc([], _period, results), do: {:ok, Enum.reverse(results)}
defp calc([_head | tail] = list, period, results) when length(list) < period do
calc(tail, period, results)
end
defp calc([_head | tail] = list, period, results) do
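# The current window is the first `period` elements of the list; its last
# element is the most recent close. The Williams %R value relates that close
# to the window's high/low range, normalized here to 0..1 instead of the
# conventional -100..0.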
[close | _] =
list
|> Enum.take(period)
|> Enum.take(-1)
{min, max} =
list
|> Enum.take(period)
|> Enum.min_max()
wr = (max - close) / (max - min)
calc(tail, period, [wr | results])
end
end
|
lib/indicado/wr.ex
| 0.911682 | 0.506836 |
wr.ex
|
starcoder
|
defmodule Beanstix do
alias Beanstix.Connection
@moduledoc """
Beanstix - a beanstalkd client written in Elixir.
Forked from ElixirTalk.
Copyright 2014-2016 by jsvisa(<EMAIL>)
"""
@type connection_error :: :timeout | :closed | :inet.posix()
@type result :: {:ok, non_neg_integer} | {:error, term}
@vsn 1.0
@doc """
Connect to the beanstalkd server.
"""
@spec connect(List.t()) :: {:ok, pid} | {:error, term}
def connect(opts) when is_list(opts) do
Connection.start_link(opts)
end
@spec connect(:inet.ip_address() | :inet.hostname(), integer, timeout) ::
{:ok, pid} | {:error, term}
def connect(host \\ '127.0.0.1', port \\ 11300, timeout \\ :infinity) do
connect(host: host, port: port, recv_timeout: timeout, connect_timeout: 5_000)
end
@doc """
Close the connection to server.
"""
@spec quit(pid) :: :ok
def quit(pid) do
Connection.quit(pid)
end
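@doc """
Send a list of commands to the server in a single call.
Returns a list of results, one per command, or an empty list when
`commands` is empty.
"""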
def pipeline(pid, commands, timeout \\ 5000)
def pipeline(pid, commands, timeout) when length(commands) > 0, do: Connection.call(pid, commands, timeout)
def pipeline(_, _, _), do: []
def command(pid, command, timeout \\ 5000) do
case pipeline(pid, [command], timeout) do
result when is_list(result) -> hd(result)
error -> error
end
end
@doc """
Put a job to the current tube.
The opts can be any combination of
* `:priority` - an integer < 2**32. Jobs with smaller priority values will be
scheduled before jobs with larger priorities. The most urgent priority is 0;
the least urgent priority is 4,294,967,295.
* `:delay` - an integer number of seconds to wait before putting the job in
the ready queue. The job will be in the "delayed" state during this time.
* `:ttr` - time to run -- is an integer number of seconds to allow a worker
to run this job. This time is counted from the moment a worker reserves
this job. If the worker does not delete, release, or bury the job within
`:ttr` seconds, the job will time out and the server will release the job.
The minimum ttr is 1. If the client sends 0, the server will silently
increase the ttr to 1.
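## Example

A sketch with hypothetical values:

    Beanstix.put(pid, "process-order-42", priority: 100, delay: 0, ttr: 60)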
"""
@spec put(pid, String.t()) :: result
@spec put(pid, String.t(), [{:priority, integer}, {:delay, integer}, {:ttr, integer}]) :: result
def put(pid, data, opts \\ []) do
command(pid, {:put, data, opts})
end
def put!(pid, data, opts \\ []) do
case put(pid, data, opts) do
{:ok, job_id} -> job_id
{:error, message} -> raise Beanstix.Error, message: message
end
end
@doc """
Put a job in the specified tube.
The opts are the same as `put`
"""
@spec put_in_tube(pid, String.t(), String.t()) :: result
@spec put_in_tube(pid, String.t(), String.t(), [{:priority, integer}, {:delay, integer}, {:ttr, integer}]) :: result
def put_in_tube(pid, tube, data, opts \\ []) do
case pipeline(pid, [{:use, tube}, {:put, data, opts}]) do
[{:ok, ^tube}, result] -> result
error -> {:error, "#{inspect(error)}"}
end
end
def put_in_tube!(pid, tube, data, opts \\ []) do
case put_in_tube(pid, tube, data, opts) do
{:ok, job_id} -> job_id
{:error, message} -> raise Beanstix.Error, message: message
end
end
@doc """
Use a tube to `put` jobs.
"""
@spec use(pid, String.t()) :: {:using, String.t()} | connection_error
def use(pid, tube) do
command(pid, {:use, tube})
end
@doc """
Add the named tube to the watch list for the current connection.
A reserve command will take a job from any of the tubes in the
watch list.
"""
@spec watch(pid, String.t()) :: {:watching, non_neg_integer} | connection_error
def watch(pid, tube) do
command(pid, {:watch, tube})
end
@doc """
Remove the named tube from the watch list for the current connection.
"""
@spec ignore(pid, String.t()) :: {:watching, non_neg_integer} | :not_ignored | connection_error
def ignore(pid, tube) do
command(pid, {:ignore, tube})
end
@doc """
Remove a job from the server entirely. It is normally used
by the client when the job has successfully run to completion. A client can
delete jobs that it has reserved, ready jobs, delayed jobs, and jobs that are
buried.
"""
@spec delete(pid, non_neg_integer) :: :deleted | :not_found | connection_error
def delete(pid, id) do
command(pid, {:delete, id})
end
def delete!(pid, id) do
case delete(pid, id) do
{:ok, :deleted} -> :deleted
{:error, message} -> raise Beanstix.Error, message: message
end
end
@doc """
Allow a worker to request more time to work on a job.
This is useful for jobs that potentially take a long time, but you still want
the benefits of a TTR pulling a job away from an unresponsive worker. A worker
may periodically tell the server that it's still alive and processing a job
(e.g. it may do this on DEADLINE_SOON). The command postpones the auto
release of a reserved job until TTR seconds from when the command is issued.
"""
@spec touch(pid, non_neg_integer) :: :touched | :not_found | connection_error
def touch(pid, id) do
command(pid, {:touch, id})
end
@doc """
Let the client inspect a job in the system. Peeking the given job id
"""
@spec peek(pid, non_neg_integer) :: {:found, non_neg_integer} | :not_found | connection_error
def peek(pid, id) do
command(pid, {:peek, id})
end
@doc """
Peeking the next ready job.
"""
@spec peek_ready(pid) :: {:found, non_neg_integer} | :not_found | connection_error
def peek_ready(pid) do
command(pid, :peek_ready)
end
@doc """
Peeking the delayed job with the shortest delay left.
"""
@spec peek_delayed(pid) :: {:found, non_neg_integer} | :not_found | connection_error
def peek_delayed(pid) do
command(pid, :peek_delayed)
end
@doc """
Peeking the next job in the list of buried jobs.
"""
@spec peek_buried(pid) :: {:found, non_neg_integer} | :not_found | connection_error
def peek_buried(pid) do
command(pid, :peek_buried)
end
@doc """
Move jobs into the ready queue. If there are any buried jobs, it will only kick buried jobs.
Otherwise it will kick delayed jobs.
Apply only to the currently used tube.
"""
@spec kick(pid, non_neg_integer) :: {:kicked, non_neg_integer} | connection_error
def kick(pid, bound \\ 1) do
command(pid, {:kick, [bound: bound]})
end
@doc """
Similar to `kick(bound)`, if the given job id exists and is in a buried or
delayed state, it will be moved to the ready queue of the same tube where it
currently belongs.
"""
@spec kick_job(pid, non_neg_integer) :: :kicked | :not_found | connection_error
def kick_job(pid, id) do
command(pid, {:kick_job, id})
end
@doc """
Give statistical information about the system as a whole.
"""
@spec stats(pid) :: Map.t() | connection_error
def stats(pid) do
command(pid, :stats)
end
@doc """
Similar to `stats/0`, gives statistical information about the specified job if
it exists.
"""
@spec stats_job(pid, non_neg_integer) :: Map.t() | :not_found | connection_error
def stats_job(pid, id) do
command(pid, {:stats_job, id})
end
@doc """
Similar to `stats/0`, gives statistical information about the specified tube
if it exists.
"""
@spec stats_tube(pid, String.t()) :: Map.t() | :not_found | connection_error
def stats_tube(pid, tube) do
command(pid, {:stats_tube, tube})
end
def stats_tube!(pid, tube) do
case stats_tube(pid, tube) do
{:ok, stats} -> stats
{:error, message} -> raise Beanstix.Error, message: message
end
end
@doc """
Return a list of all existing tubes in the server.
"""
@spec list_tubes(pid) :: list | connection_error
def list_tubes(pid) do
command(pid, :list_tubes)
end
def list_tubes!(pid) do
case list_tubes(pid) do
{:ok, tubes} -> tubes
{:error, message} -> raise Beanstix.Error, message: message
end
end
@doc """
Return the tube currently being used by the client.
"""
@spec list_tube_used(pid) :: {:using, String.t()} | connection_error
def list_tube_used(pid) do
command(pid, :list_tube_used)
end
@doc """
Return the tubes currently being watched by the client.
"""
@spec list_tubes_watched(pid) :: list | connection_error
def list_tubes_watched(pid) do
command(pid, :list_tubes_watched)
end
@doc """
Get a job from the currently watched tubes.
"""
@spec reserve(pid) :: {:reserved, non_neg_integer, String.t()} | connection_error
def reserve(pid) do
command(pid, :reserve, :infinity)
end
def reserve!(pid) do
case reserve(pid) do
{:ok, {job_id, data}} -> {job_id, data}
{:error, message} -> raise Beanstix.Error, message: message
end
end
@doc """
Get a job from the currently watched tubes with timeout of seconds.
"""
@spec reserve(pid, non_neg_integer) ::
{:reserved, non_neg_integer, String.t()}
| :deadline_soon
| :timed_out
| connection_error
def reserve(pid, timeout) do
command(pid, {:reserve_with_timeout, timeout}, :infinity)
end
@doc """
Put a job into the "buried" state. Buried jobs are put into a
FIFO linked list and will not be touched by the server again until a client
kicks them with the `kick` command.
"""
@spec bury(pid, non_neg_integer) :: :buried | :not_found | connection_error
@spec bury(pid, non_neg_integer, [{:priority, integer}]) :: :buried | :not_found | connection_error
def bury(pid, id, opts \\ []) do
command(pid, {:bury, id, opts})
end
@doc """
Delay any new job being reserved for a given time.
"""
@spec pause_tube(pid, String.t(), [{:delay, integer}]) :: :paused | :not_found | connection_error
def pause_tube(pid, tube, opts \\ []) do
command(pid, {:pause_tube, tube, opts})
end
@doc """
Put a reserved job back into the ready queue (and marks its state as "ready")
to be run by any client. It is normally used when the job fails because of a transitory error.
The opts can any combination of
* `:priority` - a new priority to assign to the job;
* `:delay` - an integer number of seconds to wait before putting the job back in the ready queue.
The job will be in the "delayed" state during this time.
"""
@spec release(pid, non_neg_integer) ::
:released
| :buried
| :not_found
| connection_error
@spec release(pid, non_neg_integer, [{:priority, integer}, {:delay, integer}]) ::
:released
| :buried
| :not_found
| connection_error
def release(pid, id, opts \\ []) do
command(pid, {:release, id, opts})
end
@doc """
Delete all jobs in a given tube
"""
def purge_tube(pid, tube) do
{:ok, ^tube} = command(pid, {:use, tube})
delete_jobs(pid, :peek_ready)
delete_jobs(pid, :peek_delayed)
delete_jobs(pid, :peek_buried)
end
defp delete_jobs(pid, peek_cmd) do
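# Repeatedly peek with the given command and delete each job found,
# stopping once the peek no longer returns a job.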
case Beanstix.command(pid, peek_cmd) do
{:ok, {job_id, _}} ->
Beanstix.delete(pid, job_id)
delete_jobs(pid, peek_cmd)
_ ->
:ok
end
end
end
|
lib/beanstix.ex
| 0.805823 | 0.410697 |
beanstix.ex
|
starcoder
|
defmodule Cryptopunk.Derivation.Path do
@moduledoc """
Utility functions to work with derivation paths.
See https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki
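## Example

A typical BIP-44 path and how this module parses it:

    {:ok, path} = Cryptopunk.Derivation.Path.parse("m/44'/0'/0'/0/0")
    path.coin_type
    #=> 0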
"""
defstruct [:type, :purpose, :coin_type, :account, :change, :address_index]
@type t :: %__MODULE__{}
@type raw_path :: {atom(), [non_neg_integer]}
@two_power_31 2_147_483_648
defguard is_hardened(x) when is_integer(x) and x >= @two_power_31
defguard is_normal(x) when is_integer(x) and x >= 0 and x < @two_power_31
@spec new(Keyword.t()) :: t()
def new(opts) do
type = Keyword.get(opts, :type, :private)
purpose = Keyword.get(opts, :purpose, 44)
change = Keyword.get(opts, :change, 0)
coin_type = Keyword.fetch!(opts, :coin_type)
account = Keyword.fetch!(opts, :account)
address_index = Keyword.fetch!(opts, :address_index)
%__MODULE__{
type: type,
purpose: purpose,
change: change,
coin_type: coin_type,
account: account,
address_index: address_index
}
end
@spec parse(String.t()) :: {:error, any()} | {:ok, t()}
def parse(string_path) do
string_path
|> String.split("/")
|> Enum.map(&String.trim/1)
|> do_parse()
end
@spec to_raw_path(t()) :: raw_path()
def to_raw_path(%__MODULE__{
type: type,
purpose: purpose,
coin_type: coin_type,
account: account,
change: change,
address_index: address_index
}) do
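# The first three levels (purpose, coin type and account) are hardened,
# so they are offset by 2^31 as specified by BIP-32/BIP-44.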
{type,
[
purpose + @two_power_31,
coin_type + @two_power_31,
account + @two_power_31,
change,
address_index
]}
end
@spec two_power_31() :: non_neg_integer()
def two_power_31, do: @two_power_31
defp do_parse([type, purpose, coin_type, account, change, address_index]) do
with {:ok, type} <- parse_type(type),
{:ok, purpose} <- parse_int(purpose, type: :purpose, hardened: true),
{:ok, coin_type} <- parse_int(coin_type, type: :coin_type, hardened: true),
{:ok, account} <- parse_int(account, type: :account, hardened: true),
{:ok, change} <- parse_int(change, type: :change),
{:ok, address_index} <- parse_int(address_index, type: :address_index) do
params = [
type: type,
purpose: purpose,
coin_type: coin_type,
account: account,
change: change,
address_index: address_index
]
{:ok, new(params)}
end
end
defp do_parse(_other), do: {:error, :invalid_path}
defp parse_type(type) do
case type do
"m" -> {:ok, :private}
"M" -> {:ok, :public}
_ -> {:error, {:invalid_level, :type}}
end
end
defp parse_int(int, type: type, hardened: true) do
case Integer.parse(int) do
{num, "'"} -> {:ok, num}
_ -> {:error, {:invalid_level, type}}
end
end
defp parse_int(int, type: type) do
case Integer.parse(int) do
{num, ""} -> {:ok, num}
_ -> {:error, {:invalid_level, type}}
end
end
end
|
lib/cryptopunk/derivation/path.ex
| 0.900275 | 0.689155 |
path.ex
|
starcoder
|
defmodule Rummage.Ecto.CustomHook.SimpleSort do
@moduledoc """
`Rummage.Ecto.CustomHook.SimpleSort` is the default sort hook that comes with
`Rummage.Ecto`.
This module provides a operations that can add sorting functionality to
a pipeline of `Ecto` queries. This module works by taking the `field` that should
be used to `order_by`, `order` which can be `asc` or `desc`.
This module doesn't support associations and hence is a simple alternative
to Rummage's default search hook.
NOTE: This module doesn't return a list of entries, but a `Ecto.Query.t`.
This module `uses` `Rummage.Ecto.Hook`.
_____________________________________________________________________________
# ABOUT:
## Arguments:
This Hook expects a `queryable` (an `Ecto.Queryable`) and
`sort_params` (a `Map`). The map should be in the format:
`%{field: :field_name, order: :asc}`
Details:
* `field`: The field name (atom) to sorted by.
* `order`: Specifies the type of order `asc` or `desc`.
* `ci` : Case Insensitivity. Defaults to `false`
For example, if we want to sort products with descending `price`, we would
do the following:
```elixir
Rummage.Ecto.CustomHook.SimpleSort.run(Product, %{field: :price,
order: :desc})
```
## Assoications:
This module doesn't support assocations.
____________________________________________________________________________
# ASSUMPTIONS/NOTES:
* This Hook has the default `order` of `:asc`.
* This Hook assumes that the searched field is a part of the schema passed
as the `queryable`.
____________________________________________________________________________
# USAGE:
For a regular sort:
This returns a `queryable` which upon running will give a list of `Parent`(s)
sorted by ascending `field_1`
```elixir
alias Rummage.Ecto.CustomHook.SimpleSort
sorted_queryable = SimpleSort.run(Parent, %{field: :name, order: :asc}})
```
For a case-insensitive sort:
This returns a `queryable` which upon running will give a list of `Parent`(s)
sorted by ascending case insensitive `field_1`.
Keep in mind that `case_insensitive` can only be called for `text` fields
```elixir
alias Rummage.Ecto.CustomHook.SimpleSort
sorted_queryable = SimpleSort.run(Parent, %{field: :name, order: :asc, ci: true})
```
This module can be overridden with a custom module while using `Rummage.Ecto`
in `Ecto` struct module.
In the `Ecto` module:
```elixir
Rummage.Ecto.rummage(queryable, rummage, sort: CustomHook)
```
OR
Globally for all models in `config.exs`:
```elixir
config :rummage_ecto,
Rummage.Ecto,
sort: CustomHook
```
The `CustomHook` must use `Rummage.Ecto.Hook`. For examples of `CustomHook`,
check out some `custom_hooks` that are shipped with `Rummage.Ecto`:
`Rummage.Ecto.CustomHook.SimpleSearch`, `Rummage.Ecto.CustomHook.SimpleSort` and
`Rummage.Ecto.CustomHook.SimplePaginate`.
"""
use Rummage.Ecto.Hook
import Ecto.Query
@expected_keys ~w(field order)a
@err_msg "Error in params, No values given for keys: "
@doc """
This is the callback implementation of `Rummage.Ecto.Hook.run/2`.
Builds a sort `Ecto.Query.t` on top of the given `Ecto.Queryable` variable
using given `params`.
Besides an `Ecto.Query.t` an `Ecto.Schema` module can also be passed as it
implements `Ecto.Queryable`
Params is a `Map` which is expected to have the keys `#{Enum.join(@expected_keys, ", ")}`.
This function expects a `field` atom, an `order` which can be `asc` or `desc`, and
`ci`, a boolean indicating case-insensitivity.
## Examples
When an empty map is passed as `params`:
iex> alias Rummage.Ecto.CustomHook.SimpleSort
iex> SimpleSort.run(Parent, %{})
** (RuntimeError) Error in params, No values given for keys: field, order
When a non-empty map is passed as `params`, but with a missing key:
iex> alias Rummage.Ecto.CustomHook.SimpleSort
iex> SimpleSort.run(Parent, %{field: :name})
** (RuntimeError) Error in params, No values given for keys: order
When a valid map of params is passed with an `Ecto.Schema` module:
iex> alias Rummage.Ecto.CustomHook.SimpleSort
iex> SimpleSort.run(Rummage.Ecto.Product, %{field: :name, order: :asc})
#Ecto.Query<from p in subquery(from p in Rummage.Ecto.Product), order_by: [asc: p.name]>
When the `queryable` passed is an `Ecto.Query` variable:
iex> alias Rummage.Ecto.CustomHook.SimpleSort
iex> import Ecto.Query
iex> queryable = from u in "products"
#Ecto.Query<from p in "products">
iex> SimpleSort.run(queryable, %{field: :name, order: :asc})
#Ecto.Query<from p in subquery(from p in "products"), order_by: [asc: p.name]>
When the `queryable` passed is an `Ecto.Query` variable, with `desc` order:
iex> alias Rummage.Ecto.CustomHook.SimpleSort
iex> import Ecto.Query
iex> queryable = from u in "products"
#Ecto.Query<from p in "products">
iex> SimpleSort.run(queryable, %{field: :name, order: :desc})
#Ecto.Query<from p in subquery(from p in "products"), order_by: [desc: p.name]>
When the `queryable` passed is an `Ecto.Query` variable, with `ci` true:
iex> alias Rummage.Ecto.CustomHook.SimpleSort
iex> import Ecto.Query
iex> queryable = from u in "products"
#Ecto.Query<from p in "products">
iex> SimpleSort.run(queryable, %{field: :name, order: :asc, ci: true})
#Ecto.Query<from p in subquery(from p in "products"), order_by: [asc: fragment("lower(?)", p.name)]>
"""
@spec run(Ecto.Query.t(), map()) :: Ecto.Query.t()
def run(queryable, sort_params) do
:ok = validate_params(sort_params)
handle_sort(queryable, sort_params)
end
# Helper function which handles addition of a sort query on top of
# the sent queryable variable
defp handle_sort(queryable, sort_params) do
order = Map.get(sort_params, :order)
field = Map.get(sort_params, :field)
ci = Map.get(sort_params, :ci, false)
handle_ordering(from(e in subquery(queryable)), field, order, ci)
end
# This is a helper macro to get case_insensitive query using fragments
defmacrop case_insensitive(field) do
quote do
fragment("lower(?)", unquote(field))
end
end
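# Sketch: on Postgres, case_insensitive(p.name) expands to
# fragment("lower(?)", p.name), so the query orders by lower(name).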
# Helper function that handles adding order_by to a query based on order type,
# case insensitivity and field
defp handle_ordering(queryable, field, order, ci) do
order_by_assoc(queryable, order, field, ci)
end
defp order_by_assoc(queryable, order_type, field, false) do
order_by(queryable, [p0, ..., p2], [{^order_type, field(p2, ^field)}])
end
defp order_by_assoc(queryable, order_type, field, true) do
order_by(queryable, [p0, ..., p2],
[{^order_type, case_insensitive(field(p2, ^field))}])
end
# Helper function that validates the list of params based on
# @expected_keys list
defp validate_params(params) do
key_validations = Enum.map(@expected_keys, &Map.fetch(params, &1))
case Enum.filter(key_validations, & &1 == :error) do
[] -> :ok
_ -> raise @err_msg <> missing_keys(key_validations)
end
end
# Helper function used to build error message using missing keys
defp missing_keys(key_validations) do
key_validations
|> Enum.with_index()
|> Enum.filter(fn {v, _i} -> v == :error end)
|> Enum.map(fn {_v, i} -> Enum.at(@expected_keys, i) end)
|> Enum.map(&to_string/1)
|> Enum.join(", ")
end
@doc """
Callback implementation for `Rummage.Ecto.Hook.format_params/2`.
This function ensures that params have the key `order`, which is
essential for running this hook module.
## Examples
iex> alias Rummage.Ecto.CustomHook.SimpleSort
iex> SimpleSort.format_params(Parent, %{}, [])
%{order: :asc}
"""
@spec format_params(Ecto.Query.t(), map(), keyword()) :: map()
def format_params(_queryable, sort_params, _opts) do
sort_params
|> Map.put_new(:order, :asc)
end
end
|
lib/rummage_ecto/custom_hooks/simple_sort.ex
| 0.838117 | 0.946745 |
simple_sort.ex
|
starcoder
|
defmodule Credo.Code.Parameters do
@moduledoc """
This module provides helper functions to analyse the parameters taken by a
function.
"""
@def_ops [:def, :defp, :defmacro]
@doc "Returns the parameter count for the given function's AST"
def count(nil), do: 0
for op <- @def_ops do
def count({unquote(op), _, arguments}) when is_list(arguments) do
case arguments |> List.first do
{_atom, _meta, nil} -> 0
{_atom, _meta, list} -> list |> Enum.count
_ -> 0
end
end
end
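# Sketch (hypothetical AST): quoting `def add(a, b), do: a + b` yields
# {:def, meta, [{:add, meta, [{:a, _, nil}, {:b, _, nil}]}, [do: ...]]},
# so count/1 returns 2 and names/1 returns [:a, :b].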
@doc "Returns the names of parameters for the given function's AST"
def names(nil), do: nil
for op <- @def_ops do
def names({unquote(op), _meta, arguments}) when is_list(arguments) do
arguments |> List.first |> get_param_names
end
end
defp get_param_names({:when, _meta, arguments}) do
arguments |> List.first |> get_param_names
end
defp get_param_names(arguments) when is_tuple(arguments) do
arguments
|> Tuple.to_list
|> List.last
|> Enum.map(&get_param_name/1)
|> Enum.reject(&is_nil/1)
end
defp get_param_name({:::, _, [var, _type]}) do
var |> get_param_name
end
defp get_param_name({:<<>>, _, arguments}) do
arguments
|> Enum.map(&get_param_name/1)
|> Enum.reject(&is_nil/1)
end
defp get_param_name({:=, _, arguments}) do
arguments
|> Enum.map(&get_param_name/1)
|> Enum.reject(&is_nil/1)
end
defp get_param_name({:%, _, [{:__aliases__, _meta, _mod_list}, {:%{}, _meta2, arguments}]}) do
arguments
|> get_param_name
end
defp get_param_name({:%{}, _, arguments}) do
arguments
|> get_param_name
end
defp get_param_name({:\\, _, arguments}) do
arguments
|> Enum.find_value(&get_param_name/1)
end
defp get_param_name(list) when is_list(list) do
list
|> Enum.map(fn
{atom, tuple} when is_atom(atom) and is_tuple(tuple) -> get_param_name(tuple)
end)
|> Enum.reject(&is_nil/1)
end
defp get_param_name({name, _, nil}) when is_atom(name), do: name
defp get_param_name(_) do
nil
end
end
|
lib/credo/code/parameters.ex
| 0.606848 | 0.520253 |
parameters.ex
|
starcoder
|
defmodule Bigtable.RowSet do
@moduledoc """
Provides functions to build a `Google.Bigtable.V2.RowSet` and apply it to a `Google.Bigtable.V2.ReadRowsRequest`
"""
alias Bigtable.ReadRows
alias Google.Bigtable.V2
@doc """
Adds a single or list of row keys to a `Google.Bigtable.V2.ReadRowsRequest`
Returns `Google.Bigtable.V2.ReadRowsRequest`
## Examples
#### Single Key
iex> request = Bigtable.ReadRows.build("table") |> Bigtable.RowSet.row_keys("Row#123")
iex> with %Google.Bigtable.V2.ReadRowsRequest{} <- request, do: request.rows
%Google.Bigtable.V2.RowSet{row_keys: ["Row#123"], row_ranges: []}
#### Multiple Keys
iex> request = Bigtable.ReadRows.build("table") |> Bigtable.RowSet.row_keys(["Row#123", "Row#124"])
iex> with %Google.Bigtable.V2.ReadRowsRequest{} <- request, do: request.rows
%Google.Bigtable.V2.RowSet{row_keys: ["Row#123", "Row#124"], row_ranges: []}
"""
@spec row_keys(V2.ReadRowsRequest.t(), [binary()]) :: V2.ReadRowsRequest.t()
def row_keys(%V2.ReadRowsRequest{} = request, keys) when is_list(keys) do
prev_row_ranges = get_row_ranges(request)
%{request | rows: V2.RowSet.new(row_keys: keys, row_ranges: prev_row_ranges)}
end
@spec row_keys(V2.ReadRowsRequest.t(), binary()) :: V2.ReadRowsRequest.t()
def row_keys(%V2.ReadRowsRequest{} = request, key) when is_binary(key) do
row_keys(request, [key])
end
@doc """
Adds a single or list of row keys to the default `Google.Bigtable.V2.ReadRowsRequest`
Returns `Google.Bigtable.V2.ReadRowsRequest`
## Examples
#### Single Key
iex> request = Bigtable.RowSet.row_keys("Row#123")
iex> with %Google.Bigtable.V2.ReadRowsRequest{} <- request, do: request.rows
%Google.Bigtable.V2.RowSet{row_keys: ["Row#123"], row_ranges: []}
#### Multiple Keys
iex> request = Bigtable.RowSet.row_keys(["Row#123", "Row#124"])
iex> with %Google.Bigtable.V2.ReadRowsRequest{} <- request, do: request.rows
%Google.Bigtable.V2.RowSet{row_keys: ["Row#123", "Row#124"], row_ranges: []}
"""
@spec row_keys([binary()]) :: V2.ReadRowsRequest.t()
def row_keys(keys) when is_list(keys) do
ReadRows.build() |> row_keys(keys)
end
@spec row_keys(binary()) :: V2.ReadRowsRequest.t()
def row_keys(key) when is_binary(key) do
ReadRows.build() |> row_keys(key)
end
@doc """
Adds a single or list of row ranges to a `Google.Bigtable.V2.ReadRowsRequest` with an optional boolean flag to specify the inclusivity of the range start and end.
Row ranges should be provided in the format {start, end} or {start, end, inclusive}.
Returns `Google.Bigtable.V2.ReadRowsRequest`
## Examples
#### Single Range
iex> request = Bigtable.ReadRows.build("table") |> Bigtable.RowSet.row_ranges({"start", "end"})
iex> with %Google.Bigtable.V2.ReadRowsRequest{} <- request, do: request.rows
%Google.Bigtable.V2.RowSet{
row_keys: [],
row_ranges: [
%Google.Bigtable.V2.RowRange{
end_key: {:end_key_closed, "end"},
start_key: {:start_key_closed, "start"}
}
]
}
#### Multiple Ranges
iex> ranges = [{"start1", "end1"}, {"start2", "end2", false}]
iex> request = Bigtable.ReadRows.build("table") |> Bigtable.RowSet.row_ranges(ranges)
iex> with %Google.Bigtable.V2.ReadRowsRequest{} <- request, do: request.rows
%Google.Bigtable.V2.RowSet{
row_keys: [],
row_ranges: [
%Google.Bigtable.V2.RowRange{
end_key: {:end_key_closed, "end1"},
start_key: {:start_key_closed, "start1"}
},
%Google.Bigtable.V2.RowRange{
end_key: {:end_key_open, "end2"},
start_key: {:start_key_open, "start2"}
}
]
}
"""
@spec row_ranges(
V2.ReadRowsRequest.t(),
[{binary(), binary(), boolean()}]
| [{binary(), binary()}]
| {binary(), binary(), boolean()}
| {binary(), binary()}
) :: V2.ReadRowsRequest.t()
def row_ranges(%V2.ReadRowsRequest{} = request, ranges) do
ranges = List.flatten([ranges])
ranges
|> Enum.map(&translate_range/1)
|> apply_ranges(request)
end
@doc """
Adds a single or list of row ranges to the default `Google.Bigtable.V2.ReadRowsRequest` with an optional boolean flag to specify the inclusivity of the range start and end.
Row ranges should be provided in the format {start, end} or {start, end, inclusive}.
Returns `Google.Bigtable.V2.ReadRowsRequest`
## Examples
#### Single Range
iex> request = Bigtable.RowSet.row_ranges({"start", "end"})
iex> with %Google.Bigtable.V2.ReadRowsRequest{} <- request, do: request.rows
%Google.Bigtable.V2.RowSet{
row_keys: [],
row_ranges: [
%Google.Bigtable.V2.RowRange{
end_key: {:end_key_closed, "end"},
start_key: {:start_key_closed, "start"}
}
]
}
#### Multiple Ranges
iex> ranges = [{"start1", "end1"}, {"start2", "end2", false}]
iex> request = Bigtable.RowSet.row_ranges(ranges)
iex> with %Google.Bigtable.V2.ReadRowsRequest{} <- request, do: request.rows
%Google.Bigtable.V2.RowSet{
row_keys: [],
row_ranges: [
%Google.Bigtable.V2.RowRange{
end_key: {:end_key_closed, "end1"},
start_key: {:start_key_closed, "start1"}
},
%Google.Bigtable.V2.RowRange{
end_key: {:end_key_open, "end2"},
start_key: {:start_key_open, "start2"}
}
]
}
"""
@spec row_ranges(
[{binary(), binary(), boolean()}]
| [{binary(), binary()}]
| {binary(), binary(), boolean()}
| {binary(), binary()}
) :: V2.ReadRowsRequest.t()
def row_ranges(ranges) do
ReadRows.build()
|> row_ranges(ranges)
end
# Fetches the previous row ranges from a ReadRowsRequest object
defp get_row_ranges(%V2.ReadRowsRequest{} = request) do
case request.rows do
%V2.RowSet{} = row_set ->
row_set.row_ranges
_ ->
[]
end
end
# Fetches the previous row keys from a ReadRowsRequest object
defp get_row_keys(%V2.ReadRowsRequest{} = request) do
case request.rows do
%V2.RowSet{} = row_set ->
row_set.row_keys
_ ->
[]
end
end
# Returns an inclusive or exclusive range depending on the boolean flag
defp translate_range({start_key, end_key, inclusive}) do
case inclusive do
true -> inclusive_range(start_key, end_key)
false -> exclusive_range(start_key, end_key)
end
end
defp translate_range({start_key, end_key}) do
inclusive_range(start_key, end_key)
end
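# e.g. translate_range({"a", "z"}) builds a closed (inclusive) range, while
# translate_range({"a", "z", false}) builds an open one:
# %V2.RowRange{start_key: {:start_key_open, "a"}, end_key: {:end_key_open, "z"}}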
defp exclusive_range(start_key, end_key) do
V2.RowRange.new(
start_key: {:start_key_open, start_key},
end_key: {:end_key_open, end_key}
)
end
defp inclusive_range(start_key, end_key) do
V2.RowRange.new(
start_key: {:start_key_closed, start_key},
end_key: {:end_key_closed, end_key}
)
end
# Applies row ranges to a ReadRows request
defp apply_ranges(ranges, %V2.ReadRowsRequest{} = request) do
prev_row_keys = get_row_keys(request)
%{request | rows: V2.RowSet.new(row_keys: prev_row_keys, row_ranges: ranges)}
end
end
|
lib/data/row_set.ex
| 0.921274 | 0.607721 |
row_set.ex
|
starcoder
|
defmodule Ash.Filter.Predicate do
@moduledoc "Represents a filter predicate"
defstruct [:resource, :attribute, :relationship_path, :predicate, :value]
alias Ash.Error.Query.UnsupportedPredicate
alias Ash.Filter
alias Ash.Filter.{Expression, Not}
@type predicate :: struct
@type comparison ::
:unknown
| :right_excludes_left
| :left_excludes_right
| :right_includes_left
| :left_includes_right
| :mutually_inclusive
| :mutually_exclusive
# A simplification value for the right term
| {:simplify, term}
| {:simplify, term, term}
@type t :: %__MODULE__{
attribute: Ash.attribute(),
relationship_path: list(atom),
predicate: predicate
}
@callback new(Ash.resource(), Ash.attribute(), term) :: {:ok, struct} | {:error, term}
@callback compare(predicate(), predicate()) :: comparison()
@callback match?(predicate(), term, Ash.Type.t()) :: boolean | :unknown
defmacro __using__(_opts) do
quote do
@behaviour Ash.Filter.Predicate
@impl true
def compare(_, _), do: :unknown
@impl true
def match?(_, _, _), do: :unknown
defoverridable compare: 2, match?: 3
end
end
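# Sketch (hypothetical predicate module): given an equality predicate struct
# such as %MyApp.Predicates.Eq{value: 5}, match?(pred, 5, :integer) dispatches
# to MyApp.Predicates.Eq.match?/3 via pred.__struct__.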
def match?(predicate, value, type) do
predicate.__struct__.match?(predicate, value, type)
end
@spec compare(predicate(), predicate()) :: comparison
def compare(%__MODULE__{predicate: left} = pred, right) do
case compare(left, right) do
{:simplify, simplification} ->
simplification =
Filter.map(simplification, fn
%struct{} = expr when struct in [__MODULE__, Not, Expression] ->
expr
other ->
wrap_in_predicate(pred, other)
end)
{:simplify, simplification}
other ->
other
end
end
def compare(left, %__MODULE__{predicate: right}), do: compare(left, right)
def compare(left, right) do
if left.__struct__ == right.__struct__ do
with {:right_to_left, :unknown} <- {:right_to_left, left.__struct__.compare(left, right)},
{:left_to_right, :unknown} <- {:left_to_right, right.__struct__.compare(left, right)} do
:mutually_exclusive
else
{:right_to_left, {:simplify, left, _}} -> {:simplify, left}
{:left_to_right, {:simplify, _, right}} -> {:simplify, right}
{_, other} -> other
end
else
with {:right_to_left, :unknown} <- {:right_to_left, left.__struct__.compare(left, right)},
{:right_to_left, :unknown} <- {:right_to_left, right.__struct__.compare(left, right)},
{:left_to_right, :unknown} <- {:left_to_right, right.__struct__.compare(left, right)},
{:left_to_right, :unknown} <- {:left_to_right, left.__struct__.compare(left, right)} do
:mutually_exclusive
else
{:right_to_left, {:simplify, left, _}} -> {:simplify, left}
{:left_to_right, {:simplify, _, right}} -> {:simplify, right}
{_, other} -> other
end
end
end
defp wrap_in_predicate(predicate, %struct{} = other) do
if Ash.implements_behaviour?(struct, Ash.Filter.Predicate) do
%{predicate | predicate: other}
else
other
end
end
def new(resource, attribute, predicate, value, relationship_path) do
case predicate.new(resource, attribute, value) do
{:ok, predicate} ->
if Ash.Resource.data_layer_can?(
resource,
{:filter_predicate, Ash.Type.storage_type(attribute.type), predicate}
) do
{:ok,
%__MODULE__{
resource: resource,
attribute: attribute,
predicate: predicate,
value: value,
relationship_path: relationship_path
}}
else
{:error,
UnsupportedPredicate.exception(
resource: resource,
predicate: predicate,
type: Ash.Type.storage_type(attribute.type)
)}
end
{:error, error} ->
{:error, error}
end
end
# custom_options not available in Elixir before 1.9
def add_inspect_path(inspect_opts, field) do
case inspect_opts do
%{custom_options: %{relationship_path: path}} ->
Enum.join(path, ".") <> "." <> to_string(field)
_ ->
to_string(field)
end
end
defimpl Inspect do
import Inspect.Algebra
def inspect(
%{relationship_path: relationship_path, predicate: predicate},
opts
) do
opts = %{
opts
| syntax_colors: [
atom: :yellow,
binary: :green,
boolean: :magenta,
list: :cyan,
map: :magenta,
number: :red,
regex: :violet,
tuple: :white
]
}
opts =
apply(Map, :put, [
opts,
:custom_options,
Keyword.put(opts.custom_options || [], :relationship_path, relationship_path)
])
# Above indirection required to avoid dialyzer warning in pre-1.9 Elixir
to_doc(predicate, opts)
end
end
end
|
lib/ash/filter/predicate.ex
| 0.891714 | 0.489015 |
predicate.ex
|
starcoder
|
defmodule GTFSRealtimeViz do
@moduledoc """
GTFSRealtimeViz is an OTP app that can be run by itself or as part of another
application. You can send it protobuf VehiclePositions.pb files, in sequence,
and then output them as an HTML fragment, to either open in a browser or embed
in another view.
Example usage as stand alone:
```
$ iex -S mix
iex(1)> proto = File.read!("filename.pb")
iex(2)> GTFSRealtimeViz.new_message(:prod, proto, "first protobuf file")
iex(3)> File.write!("output.html", GTFSRealtimeViz.visualize(:prod, %{}))
```
"""
alias GTFSRealtimeViz.State
alias GTFSRealtimeViz.Proto
@type route_opts :: %{String.t => [{String.t, String.t, String.t}]}
require EEx
EEx.function_from_file :defp, :gen_html, "lib/templates/viz.eex", [:assigns], [engine: Phoenix.HTML.Engine]
EEx.function_from_file :defp, :render_diff, "lib/templates/diff.eex", [:assigns], [engine: Phoenix.HTML.Engine]
EEx.function_from_file :defp, :render_single_file, "lib/templates/single_file.eex", [:assigns], [engine: Phoenix.HTML.Engine]
@doc """
Send protobuf files to the app's GenServer. The app can handle a series of files,
belonging to different groupings (e.g., test, dev, and prod). When sending the file,
you must also provide a comment (perhaps a time stamp or other information about the
file), which will be displayed along with the visualization.
"""
@spec new_message(term, Proto.raw, String.t) :: :ok
def new_message(group, raw, comment) do
State.single_pb(group, raw, comment)
end
def new_message(group, vehicle_positions, trip_updates, comment) do
State.new_data(group, vehicle_positions, trip_updates, comment)
end
@doc """
Renders the received protobuf files and comments into an HTML fragment that can either
be opened directly in a browser or embedded within the HTML layout of another app.
"""
@spec visualize(term, route_opts) :: String.t
def visualize(group, opts) do
routes = Map.keys(opts[:routes])
vehicle_archive = get_vehicle_archive(group, routes)
trip_update_archive = get_trip_update_archive(group, routes, opts[:timezone])
[trip_update_archive: trip_update_archive, vehicle_archive: vehicle_archive, routes: opts[:routes], render_diff?: false]
|> gen_html
|> Phoenix.HTML.safe_to_string
end
@doc """
Renders an HTML fragment that displays the vehicle differences
between two pb files.
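## Example
A sketch with hypothetical group names and route data; the `:routes` and
`:timezone` keys are the ones this function reads from `opts`:
```
GTFSRealtimeViz.visualize_diff(:before, :after,
%{routes: %{"Red" => []}, timezone: "America/New_York"})
```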
"""
@spec visualize_diff(term, term, route_opts) :: String.t
def visualize_diff(group_1, group_2, opts) do
routes = Map.keys(opts[:routes])
vehicle_archive_1 = get_vehicle_archive(group_1, routes)
trip_archive_1 = get_trip_update_archive(group_1, routes, opts[:timezone])
vehicle_archive_2 = get_vehicle_archive(group_2, routes)
trip_archive_2 = get_trip_update_archive(group_2, routes, opts[:timezone])
[trip_update_archive: Enum.zip(trip_archive_1, trip_archive_2), vehicle_archive: Enum.zip(vehicle_archive_1, vehicle_archive_2), routes: opts[:routes], render_diff?: true]
|> gen_html()
|> Phoenix.HTML.safe_to_string()
end
defp get_trip_update_archive(group, routes, timezone) do
group
|> State.trip_updates
|> trips_we_care_about(routes)
|> trip_updates_by_stop_direction_id(timezone)
end
def trips_we_care_about(state, routes) do
Enum.map(state,
fn {descriptor, update_list} ->
filtered_positions = update_list
|> Enum.filter(fn trip_update ->
trip_update.trip.route_id in routes
end)
{descriptor, filtered_positions}
end)
end
defp trip_updates_by_stop_direction_id(state, timezone) do
Enum.map(state, fn {_descriptor, trip_updates} ->
trip_updates
|> Enum.flat_map(fn trip_update ->
trip_update.stop_time_update
|> Enum.reduce(%{}, fn stop_update, stop_update_acc ->
if stop_update.arrival && stop_update.arrival.time do
Map.put(stop_update_acc, {stop_update.stop_id, trip_update.trip.direction_id}, {trip_update.trip.trip_id, stop_update.arrival.time})
else
stop_update_acc
end
end)
end)
end)
|> Enum.map(fn predictions ->
Enum.reduce(predictions, %{}, fn {stop_id, {trip_id, time}}, acc ->
Map.update(acc, stop_id, [{trip_id, timestamp(time, timezone)}], fn timestamps -> timestamps ++ [{trip_id, timestamp(time, timezone)}] end)
end)
end)
end
defp timestamp(diff_time, timezone) do
diff_datetime = diff_time
|> DateTime.from_unix!()
|> Timex.Timezone.convert(timezone)
diff_datetime
end
defp get_vehicle_archive(group, routes) do
group
|> State.vehicles
|> vehicles_we_care_about(routes)
|> vehicles_by_stop_direction_id()
end
def vehicles_we_care_about(state, routes) do
Enum.map(state,
fn {descriptor, position_list} ->
filtered_positions = position_list
|> Enum.filter(fn position ->
position.trip && position.trip.route_id in routes
end)
{descriptor, filtered_positions}
end)
end
@spec vehicles_by_stop_direction_id([{String.t, [Proto.vehicle_position]}]) :: [{String.t, %{required(String.t) => [Proto.vehicle_position]}}]
defp vehicles_by_stop_direction_id(state) do
Enum.map(state, fn {comment, vehicles} ->
vehicles_by_stop = Enum.reduce(vehicles, %{}, fn v, acc ->
update_in acc, [{v.stop_id, v.trip.direction_id}], fn vs ->
[v | (vs || [])]
end
end)
{comment, vehicles_by_stop}
end)
end
@spec format_times([{String.t, DateTime.t}] | nil) :: [Phoenix.HTML.Safe.t]
def format_times(nil) do
[]
end
def format_times(time_list) do
time_list
|> sort_by_time()
|> Enum.take(2)
|> Enum.map(&format_time/1)
end
def sort_by_time(time_list) do
Enum.sort(time_list, &time_list_sorter/2)
end
defp time_list_sorter({_, time1}, {_, time2}) do
Timex.before?(time1, time2)
end
defp format_time({_, nil}) do
nil
end
defp format_time({trip_id, time}) do
ascii = Timex.format!(time, "{h24}:{m}:{s}")
span_for_id({ascii, trip_id})
end
@spec format_time_diff(time_list, time_list) :: [{time_output, time_output}]
when time_list: {String.t, DateTime.t} | nil, time_output: Phoenix.HTML.Safe.t | nil
def format_time_diff(base_list, nil) do
for format <- format_times(base_list) do
{format, nil}
end
end
def format_time_diff(nil, diff_list) do
for format <- format_times(diff_list) do
{nil, format}
end
end
def format_time_diff(base_list, diff_list) do
base_map = Map.new(base_list)
diff_map = Map.new(diff_list)
trips = for trip_id <- Map.keys(Map.merge(base_map, diff_map)),
base = base_map[trip_id],
diff = diff_map[trip_id],
is_nil(base) or is_nil(diff) or DateTime.compare(base, diff) != :eq do
{format_time({trip_id, base}), format_time({trip_id, diff})}
end
Enum.take(trips, 2)
end
@spec trainify([Proto.vehicle_position], Proto.vehicle_position_statuses, String.t) :: iodata
defp trainify(vehicles, status, ascii_train) do
vehicles
|> vehicles_with_status(status)
|> Enum.map(fn status ->
label =
if status.vehicle do
status.vehicle.label || ""
else
""
end
[ascii_train, " ", label]
end)
|> Enum.intersperse(",")
end
@spec trainify_diff([Proto.vehicle_position], [Proto.vehicle_position], Proto.vehicle_position_statuses, String.t, String.t) :: Phoenix.HTML.Safe.t
defp trainify_diff(vehicles_base, vehicles_diff, status, ascii_train_base, ascii_train_diff) do
base = vehicles_with_status(vehicles_base, status) |> Enum.map(& &1.vehicle && &1.vehicle.id)
diff = vehicles_with_status(vehicles_diff, status) |> Enum.map(& &1.vehicle && &1.vehicle.id)
unique_base = unique_trains(base, diff, ascii_train_base)
unique_diff = unique_trains(diff, base, ascii_train_diff)
[unique_base, unique_diff]
|> List.flatten()
|> Enum.map(&span_for_id/1)
|> Enum.intersperse(",")
end
defp span_for_id({ascii, id}) do
tag_opts = [class: "vehicle-#{id}", onmouseover: "highlight('#{id}', 'red')", onmouseout: "highlight('#{id}', 'black')"]
:span
|> Phoenix.HTML.Tag.content_tag([ascii, "(", id, ")"], tag_opts)
end
# removes any vehicles that appear in given list
defp unique_trains(vehicles_1, vehicles_2, ascii) do
Enum.reject(vehicles_1, & &1 in vehicles_2) |> Enum.map(&{ascii, &1})
end
defp vehicles_with_status(vehicles, status) do
Enum.filter(vehicles, & &1.current_status == status)
end
end
|
lib/gtfs_realtime_viz.ex
| 0.800224 | 0.774924 |
gtfs_realtime_viz.ex
|
starcoder
|
defmodule Pbuf.Protoc.Fields.Simple do
use Pbuf.Protoc.Field
def new(desc, context) do
typespec = typespec(desc)
repeated = is_repeated?(desc)
packed? = context.version == 3 || (desc.options != nil && desc.options.packed == true)
{tag, name, type, prefix} = extract_core(desc, packed?)
{encode_fun, typespec, default} =
cond do
repeated == false -> {"field", typespec, default(context, type)}
packed? == false -> {"repeated_unpacked_field", "[" <> typespec <> "]", "[]"}
true -> {"repeated_field", "[" <> typespec <> "]", "[]"}
end
# length-prefixed types aren't packed, treat the decode as individual
# fields and let something else worry about merging it
decode_fun = cond do
type == :struct -> "struct_field" # special name largely because a module is an atom and we need to match between the two
!repeated || type in [:bytes, :string] -> "field" # never packed
packed? == false -> "repeated_unpacked_field"
true -> "repeated_field"
end
decode_type = case type == :struct do
false -> ":#{type}"
true -> module_name(desc)
end
post_decode = cond do
repeated && !packed? -> :repack
repeated && type in [:bytes, :string, :struct] -> :repack
true -> get_post_decode(desc, repeated, type)
end
encode_fun = case post_decode do
{:encoder, {encoder, _decode_opts}} -> "
case data.#{name} do
<<>> -> []
value -> Encoder.#{encode_fun}(:#{type}, #{encoder}.encode!(value), #{prefix})
end"
_ -> "Encoder.#{encode_fun}(:#{type}, data.#{name}, #{prefix})"
end
%Field{
tag: tag,
name: name,
prefix: prefix,
default: default,
typespec: typespec,
post_decode: post_decode,
encode_fun: encode_fun,
json?: json?(desc, repeated, type),
decode_fun: "Decoder.#{decode_fun}(#{decode_type}, :#{name}, acc, data)",
}
end
@spec typespec(Protoc.proto_field | atom) :: String.t
def typespec(%{type_name: t} = desc) when t not in [nil, ""] do
module_name(desc) <> ".t"
end
def typespec(:bool), do: "boolean"
def typespec(:bytes), do: "binary"
def typespec(:string), do: "String.t"
def typespec(:double), do: "number"
def typespec(:float), do: "number"
def typespec(:uint32), do: "non_neg_integer"
def typespec(:uint64), do: "non_neg_integer"
def typespec(other) when is_atom(other) do
"integer"
end
def typespec(%{type: type}) do
typespec(internal_type(type))
end
def default(%{version: 2}, _), do: "nil"
def default(_, :bool), do: false
def default(_, :bytes), do: "<<>>"
def default(_, :string), do: ~s("")
def default(_, :double), do: 0.0
def default(_, :float), do: 0.0
def default(_, :struct), do: "nil"
def default(_, _other), do: 0
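# e.g. default(%{version: 3}, :string) => ~s("") while
# default(%{version: 2}, :string) => "nil" (proto2 scalar fields default to nil).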
defp get_post_decode(%{options: options}, false, type) when options != nil and type in [:bytes, :string] do
case Map.get(options, :json_field, 0) do
1 -> {:encoder, {Jason, "[]"}}
2 -> {:encoder, {Jason, "[keys: :atoms]"}}
_ -> :none
end
end
defp get_post_decode(_desc, _repeated, _type) do
:none
end
defp json?(%{options: options}, false, _type) when options != nil do
Map.get(options, :json_field, 0) != -1
end
defp json?(_, _, _), do: true
end
|
lib/protoc/fields/simple.ex
| 0.522933 | 0.420391 |
simple.ex
|
starcoder
|
defmodule TextDelta do
@moduledoc """
Delta is a format used to describe text states and changes.
Delta can describe any rich text changes or a rich text itself, preserving all
the formatting.
At the baseline level, delta is an array of operations (constructed via
`TextDelta.Operation`). Operations can be either
`t:TextDelta.Operation.insert/0`, `t:TextDelta.Operation.retain/0` or
`t:TextDelta.Operation.delete/0`. None of the operations contain index,
meaning that delta aways describes text or a change staring from the very
beginning.
Delta can describe both changes to and text states themselves. We can think of
a document as an artefact of all the changes applied to it. This way, newly
imported documents can be thought of as a sequence of `insert`s applied to an
empty text.
Deltas are composable. This means that a text delta can be composed with
another delta for that text, resulting in a shorter, optimized version.
Deltas are also transformable. This attribute of deltas is what enables
[Operational Transformation][ot] - a way to transform one operation against
the context of another one. Operational Transformation allows us to build
optimistic, non-locking collaborative editors.
The format for deltas was deliberately copied from [Quill][quill] - a rich
text editor for web. This library aims to be an Elixir counter-part for Quill,
enabling us to build matching backends for the editor.
## Example
iex> delta = TextDelta.insert(TextDelta.new(), "Gandalf", %{bold: true})
%TextDelta{ops: [
%{insert: "Gandalf", attributes: %{bold: true}}]}
iex> delta = TextDelta.insert(delta, " the ")
%TextDelta{ops: [
%{insert: "Gandalf", attributes: %{bold: true}},
%{insert: " the "}]}
iex> TextDelta.insert(delta, "Grey", %{color: "#ccc"})
%TextDelta{ops: [
%{insert: "Gandalf", attributes: %{bold: true}},
%{insert: " the "},
%{insert: "Grey", attributes: %{color: "#ccc"}}]}
[ot]: https://en.wikipedia.org/wiki/Operational_transformation
[quill]: https://quilljs.com
"""
alias TextDelta.{
Operation,
Attributes,
Composition,
Transformation,
Application,
Document,
Difference
}
defstruct ops: []
@typedoc """
Delta is a set of `t:TextDelta.Operation.retain/0`,
`t:TextDelta.Operation.insert/0`, or `t:TextDelta.Operation.delete/0`
operations.
"""
@type t :: %TextDelta{ops: [Operation.t()]}
@typedoc """
A text state represented as delta. Any text state can be represented as a set
of `t:TextDelta.Operation.insert/0` operations.
"""
@type state :: %TextDelta{ops: [Operation.insert()]}
@typedoc """
Alias to `t:TextDelta.state/0`.
"""
@type document :: state
@doc """
Creates new delta.
## Examples
iex> TextDelta.new()
%TextDelta{ops: []}
You can also pass set of operations using optional argument:
iex> TextDelta.new([TextDelta.Operation.insert("hello")])
%TextDelta{ops: [%{insert: "hello"}]}
"""
@spec new([Operation.t()]) :: t
def new(ops \\ [])
def new([]), do: %TextDelta{}
def new(ops), do: Enum.reduce(ops, new(), &append(&2, &1))
@doc """
Creates and appends new `insert` operation to the delta.
Same as with underlying `TextDelta.Operation.insert/2` function, attributes
are optional.
`TextDelta.append/2` is used under the hood to add the operation to the delta
after construction. So all `append` rules apply.
## Example
iex> TextDelta.insert(TextDelta.new(), "hello", %{bold: true})
%TextDelta{ops: [%{insert: "hello", attributes: %{bold: true}}]}
"""
@spec insert(t, Operation.element(), Attributes.t()) :: t
def insert(delta, el, attrs \\ %{}) do
append(delta, Operation.insert(el, attrs))
end
@doc """
Creates and appends new `retain` operation to the delta.
Same as with underlying `TextDelta.Operation.retain/2` function, attributes
are optional.
`TextDelta.append/2` is used under the hood to add the operation to the delta
after construction. So all `append` rules apply.
## Example
iex> TextDelta.retain(TextDelta.new(), 5, %{italic: true})
%TextDelta{ops: [%{retain: 5, attributes: %{italic: true}}]}
"""
@spec retain(t, non_neg_integer, Attributes.t()) :: t
def retain(delta, len, attrs \\ %{}) do
append(delta, Operation.retain(len, attrs))
end
@doc """
Creates and appends new `delete` operation to the delta.
`TextDelta.append/2` is used under the hood to add the operation to the delta
after construction. So all `append` rules apply.
## Example
iex> TextDelta.delete(TextDelta.new(), 3)
%TextDelta{ops: [%{delete: 3}]}
"""
@spec delete(t, non_neg_integer) :: t
def delete(delta, len) do
append(delta, Operation.delete(len))
end
@doc """
Appends given operation to the delta.
Before adding operation to the delta, this function attempts to compact it by
applying 2 simple rules:
1. Delete followed by insert is swapped to ensure that insert goes first.
2. Same operations with the same attributes are merged.
These two rules ensure that our deltas are always as short as possible and
canonical, making it easier to compare, compose and transform them.
## Example
iex> operation = TextDelta.Operation.insert("hello")
iex> TextDelta.append(TextDelta.new(), operation)
%TextDelta{ops: [%{insert: "hello"}]}
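Rule 2 in action (a sketch; consecutive inserts with identical attributes merge):
iex> TextDelta.new() |> TextDelta.insert("he") |> TextDelta.insert("llo")
%TextDelta{ops: [%{insert: "hello"}]}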
"""
@spec append(t, Operation.t()) :: t
def append(delta, op) do
delta.ops
|> Enum.reverse()
|> compact(op)
|> Enum.reverse()
|> wrap()
end
defdelegate compose(first, second), to: Composition
defdelegate transform(left, right, priority), to: Transformation
defdelegate apply(state, delta), to: Application
defdelegate apply!(state, delta), to: Application
defdelegate lines(delta), to: Document
defdelegate lines!(delta), to: Document
defdelegate diff(first, second), to: Difference
defdelegate diff!(first, second), to: Difference
@doc """
Trims trailing retains from the end of a given delta.
## Example
iex> TextDelta.trim(TextDelta.new([%{insert: "hello"}, %{retain: 5}]))
%TextDelta{ops: [%{insert: "hello"}]}
"""
@spec trim(t) :: t
def trim(delta)
def trim(%TextDelta{ops: []} = empty), do: empty
def trim(delta) do
last_operation = List.last(delta.ops)
case Operation.trimmable?(last_operation) do
true ->
delta.ops
|> Enum.slice(0..-2)
|> wrap()
|> trim()
false ->
delta
end
end
@doc """
Calculates the length of a given delta.
Length of delta is a sum of its operations length.
## Example
iex> TextDelta.length(TextDelta.new([%{insert: "hello"}, %{retain: 5}]))
10
The function also allows selecting which types of operations to include in the
summary via an optional second argument:
iex> TextDelta.length(TextDelta.new([%{insert: "hi"}]), [:retain])
0
"""
@spec length(t, [Operation.type()]) :: non_neg_integer
def length(delta, op_types \\ [:insert, :retain, :delete]) do
delta.ops
|> Enum.filter(&(Operation.type(&1) in op_types))
|> Enum.map(&Operation.length/1)
|> Enum.sum()
end
@doc """
Returns set of operations for a given delta.
## Example
iex> TextDelta.operations(TextDelta.new([%{delete: 5}, %{retain: 3}]))
[%{delete: 5}, %{retain: 3}]
"""
@spec operations(t) :: [Operation.t()]
def operations(delta), do: delta.ops
defp compact(ops, %{insert: ""}), do: ops
defp compact(ops, %{retain: 0}), do: ops
defp compact(ops, %{delete: 0}), do: ops
defp compact(ops, []), do: ops
defp compact(ops, nil), do: ops
defp compact([], new_op), do: [new_op]
defp compact([%{delete: _} = del | ops_remainder], %{insert: _} = ins) do
ops_remainder
|> compact(ins)
|> compact(del)
end
defp compact([last_op | ops_remainder], new_op) do
last_op
|> Operation.compact(new_op)
|> Enum.reverse()
|> Kernel.++(ops_remainder)
end
defp wrap(ops), do: %TextDelta{ops: ops}
end
|
lib/text_delta.ex
| 0.948725 | 0.766337 |
text_delta.ex
|
starcoder
|
defmodule Explorer.PolarsBackend.Shared do
# A collection of **private** helpers shared in Explorer.PolarsBackend.
@moduledoc false
alias Explorer.DataFrame, as: DataFrame
alias Explorer.PolarsBackend.DataFrame, as: PolarsDataFrame
alias Explorer.PolarsBackend.Native
alias Explorer.PolarsBackend.Series, as: PolarsSeries
alias Explorer.Series, as: Series
def apply_native(df_or_s, fun, args \\ [])
def apply_native(%Series{data: series}, fun, args) do
result = apply(Native, fun, [series | args])
unwrap(result)
end
def apply_native(%DataFrame{data: df, groups: groups}, fun, args) do
result = apply(Native, fun, [df | args])
unwrap(result, groups)
end
def to_polars_df(%DataFrame{data: %PolarsDataFrame{} = polars_df}), do: polars_df
def to_polars_df(%PolarsDataFrame{} = polars_df), do: polars_df
def to_dataframe(df, groups \\ [])
def to_dataframe(%DataFrame{} = df, _groups), do: df
def to_dataframe(%PolarsDataFrame{} = polars_df, groups),
do: %DataFrame{data: polars_df, groups: groups}
def to_polars_s(%Series{data: %PolarsSeries{} = polars_s}), do: polars_s
def to_polars_s(%PolarsSeries{} = polars_s), do: polars_s
def to_series(%PolarsSeries{} = polars_s) do
{:ok, dtype} = Native.s_dtype(polars_s)
dtype = normalise_dtype(dtype)
%Series{data: polars_s, dtype: dtype}
end
def to_series(%Series{} = series), do: series
def unwrap(df_or_s, groups \\ [])
def unwrap({:ok, %PolarsSeries{} = series}, _), do: to_series(series)
def unwrap({:ok, %PolarsDataFrame{} = df}, groups), do: to_dataframe(df, groups)
def unwrap({:ok, value}, _), do: value
def unwrap({:error, error}, _), do: raise("#{error}")
def normalise_dtype("u32"), do: :integer
def normalise_dtype("i32"), do: :integer
def normalise_dtype("i64"), do: :integer
def normalise_dtype("f64"), do: :float
def normalise_dtype("bool"), do: :boolean
def normalise_dtype("str"), do: :string
def normalise_dtype("date32(days)"), do: :date
def normalise_dtype("date64(ms)"), do: :datetime
def normalise_dtype("list [u32]"), do: :list
def internal_from_dtype(:integer), do: "i64"
def internal_from_dtype(:float), do: "f64"
def internal_from_dtype(:boolean), do: "bool"
def internal_from_dtype(:string), do: "str"
def internal_from_dtype(:date), do: "date32(days)"
def internal_from_dtype(:datetime), do: "date64(ms)"
end
|
lib/explorer/polars_backend/shared.ex
| 0.762954 | 0.649655 |
shared.ex
|
starcoder
|
defmodule AdventOfCode.Day12 do
@move_regex ~r/^(N|S|E|W|L|R|F)(\d+)$/
@angles %{
:E => 0,
:N => 90,
:W => 180,
:S => 270
}
@inverse_angles %{
0 => :E,
90 => :N,
180 => :W,
270 => :S
}
@spec move_ship(
[{:E | :F | :L | :N | :R | :S | :W, number}],
{{number, number}, :E | :F | :L | :N | :R | :S | :W}
) ::
{{number, number}, :E | :F | :L | :N | :R | :S | :W}
def move_ship([], {{_, _}, _} = position) do
position
end
def move_ship([{:F, units} | directions], {{lat, long}, :E}) do
move_ship(directions, {{lat + units, long}, :E})
end
def move_ship([{:F, units} | directions], {{lat, long}, :W}) do
move_ship(directions, {{lat - units, long}, :W})
end
def move_ship([{:F, units} | directions], {{lat, long}, :N}) do
move_ship(directions, {{lat, long + units}, :N})
end
def move_ship([{:F, units} | directions], {{lat, long}, :S}) do
move_ship(directions, {{lat, long - units}, :S})
end
def move_ship([{:N, units} | directions], {{lat, long}, facing}) do
move_ship(directions, {{lat, long + units}, facing})
end
def move_ship([{:S, units} | directions], {{lat, long}, facing}) do
move_ship(directions, {{lat, long - units}, facing})
end
def move_ship([{:E, units} | directions], {{lat, long}, facing}) do
move_ship(directions, {{lat + units, long}, facing})
end
def move_ship([{:W, units} | directions], {{lat, long}, facing}) do
move_ship(directions, {{lat - units, long}, facing})
end
def move_ship([{:R, units} | directions], {{lat, long}, facing}) do
%{^facing => current_angle} = @angles
new_angle = Integer.mod(current_angle - units, 360)
%{^new_angle => new_direction} = @inverse_angles
move_ship(directions, {{lat, long}, new_direction})
end
def move_ship([{:L, units} | directions], {{lat, long}, facing}) do
%{^facing => current_angle} = @angles
new_angle = Integer.mod(current_angle + units, 360)
%{^new_angle => new_direction} = @inverse_angles
move_ship(directions, {{lat, long}, new_direction})
end
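# e.g. rotate(90, {10, 1}) is a counter-clockwise quarter turn returning
# approximately {-1.0, 10.0} (exact up to floating-point rounding).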
@spec rotate(integer, {number, number}) :: {float, float}
def rotate(angle, {x, y}) do
units = Integer.mod(angle, 360) * :math.pi() / 180
rot_sin = :math.sin(units)
rot_cos = :math.cos(units)
new_x = x * rot_cos - y * rot_sin
new_y = x * rot_sin + y * rot_cos
{new_x, new_y}
end
@spec move_waypoint(
[{:E | :F | :L | :N | :R | :S | :W, number}],
{number, number},
{number, number}
) :: {{number, number}, {number, number}}
def move_waypoint([], position, waypoint_position) do
{position, waypoint_position}
end
def move_waypoint([{:F, units} | directions], {lat, long}, {waypoint_lat, waypoint_long}) do
new_position = {lat + units * waypoint_lat, long + units * waypoint_long}
move_waypoint(directions, new_position, {waypoint_lat, waypoint_long})
end
def move_waypoint([{:N, units} | directions], position, {waypoint_lat, waypoint_long}) do
new_waypoint = {waypoint_lat, waypoint_long + units}
move_waypoint(directions, position, new_waypoint)
end
def move_waypoint([{:S, units} | directions], position, {waypoint_lat, waypoint_long}) do
new_waypoint = {waypoint_lat, waypoint_long - units}
move_waypoint(directions, position, new_waypoint)
end
def move_waypoint([{:E, units} | directions], position, {waypoint_lat, waypoint_long}) do
new_waypoint = {waypoint_lat + units, waypoint_long}
move_waypoint(directions, position, new_waypoint)
end
def move_waypoint([{:W, units} | directions], position, {waypoint_lat, waypoint_long}) do
new_waypoint = {waypoint_lat - units, waypoint_long}
move_waypoint(directions, position, new_waypoint)
end
def move_waypoint([{:R, units} | directions], position, waypoint_pos) do
new_waypoint = rotate(-units, waypoint_pos)
move_waypoint(directions, position, new_waypoint)
end
def move_waypoint([{:L, units} | directions], position, waypoint_pos) do
new_waypoint = rotate(units, waypoint_pos)
move_waypoint(directions, position, new_waypoint)
end
def l1_dist(x, y) do
abs(x) + abs(y)
end
def day12() do
directions =
"day12_input"
|> AdventOfCode.read_file()
|> Enum.map(fn movement ->
[_, direction, units] = Regex.run(@move_regex, movement)
direction = String.to_atom(direction)
{units, _} = Integer.parse(units)
{direction, units}
end)
{{lat, long}, direction} = move_ship(directions, {{0, 0}, :E})
part1 = l1_dist(lat, long)
{{lat_2, long_2}, waypoint} = move_waypoint(directions, {0, 0}, {10, 1})
part2 = l1_dist(lat_2, long_2)
{{{lat, long}, direction, part1}, {{lat_2, long_2}, waypoint, part2}}
end
end
|
lib/day12.ex
| 0.797281 | 0.750118 |
day12.ex
|
starcoder
|
defmodule FlowMonitor.Inspector do
@moduledoc false
alias FlowMonitor.Collector
@mapper_types [:map, :flat_map, :each, :filter]
@binary_operators [
:+,
:-,
:*,
:++,
:--,
:..,
:<>,
:in,
:"not in",
:|>,
:<<<,
:>>>,
:~>>,
:<<~,
:~>,
:<~,
:<~>,
:<|>,
:<,
:>,
:<=,
:>=,
:==,
:!=,
:=~,
:===,
:!==,
:&&,
:&&&,
:and,
:||,
:|||,
:or,
:=,
:|,
:::,
:when,
:<-,
:\\,
:/
]
defmodule NameAcc do
@moduledoc false
defstruct depth: 0,
max_depth: 10,
lines: []
def new, do: %__MODULE__{}
def from(%__MODULE__{depth: depth, max_depth: max_depth}) do
%__MODULE__{depth: depth, max_depth: max_depth}
end
end
@spec extract_names(any()) :: [String.t()]
def extract_names(pipeline) do
pipeline
|> extract_names([])
|> Enum.reverse()
end
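# Sketch: for the quoted pipeline `flow |> Flow.map(&to_string/1)`,
# extract_names/1 returns ["Map (&to_string/1)"].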
defp extract_names(
{
{
:.,
_,
[{:__aliases__, _, [:Flow]}, type]
},
_,
[mapper]
},
acc
)
when type in @mapper_types do
formatted_type =
type
|> Atom.to_string()
|> String.capitalize()
[
NameAcc.new()
|> add(")")
|> build_name(mapper)
|> add("#{formatted_type} (")
|> to_text()
| acc
]
end
defp extract_names({_op, _meta, args}, acc) do
args |> extract_names(acc)
end
defp extract_names(args, acc) when is_list(args) do
args |> Enum.reduce(acc, &extract_names/2)
end
defp extract_names(_, acc), do: acc
defp build_name(
%NameAcc{
depth: depth,
max_depth: max_depth
} = acc,
_args
)
when depth > max_depth do
acc |> add("(...)")
end
defp build_name(
%NameAcc{
depth: depth
} = acc,
args
) do
args
|> to_list()
|> Enum.reduce(%NameAcc{acc | depth: depth + 1}, &build_name_segment/2)
end
defp build_name_segment({:&, _, [arg]}, acc) do
acc
|> build_name(arg)
|> add("&")
end
defp build_name_segment({:/, _, [{{:., _, _}, _, _} = dot_access, arity]}, acc) do
acc
|> add(arity)
|> add("/")
|> build_name(dot_access)
end
defp build_name_segment({:/, _, [{func, _, Elixir}, arity]}, acc) do
acc
|> add(arity)
|> add("/")
|> build_name(func)
end
defp build_name_segment({:., _, [namespace, id]}, acc) do
acc
|> build_name(id)
|> add(".")
|> build_name(namespace)
end
defp build_name_segment({:fn, _, [arrow]}, acc) do
acc
|> build_name(arrow)
|> add("fn ")
end
defp build_name_segment({:->, _, [args, _]}, acc) do
formatted_args =
args
|> Stream.map(fn {arg, _, _} -> arg end)
|> Stream.intersperse(", ")
|> Enum.reverse()
acc
|> add(" -> ... end")
|> add(formatted_args)
end
defp build_name_segment({:__aliases__, _, [sym]}, acc) do
acc |> add(sym)
end
defp build_name_segment({term, _, namespace}, acc) when is_atom(namespace) do
acc |> add(term)
end
defp build_name_segment({op, _, [_, _] = args}, acc) when op in @binary_operators do
formatted_call =
args
|> Stream.map(fn arg ->
NameAcc.from(acc)
|> build_name(arg)
|> to_text()
end)
|> Stream.intersperse(" #{op} ")
|> Enum.join()
acc
|> add(")")
|> add(formatted_call)
|> add("(")
end
defp build_name_segment({op, _, args}, acc) do
formatted_args =
args
|> Stream.map(fn arg ->
NameAcc.from(acc)
|> build_name(arg)
|> to_text()
end)
|> Stream.intersperse(", ")
|> Enum.join()
acc =
if String.length(formatted_args) === 0 do
acc
else
acc
|> add(")")
|> add(formatted_args)
|> add("(")
end
acc |> build_name(op)
end
defp build_name_segment(sym, acc) do
acc |> add(sym)
end
defp add(acc, elem) when not is_list(elem) do
acc |> add([elem])
end
defp add(acc, []), do: acc
defp add(%NameAcc{lines: lines} = acc, [elem | rest]) do
%NameAcc{acc | lines: [elem | lines]} |> add(rest)
end
defp to_text(%NameAcc{lines: lines}) do
lines |> Enum.join()
end
defp to_list(items) when not is_list(items) do
[items]
end
defp to_list(items), do: items
@spec extract_producer_names(Flow.t()) :: [String.t()]
def extract_producer_names(%Flow{producers: producers}) do
case producers do
{:enumerables, enumerables} ->
enumerables
|> Stream.with_index(1)
|> Stream.map(fn {_, index} -> enumerable_name(index) end)
|> Enum.to_list()
_ ->
[]
end
end
defp enumerable_name(index) do
:"Enumerable #{index}"
end
@spec inject_monitors(pid(), any(), [String.t()], [atom()]) :: [any()]
def inject_monitors(pid, operations, names, types \\ @mapper_types) do
[
operations,
names
]
|> Stream.zip()
|> Stream.map(fn {{:mapper, type, [func]} = mapper, name} ->
if type in types do
{:mapper, type,
[
fn item ->
result = func.(item)
Collector.incr(pid, name)
result
end
]}
else
mapper
end
end)
|> Enum.to_list()
end
@spec inject_enumerable_monitors(pid(), any()) :: [String.t()]
def inject_enumerable_monitors(pid, enumerables) do
enumerables
|> Stream.with_index(1)
|> Stream.map(fn {enum, index} ->
enum
|> Stream.each(fn _ ->
Collector.incr(pid, enumerable_name(index))
end)
end)
|> Enum.to_list()
end
end
|
lib/flow_monitor/inspector.ex
| 0.527073 | 0.569194 |
inspector.ex
|
starcoder
|
defmodule ApiWeb.RoutePatternController do
@moduledoc """
Controller for Routes. Filterable by:
* id
* route_id
"""
use ApiWeb.Web, :api_controller
alias State.RoutePattern
plug(:ensure_path_matches_version)
@filters ~w(id route direction_id stop)
@includes ~w(route representative_trip)
@pagination_opts [:offset, :limit, :order_by]
@description """
Route patterns are used to describe the subsets of a route, representing different possible patterns of where trips may serve. For example, a bus route may have multiple branches, and each branch may be modeled as a separate route pattern per direction. Hierarchically, the route pattern level may be considered to be larger than the trip level and smaller than the route level.
For most MBTA modes, a route pattern will typically represent a unique set of stops that may be served on a route-trip combination. Seasonal schedule changes may result in trips within a route pattern having different routings. In simple changes, such as a single bus stop removed or added between one schedule rating and the next (for example, between the Summer and Fall schedules), trips will be maintained on the same route_pattern_id. If the changes are significant, a new route_pattern_id may be introduced.
For Commuter Rail, express or skip-stop trips use the same route pattern as local trips. Some branches do have multiple route patterns when the train takes a different path. For example, `CR-Providence` has two route patterns per direction, one for the Wickford Junction branch and the other for the Stoughton branch.
"""
def state_module, do: State.RoutePattern
swagger_path :index do
get(path("route_pattern", :index))
description("""
List of route patterns.
#{@description}
""")
common_index_parameters(__MODULE__)
include_parameters()
parameter(
"filter[id]",
:query,
:string,
"Filter by multiple IDs. #{comma_separated_list()}.",
example: "Red-1-0,Red-1-1"
)
filter_param(:id, name: :route)
filter_param(:direction_id)
filter_param(:stop_id, includes_children: true)
consumes("application/vnd.api+json")
produces("application/vnd.api+json")
response(200, "OK", Schema.ref(:RoutePattern))
response(400, "Bad Request", Schema.ref(:BadRequest))
response(403, "Forbidden", Schema.ref(:Forbidden))
response(429, "Too Many Requests", Schema.ref(:TooManyRequests))
end
defp ensure_path_matches_version(conn, _) do
if String.starts_with?(conn.request_path, "/route_patterns") or
conn.assigns.api_version < "2019-07-01" do
conn
else
conn
|> put_status(:not_found)
|> put_view(ApiWeb.ErrorView)
|> render("404.json-api", [])
|> halt()
end
end
def index_data(conn, params) do
with :ok <- Params.validate_includes(params, @includes, conn),
{:ok, filtered} <- Params.filter_params(params, @filters, conn) do
filtered
|> format_filters()
|> expand_stops_filter(:stop_ids, conn.assigns.api_version)
|> RoutePattern.filter_by()
|> State.all(pagination_opts(params, conn))
else
{:error, _, _} = error -> error
end
end
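# Sketch (hypothetical params): format_filters(%{"route" => "Red,Orange",
# "direction_id" => "0"}) would return %{route_ids: ["Red", "Orange"], direction_id: 0}.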
@spec format_filters(%{optional(String.t()) => String.t()}) :: RoutePattern.filters()
defp format_filters(filters) do
Map.new(filters, fn {key, value} ->
case {key, value} do
{"id", ids} ->
{:ids, Params.split_on_comma(ids)}
{"route", route_ids} ->
{:route_ids, Params.split_on_comma(route_ids)}
{"direction_id", direction_id} ->
{:direction_id, Params.direction_id(%{"direction_id" => direction_id})}
{"stop", stop_ids} ->
{:stop_ids, Params.split_on_comma(stop_ids)}
end
end)
end
defp pagination_opts(params, conn) do
Params.filter_opts(params, @pagination_opts, conn, order_by: {:sort_order, :asc})
end
swagger_path :show do
get(path("route_pattern", :show))
description("""
Show a particular route_pattern by the route's id.
#{@description}
""")
parameter(:id, :path, :string, "Unique identifier for route_pattern")
include_parameters()
consumes("application/vnd.api+json")
produces("application/vnd.api+json")
response(200, "OK", Schema.ref(:RoutePattern))
response(403, "Forbidden", Schema.ref(:Forbidden))
response(404, "Not Found", Schema.ref(:NotFound))
response(406, "Not Acceptable", Schema.ref(:NotAcceptable))
response(429, "Too Many Requests", Schema.ref(:TooManyRequests))
end
def show_data(conn, %{"id" => id} = params) do
case Params.validate_includes(params, @includes, conn) do
:ok ->
RoutePattern.by_id(id)
{:error, _, _} = error ->
error
end
end
defp include_parameters(schema) do
ApiWeb.SwaggerHelpers.include_parameters(
schema,
~w(route representative_trip),
description: """
| include | Description |
|-|-|
| `route` | The route that this pattern belongs to. |
| `representative_trip` | A trip that can be considered a canonical trip for the route pattern. This trip can be used to deduce a pattern's canonical set of stops and shape. |
"""
)
end
def swagger_definitions do
import PhoenixSwagger.JsonApi, except: [page: 1]
%{
RoutePatternResource:
resource do
description("""
Information about the different variations of service that may be run within a single route_id, including when and how often they are operated.
See \
[GTFS `route_patterns.txt`](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#route_patternstxt) \
for the base specification.
""")
attributes do
name(
:string,
"""
User-facing description of where trips on the route pattern serve.
These names are published in the form
Destination,
Destination via Street or Landmark,
Origin - Destination,
or Origin - Destination via Street or Landmark.
Note that names for bus and subway route patterns currently do not include the origin location,
but will in the future.
""",
example: "Forge Park/495 - South Station via Fairmount"
)
time_desc(
[:string, :null],
"""
User-facing description of when the route pattern operates. Not all route patterns will include a time description.
""",
example: "Early mornings only",
"x-nullable": true
)
typicality(
:integer,
"""
Explains how common the route pattern is. For the MBTA, this is within the context of the entire route. Current valid values are:
| Value | Description |
|-|-|
| `0` | Not defined |
| `1` | Typical. Pattern is common for the route. Most routes will have only one such pattern per direction. A few routes may have more than 1, such as the Red Line (with one branch to Ashmont and another to Braintree); routes with more than 2 are rare. |
| `2` | Pattern is a deviation from the regular route. |
| `3` | Pattern represents a highly atypical pattern for the route, such as a special routing which only runs a handful of times per day. |
| `4` | Diversions from normal service, such as planned detours, bus shuttles, or snow routes. |
""",
enum: [0, 1, 2, 3, 4]
)
sort_order(
:integer,
"""
Can be used to order the route patterns in a way which is ideal for presentation to customers.
Route patterns with smaller sort_order values should be displayed before those with larger values.
"""
)
end
direction_id_attribute()
relationship(:route)
relationship(:representative_trip)
end,
RoutePatterns: page(:RoutePatternResource),
RoutePattern: single(:RoutePatternResource)
}
end
end
|
apps/api_web/lib/api_web/controllers/route_pattern_controller.ex
| 0.874734 | 0.463019 |
route_pattern_controller.ex
|
starcoder
|
defmodule NYSETL.Engines.E1.State do
@moduledoc """
Used as an Agent-based state machine for processing a single ECLRS file.
"""
use Agent
alias NYSETL.ECLRS
require Logger
@enforce_keys [:file]
defstruct file: nil,
line_count: 0,
duplicate_records: %{total: 0},
error_records: %{total: 0},
matched_records: %{total: 0},
new_records: %{total: 0},
processed_count: 0,
start_time: nil,
status: :loading,
updates: %{total: 0}
def start_link(%ECLRS.File{} = file) do
info("beginning test results extract from filename=#{file.filename}")
Agent.start_link(fn -> new(file) end, name: __MODULE__)
end
def new(%ECLRS.File{} = file), do: __struct__(file: file, start_time: now())
def get(), do: Agent.get(__MODULE__, fn state -> state end)
def finish_reads(line_count) do
Agent.update(__MODULE__, fn
%{status: :finished} = state ->
state
%{processed_count: processed} = state when processed == line_count ->
%{state | status: :finished, line_count: line_count}
state ->
%{state | status: :read_complete, line_count: line_count}
end)
end
def finished?(), do: status() == :finished
def fini() do
Agent.update(__MODULE__, fn state ->
info("finished test results extracting from filename=#{state.file.filename}")
:telemetry.execute([:broadway, :pipeline, :process], %{time: now() - state.start_time}, %{})
ECLRS.finish_processing_file(state.file,
statistics: %{
duplicate: state.duplicate_records,
error: state.error_records,
matched: state.matched_records,
new: state.new_records
}
)
%{state | status: :finished}
end)
end
def status(), do: Agent.get(__MODULE__, fn state -> state.status end)
def update_duplicate_count(values), do: Agent.update(__MODULE__, &with_duplicate_counts(&1, values))
def update_error_count(values), do: Agent.update(__MODULE__, &with_error_counts(&1, values))
def update_matched_count(values), do: Agent.update(__MODULE__, &with_matched_counts(&1, values))
def update_new_count(values), do: Agent.update(__MODULE__, &with_new_counts(&1, values))
def update_processed_count(count) do
Agent.get_and_update(__MODULE__, fn state ->
processed_count = state.processed_count + count
state = %{state | processed_count: processed_count}
{state, state}
end)
|> case do
%{status: :read_complete, line_count: count, processed_count: count} -> fini()
_ -> :ok
end
end
defp info(msg), do: Logger.info("[#{__MODULE__}] #{msg}")
defp now(), do: DateTime.utc_now() |> DateTime.to_unix(:millisecond)
defp with_duplicate_counts(%__MODULE__{} = state, values) do
summaries =
values
|> Enum.reduce(state.duplicate_records, fn {county_id, count}, acc ->
acc
|> Map.update(county_id, count, &(&1 + count))
|> Map.update(:total, count, &(&1 + count))
end)
%{state | duplicate_records: summaries}
end
defp with_error_counts(%__MODULE__{} = state, values) do
summaries =
values
|> Enum.reduce(state.error_records, fn {county_id, count}, acc ->
acc
|> Map.update(county_id, count, &(&1 + count))
|> Map.update(:total, count, &(&1 + count))
end)
%{state | error_records: summaries}
end
defp with_matched_counts(%__MODULE__{} = state, values) do
summaries =
values
|> Enum.reduce(state.matched_records, fn {county_id, count}, acc ->
acc
|> Map.update(county_id, count, &(&1 + count))
|> Map.update(:total, count, &(&1 + count))
end)
%{state | matched_records: summaries}
end
defp with_new_counts(%__MODULE__{} = state, values) do
summaries =
values
|> Enum.reduce(state.new_records, fn {county_id, count}, acc ->
acc
|> Map.update(county_id, count, &(&1 + count))
|> Map.update(:total, count, &(&1 + count))
end)
%{state | new_records: summaries}
end
end
|
lib/nys_etl/engines/e1/state.ex
| 0.640523 | 0.455986 |
state.ex
|
starcoder
|
defmodule Bonbon.TranslationCase do
@moduledoc """
This module defines the test case to be used by
translation model tests.
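
## Example

A hypothetical use, assuming a `Bonbon.Model.Ingredient.Name.Translation`
model exists (the test module name must end in `Test`, since the model under
test is derived by trimming that suffix):

```
defmodule Bonbon.Model.Ingredient.Name.TranslationTest do
use Bonbon.TranslationCase, field: [term: [type: :string, optional: false, case: :lowercase]]
end
```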
"""
use ExUnit.CaseTemplate
using(options) do
options = Keyword.merge([
model: to_string(__CALLER__.module) |> String.trim_trailing("Test") |> String.to_atom,
field: [term: [type: :string, optional: false, case: :lowercase]] #type: [:string], optional: [true, false], case: [:lowercase, :uppercase, nil]
], options)
fields = Enum.reduce(options[:field], %{}, fn { field, attributes }, acc ->
Map.put(acc, field, case attributes[:type] do
:string -> "lemon"
end)
end)
alt_fields = Enum.reduce(options[:field], %{}, fn { field, attributes }, acc ->
Map.put(acc, field, case attributes[:type] do
:string -> "orange"
end)
end)
quote do
import Bonbon.TranslationCase
use Bonbon.ModelCase
alias unquote(options[:model])
@pkey String.to_atom(unquote(options[:model]).__schema__(:source) <> "_pkey")
@valid_model Map.merge(%Translation{ locale_id: 1 }, unquote(Macro.escape(fields)))
test "empty" do
refute_change(%Translation{})
end
test "only locale" do
refute_change(%Translation{}, %{ locale_id: 1 })
end
test "only translate" do
refute_change(%Translation{}, %{ translate_id: 1 })
end
for { field, value } <- Map.to_list(unquote(Macro.escape(fields))) do
@tag [field: field, value: value]
test "only #{field}", %{ field: field, value: value } do
refute_change(%Translation{}, %{ field => value })
end
end
test "without locale" do
refute_change(%Translation{}, Map.merge(%{ translate_id: 1 }, unquote(Macro.escape(fields))))
end
test "without translate" do
changeset = assert_change(%Translation{}, Map.merge(%{ locale_id: 1 }, unquote(Macro.escape(fields))))
|> assert_change_value(:locale_id, 1)
for { field, value } <- Map.to_list(unquote(Macro.escape(fields))) do
assert_change_value(changeset, field, value)
end
end
for { field, value } <- Map.to_list(unquote(Macro.escape(fields))) do
@tag [field: field, value: value]
test "without #{field}", %{ field: field, value: value } do
if unquote(options[:field])[field][:optional] do
assert_change(%Translation{}, Map.merge(%{ locale_id: 1, translate_id: 1 }, Map.delete(unquote(Macro.escape(fields)), field)))
else
refute_change(%Translation{}, Map.merge(%{ locale_id: 1, translate_id: 1 }, Map.delete(unquote(Macro.escape(fields)), field)))
end
end
end
for { field, attributes } <- unquote(options[:field]), attributes[:type] == :string do
casing = attributes[:case]
if casing do
formatter = case casing do
:lowercase -> &String.downcase/1
:uppercase -> &String.upcase/1
end
@tag [field: field, formatter: formatter]
test "#{field} casing", %{ field: field, formatter: formatter } do
assert_change(@valid_model, %{ field => "orange" }) |> assert_change_value(field, formatter.("orange"))
assert_change(@valid_model, %{ field => "Orange" }) |> assert_change_value(field, formatter.("orange"))
assert_change(@valid_model, %{ field => "orangE" }) |> assert_change_value(field, formatter.("orange"))
assert_change(@valid_model, %{ field => "ORANGE" }) |> assert_change_value(field, formatter.("orange"))
end
end
end
test "uniqueness" do
en = Bonbon.Repo.insert!(%Bonbon.Model.Locale{ language: "en" })
fr = Bonbon.Repo.insert!(%Bonbon.Model.Locale{ language: "fr" })
name = Bonbon.Repo.insert!(Translation.changeset(@valid_model, %{ locale_id: en.id }))
assert_change(%Translation{}, Map.merge(%{ locale_id: fr.id + 1 }, unquote(Macro.escape(alt_fields))))
|> assert_insert(:error)
|> assert_error_value(:locale, { "does not exist", [] })
assert_change(%Translation{}, Map.merge(%{ locale_id: en.id, translate_id: name.translate_id }, unquote(Macro.escape(alt_fields))))
|> assert_insert(:error)
|> assert_error_value(@pkey, { "has already been taken", [] })
assert_change(%Translation{}, Map.merge(%{ locale_id: fr.id, translate_id: name.translate_id }, unquote(Macro.escape(alt_fields))))
|> assert_insert(:ok)
assert_change(%Translation{}, Map.merge(%{ locale_id: en.id }, unquote(Macro.escape(alt_fields))))
|> assert_insert(:ok)
end
end
end
end
|
test/support/translation_case.ex
| 0.811265 | 0.449755 |
translation_case.ex
|
starcoder
|
defmodule Cldr.Calendar.Formatter do
@moduledoc """
Calendar formatter behaviour.
This behaviour defines a set of
callbacks that are invoked during
the formatting of a calendar.
At each point in the formatting
process the callbacks are invoked
from the "inside out". That is,
`format_day/4` is invoked for each
day of the week, then `format_week/5`
is called, then `format_month/4`
and finally `format_year/3` is
called if required.
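
## Example

A minimal sketch of an implementation (`MyApp.TextFormatter` is a hypothetical
module; composing the formatted pieces is handled by the calling code in
`Cldr.Calendar.Format`):

```
defmodule MyApp.TextFormatter do
@behaviour Cldr.Calendar.Formatter

@impl true
def format_day(date, _year, _month, _options) do
date.day |> Integer.to_string() |> String.pad_leading(3)
end

@impl true
def format_week(formatted_days, _year, _month, {_week_year, week}, _options) do
"W" <> Integer.to_string(week) <> ": " <> formatted_days
end

@impl true
def format_month(formatted_weeks, year, month, _options) do
Integer.to_string(year) <> "-" <> Integer.to_string(month) <> " " <> formatted_weeks
end

@impl true
def format_year(formatted_months, _year, _options) do
formatted_months
end
end
```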
"""
alias Cldr.Calendar.Formatter.Options
@doc """
Returns the formatted calendar for a year
## Arguments
* `formatted_months` is the result
returned by `format_month/4`
* `year` is the year for which
the calendar is requested
* `options` is a `Cldr.Calendar.Formatter.Options`
struct
## Returns
* An arbitrary result as required.
"""
@callback format_year(
formatted_months :: String.t(),
year :: Calendar.year(),
options :: Keyword.t() | Options.t()
) :: any()
@doc """
Returns the formatted calendar for a month
## Arguments
* `formatted_weeks` is the result
returned by `format_week/5`
* `year` is the year for which
the calendar is requested
* `month` is the month for which
the calendar is requested
* `options` is a `Cldr.Calendar.Formatter.Options`
struct
## Returns
* An arbitrary result as required, which is returned directly when called
via `Cldr.Calendar.Format.month/3`, or passed on to `format_year/3`
otherwise.
"""
@callback format_month(
formatted_weeks :: String.t(),
year :: Calendar.year(),
month :: Calendar.month(),
options :: Keyword.t() | Options.t()
) :: any()
@doc """
Returns the formatted calendar for a week
## Arguments
* `formatted_days` is the result
returned by `format_day/4`
* `year` is the year for which
the calendar is requested
* `month` is the month for which
the calendar is requested
* `week_number` is a 2-tuple of the
form `{year, week_number}` that represents
the week of year for week to be formatted
* `options` is a `Cldr.Calendar.Formatter.Options`
struct
## Returns
* An arbitrary result as required which is
passed to `format_month/4`
"""
@callback format_week(
formatted_days :: String.t(),
year :: Calendar.year(),
month :: Calendar.month(),
week_number :: {Calendar.year(), pos_integer},
options :: Options.t()
) :: any()
@doc """
Returns the formatted calendar for a day
## Arguments
* `date` is the `Date.t` of the
day being formatted
* `year` is the year for which
the calendar is requested
* `month` is the month for which
the calendar is requested
* `options` is a `Cldr.Calendar.Formatter.Options`
struct
## Returns
* An arbitrary result as required which
is passed to `format_week/5`
"""
@callback format_day(
date :: Date.t(),
year :: Calendar.year(),
month :: Calendar.month(),
options :: Options.t()
) :: any()
end
|
lib/formatter.ex
| 0.903572 | 0.649787 |
formatter.ex
|
starcoder
|
defmodule EthClient.Contract.Opcodes do
alias EthClient.Rpc
@moduledoc """
This module provides a function to turn any valid EVM byte code
into opcodes and a function to retrieve a contract and turn it into
its opcodes.
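
## Example

A sketch (assumes `opcodes.json` is present in the working directory; the
bytecode is illustrative):

```
EthClient.Contract.Opcodes.bytecode_to_opcodes("0x6001600201")
# Prints:
# [0] PUSH 0x01
# [1] PUSH 0x02
# [2] ADD
```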
"""
def bytecode_to_opcodes(code) when is_binary(code) do
parse_code(code, [])
end
defp get_opcodes do
opcodes_from_file!()
|> parse_opcodes()
end
defp opcodes_from_file! do
"./opcodes.json"
|> Path.expand()
|> File.read!()
end
defp parse_opcodes(codes) do
codes
|> Jason.decode!()
|> filter_invalid()
end
defp filter_invalid(code_list) do
Enum.reduce(code_list, %{}, fn
%{"Hex" => _hex, "Name" => name}, acc when name == "*invalid*" -> acc
%{"Hex" => hex, "Name" => name}, acc -> Map.put(acc, hex, name)
end)
end
# First remove the leading 0x,
# upcase to keep it consistent with the JSON.
defp parse_code(<<"0x", rest::binary>>, []) do
rest
|> String.upcase()
|> parse_code([], get_opcodes())
end
# Opcodes are base16 numbers ranging from
# 00 up to FF, they come inside a string,
# so we match them every 2 characters and
# check the instruction matching those two characters
# Let's say we have FFAAFF, this function clause
# would match like this:
# opcode = "FF"
# rest = "AAFF"
# And FF matches with the "SELFDESTRUCT" instruction.
defp parse_code(<<opcode::binary-size(2), rest::binary>>, acum, opcodes) do
case Map.get(opcodes, opcode) do
nil ->
parse_code(rest, ["#{opcode} opcode is unknown" | acum], opcodes)
<<"PUSH", n::binary>> ->
{arguments, rest} = fetch_arguments(rest, n, :push)
parse_code(rest, ["PUSH 0x#{arguments}" | acum], opcodes)
instruction ->
parse_code(rest, [instruction | acum], opcodes)
end
end
# When this matches, we have finished parsing the string.
defp parse_code(_, acum, _) do
acum
|> Enum.reverse()
|> Enum.with_index(fn string, index -> "[#{index}] " <> string end)
|> Enum.join("\n")
|> IO.puts()
end
defp fetch_arguments(code, n, :push) when is_binary(n) do
chars_to_fetch = String.to_integer(n) * 2
<<arguments::binary-size(chars_to_fetch), rest::binary>> = code
{arguments, rest}
end
end
|
eth_client/lib/eth_client/contract/opcodes.ex
| 0.533397 | 0.460471 |
opcodes.ex
|
starcoder
|
defmodule Mix.Tasks.Eunit do
use Mix.Task
@recursive true
@preferred_cli_env :test
@shortdoc "Compile and run eunit tests"
@moduledoc """
Run eunit tests for a project.
This task compiles the project and its tests in the test environment,
then runs eunit tests. This task works recursively in umbrella
projects.
## Command line options
A list of patterns to match for test files can be supplied:
```
mix eunit foo* bar*
```
The runner automatically adds \".erl\" to the patterns.
The following command line switches are also available:
* `--verbose`, `-v` - run eunit with the :verbose option
* `--cover`, `-c` - create a coverage report after running the tests
* `--profile`, `-p` - show a list of the 10 slowest tests
* `--start` - start applications after compilation
* `--no-color` - disable color output
* `--force` - force compilation regardless of compilation times
* `--no-compile` - do not compile even if files require compilation
* `--no-archives-check` - do not check archives
* `--no-deps-check` - do not check dependencies
* `--no-elixir-version-check` - do not check Elixir version
The `verbose`, `cover`, `profile`, `start` and `color` switches can be set in
the `mix.exs` file and will apply to every invocation of this task. Switches
set on the command line will override any settings in the mixfile.
```
def project do
[
# ...
eunit: [
verbose: false,
cover: true,
profile: true,
start: true,
color: false
]
]
end
```
## Test search path
All \".erl\" files in the src and test directories are considered.
"""
@switches [
color: :boolean, cover: :boolean, profile: :boolean, verbose: :boolean,
start: :boolean, compile: :boolean, force: :boolean, deps_check: :boolean,
archives_check: :boolean, elixir_version_check: :boolean
]
@aliases [v: :verbose, p: :profile, c: :cover]
@default_cover_opts [output: "cover", tool: Mix.Tasks.Test.Cover]
def run(args) do
project = Mix.Project.config
options = parse_options(args, project)
# add test directory to compile paths and add
# compiler options for test
post_config = eunit_post_config(project)
modify_project_config(post_config)
if Keyword.get(options, :compile, true) do
# make sure mix will let us run compile
ensure_compile()
Mix.Task.run "compile", args
end
if Keyword.get(options, :start, false) do
# start the application
Mix.shell.print_app
Mix.Task.run "app.start", args
end
# start cover
cover_state = start_cover_tool(options[:cover], project)
# run the actual tests
modules =
test_modules(post_config[:erlc_paths], options[:patterns])
|> Enum.map(&module_name_from_path/1)
|> Enum.map(fn m -> {:module, m} end)
eunit_opts = get_eunit_opts(options, post_config)
case :eunit.test(modules, eunit_opts) do
:error -> Mix.raise "mix eunit failed"
:ok -> :ok
end
analyze_coverage(cover_state)
end
defp parse_options(args, project) do
{switches, argv, errors} =
OptionParser.parse(args, strict: @switches, aliases: @aliases)
if errors != [], do: raise OptionParser.ParseError, "#{inspect errors}"
patterns = case argv do
[] -> ["*"]
p -> p
end
eunit_opts = case switches[:verbose] do
true -> [:verbose]
_ -> []
end
(project[:eunit] || [])
|> Keyword.take([:verbose, :profile, :cover, :start, :color])
|> Keyword.merge(switches)
|> Keyword.put(:eunit_opts, eunit_opts)
|> Keyword.put(:patterns, patterns)
end
defp eunit_post_config(existing_config) do
[erlc_paths: existing_config[:erlc_paths] ++ ["test"],
erlc_options: existing_config[:erlc_options] ++ [{:d, :TEST}, :debug_info],
eunit_opts: existing_config[:eunit_opts] || []]
end
defp get_eunit_opts(options, post_config) do
eunit_opts = options[:eunit_opts] ++ post_config[:eunit_opts]
maybe_add_formatter(eunit_opts, options[:profile], Keyword.get(options, :color, true))
end
defp maybe_add_formatter(opts, profile, color) do
if Keyword.has_key?(opts, :report) do
opts
else
format_opts = color_opt(color) ++ profile_opt(profile)
[:no_tty, {:report, {:eunit_progress, format_opts}} | opts]
end
end
defp color_opt(true), do: [:colored]
defp color_opt(_), do: []
defp profile_opt(true), do: [:profile]
defp profile_opt(_), do: []
defp modify_project_config(post_config) do
# note - we have to grab build_path because
# Mix.Project.push resets the build path
build_path = Mix.Project.build_path
|> Path.split
|> Enum.map(fn(p) -> filter_replace(p, "dev", "eunit") end)
|> Path.join
%{name: name, file: file} = Mix.Project.pop
Mix.ProjectStack.post_config(Keyword.merge(post_config,
[build_path: build_path]))
Mix.Project.push name, file
end
defp filter_replace(x, x, r) do
r
end
defp filter_replace(x, _y, _r) do
x
end
defp ensure_compile do
# we have to reenable compile and all of its
# child tasks (compile.erlang, compile.elixir, etc)
Mix.Task.reenable("compile")
Enum.each(compilers(), &Mix.Task.reenable/1)
end
defp compilers do
Mix.Task.all_modules
|> Enum.map(&Mix.Task.task_name/1)
|> Enum.filter(fn(t) -> match?("compile." <> _, t) end)
end
defp test_modules(directories, patterns) do
all_modules = erlang_source_files(directories, patterns)
|> Enum.map(&module_name_from_path/1)
|> Enum.uniq
remove_test_duplicates(all_modules, all_modules, [])
end
defp erlang_source_files(directories, patterns) do
Enum.map(patterns, fn(p) ->
Mix.Utils.extract_files(directories, p <> ".erl")
end)
|> Enum.concat
|> Enum.uniq
end
defp module_name_from_path(p) do
Path.basename(p, ".erl") |> String.to_atom
end
defp remove_test_duplicates([], _all_module_names, accum) do
accum
end
defp remove_test_duplicates([module | rest], all_module_names, accum) do
module = Atom.to_string(module)
if tests_module?(module) &&
Enum.member?(all_module_names, without_test_suffix(module)) do
remove_test_duplicates(rest, all_module_names, accum)
else
remove_test_duplicates(rest, all_module_names, [module | accum])
end
end
defp tests_module?(module_name) do
String.match?(module_name, ~r/_tests$/)
end
defp without_test_suffix(module_name) do
module_name
|> String.replace(~r/_tests$/, "")
|> String.to_atom
end
# coverage was disabled
defp start_cover_tool(nil, _project), do: nil
defp start_cover_tool(false, _project), do: nil
# set up the cover tool
defp start_cover_tool(_cover_switch, project) do
compile_path = Mix.Project.compile_path(project)
cover = Keyword.merge(@default_cover_opts, project[:test_coverage] || [])
# returns a callback
cover[:tool].start(compile_path, cover)
end
# no cover tool was specified
defp analyze_coverage(nil), do: :ok
# run the cover callback
defp analyze_coverage(cb), do: cb.()
end
|
lib/mix/tasks/eunit.ex
| 0.705075 | 0.832951 |
eunit.ex
|
starcoder
|
defmodule EdgeDB.Query do
@moduledoc """
A structure carrying the information related to the query.
It's mostly used internally by the driver, but users can retrieve it along with the `EdgeDB.Result` struct
from a successful query execution by passing the `:raw` option to the `EdgeDB.query*/4` functions. See `t:EdgeDB.query_option/0`.
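For example (a sketch; the struct can also be built by hand for inspection,
though the driver normally populates it):

```
query = %EdgeDB.Query{statement: "SELECT 1", required: true}
query.cardinality
#=> :many
```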
"""
alias EdgeDB.Protocol.{
Codec,
Enums
}
defstruct [
:statement,
cardinality: :many,
io_format: :binary,
required: false,
capabilities: [],
input_codec: nil,
output_codec: nil,
cached: false,
params: []
]
@typedoc """
A structure carrying the information related to the query.
Fields:
* `:statement` - EdgeQL statement for execution.
* `:cardinality` - the expected number of elements in the returned set as a result of the query.
* `:io_format` - the preferred format of the query result.
* `:capabilities` - query capabilities. See
[RFC](https://github.com/edgedb/rfcs/blob/master/text/1004-transactions-api.rst#edgedb-changes)
for more information.
* `:required` - flag specifying that the result should not be empty.
* `:input_codec` - codec for encoding query parameters.
* `:output_codec` - codec for decoding the query result.
* `:cached` - flag specifying whether the request has already been cached by the connection.
* `:params` - query parameters.
"""
@type t() :: %__MODULE__{
statement: String.t(),
cardinality: Enums.Cardinality.t(),
io_format: Enums.IOFormat.t(),
capabilities: Enums.Capabilities.t(),
required: boolean(),
input_codec: Codec.t() | nil,
output_codec: Codec.t() | nil,
cached: boolean(),
params: list(any())
}
end
defimpl DBConnection.Query, for: EdgeDB.Query do
alias EdgeDB.Protocol.Codec
@empty_set %EdgeDB.Set{__items__: []}
@impl DBConnection.Query
def decode(%EdgeDB.Query{}, %EdgeDB.Result{set: %EdgeDB.Set{}} = result, _opts) do
result
end
@impl DBConnection.Query
def decode(
%EdgeDB.Query{output_codec: out_codec, required: required},
%EdgeDB.Result{} = result,
_opts
) do
decode_result(%EdgeDB.Result{result | required: required}, out_codec)
end
@impl DBConnection.Query
def describe(query, _opts) do
query
end
@impl DBConnection.Query
def encode(%EdgeDB.Query{input_codec: nil}, _params, _opts) do
raise EdgeDB.Error.interface_error("query hasn't been prepared")
end
@impl DBConnection.Query
def encode(%EdgeDB.Query{input_codec: in_codec}, params, _opts) do
Codec.encode(in_codec, params)
end
@impl DBConnection.Query
def parse(%EdgeDB.Query{cached: true}, _opts) do
raise EdgeDB.Error.interface_error("query has been prepared")
end
@impl DBConnection.Query
def parse(query, _opts) do
query
end
defp decode_result(%EdgeDB.Result{cardinality: :no_result} = result, _codec) do
result
end
defp decode_result(%EdgeDB.Result{} = result, codec) do
encoded_set = result.set
result = %EdgeDB.Result{result | set: @empty_set}
encoded_set
|> Enum.reverse()
|> Enum.reduce(result, fn data, %EdgeDB.Result{set: set} = result ->
element = Codec.decode(codec, data)
%EdgeDB.Result{result | set: add_element_into_set(set, element)}
end)
|> then(fn %EdgeDB.Result{set: set} = result ->
%EdgeDB.Result{result | set: reverse_elements_in_set(set)}
end)
end
defp add_element_into_set(%EdgeDB.Set{__items__: items} = set, element) do
%EdgeDB.Set{set | __items__: [element | items]}
end
defp reverse_elements_in_set(%EdgeDB.Set{__items__: items} = set) do
%EdgeDB.Set{set | __items__: Enum.reverse(items)}
end
end
|
lib/edgedb/query.ex
| 0.923653 | 0.54359 |
query.ex
|
starcoder
|
defmodule SSHKit.SSH do
@moduledoc ~S"""
Provides convenience functions for working with SSH connections
and executing commands on remote hosts.
## Examples
```
{:ok, conn} = SSHKit.SSH.connect("eg.io", user: "me")
{:ok, output, status} = SSHKit.SSH.run(conn, "uptime")
:ok = SSHKit.SSH.close(conn)
Enum.each(output, fn
{:stdout, data} -> IO.write(data)
{:stderr, data} -> IO.write([IO.ANSI.red, data, IO.ANSI.reset])
end)
IO.puts("$?: #{status}")
```
"""
alias SSHKit.SSH.Connection
alias SSHKit.SSH.Channel
@doc """
Establishes a connection to an SSH server.
Uses `SSHKit.SSH.Connection.open/2` to open a connection.
`options_or_function` can either be a list of options or a function.
If it is a list, it is considered to be a list of options as described in
`SSHKit.SSH.Connection.open/2`. If it is a function, then it is equivalent to
calling `connect(host, [], options_or_function)`.
See the documentation for `connect/3` for more information on this function.
## Example
```
{:ok, conn} = SSHKit.SSH.connect("eg.io", port: 2222, user: "me", timeout: 1000)
```
"""
@callback connect(binary(), keyword() | fun()) :: {:ok, Connection.t} | {:error, any()}
def connect(host, options_or_function \\ [])
def connect(host, function) when is_function(function), do: connect(host, [], function)
def connect(host, options) when is_list(options), do: Connection.open(host, options)
@doc """
Similar to `connect/2` but expects a function as its last argument.
The connection is opened, given to the function as an argument and
automatically closed after the function returns, regardless of any
errors raised while executing the function.
Returns `{:ok, function_result}` in case of success,
`{:error, reason}` otherwise.
## Examples
```
SSH.connect("eg.io", port: 2222, user: "me", fn conn ->
SCP.upload(conn, "list.txt")
end)
```
See `SSHKit.SSH.Connection.open/2` for the list of available `options`.
"""
def connect(host, options, function) do
case connect(host, options) do
{:ok, conn} ->
try do
{:ok, function.(conn)}
after
:ok = close(conn)
end
other -> other
end
end
@doc """
Closes an SSH connection.
Uses `SSHKit.SSH.Connection.close/1` to close the connection.
## Example
```
:ok = SSHKit.SSH.close(conn)
```
"""
@callback close(Connection.t) :: :ok
def close(connection) do
Connection.close(connection)
end
@doc """
Executes a command on the remote and aggregates incoming messages.
Using the default handler, returns `{:ok, output, status}` or `{:error,
reason}`. By default, command output is captured into a list of tuples of the
form `{:stdout, data}` or `{:stderr, data}`.
A custom handler function can be provided to handle channel messages.
For further details on handling incoming messages,
see `SSHKit.SSH.Channel.loop/4`.
## Options
* `:timeout` - maximum wait time between messages, defaults to `:infinity`
* `:fun` - handler function passed to `SSHKit.SSH.Channel.loop/4`
* `:acc` - initial accumulator value used in the loop
Any other options will be passed on to `SSHKit.SSH.Channel.open/2` when
creating the channel for executing the command.
## Example
```
{:ok, output, status} = SSHKit.SSH.run(conn, "uptime")
IO.inspect(output)
```
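
A sketch of a custom handler that keeps only the exit status (the handler
shape follows `SSHKit.SSH.Channel.loop/4`; names here are illustrative):

```
handler = fn
{:exit_status, _, code}, _acc -> {:cont, code}
_message, acc -> {:cont, acc}
end

status = SSHKit.SSH.run(conn, "true", fun: handler, acc: {:cont, nil})
```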
"""
@callback run(Connection.t, binary(), keyword()) :: any()
def run(connection, command, options \\ []) do
{acc, options} = Keyword.pop(options, :acc, {:cont, {[], nil}})
{fun, options} = Keyword.pop(options, :fun, &capture/2)
timeout = Keyword.get(options, :timeout, :infinity)
with {:ok, channel} <- Channel.open(connection, options) do
case Channel.exec(channel, command, timeout) do
:success ->
channel
|> Channel.loop(timeout, acc, fun)
|> elem(1)
:failure ->
{:error, :failure}
err ->
err
end
end
end
defp capture(message, acc = {buffer, status}) do
next = case message do
{:data, _, 0, data} ->
{[{:stdout, data} | buffer], status}
{:data, _, 1, data} ->
{[{:stderr, data} | buffer], status}
{:exit_status, _, code} ->
{buffer, code}
{:closed, _} ->
{:ok, Enum.reverse(buffer), status}
_ ->
acc
end
{:cont, next}
end
end
|
lib/sshkit/ssh.ex
| 0.903033 | 0.82963 |
ssh.ex
|
starcoder
|
defmodule Snowpack.Protocol do
@moduledoc """
Implementation of `DBConnection` behaviour for `Snowpack.ODBC`.
Handles translation of concepts to what ODBC expects and holds
state for a connection.
This module is not called directly, but rather through
other `Snowpack` modules or `DBConnection` functions.
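
For example, a hypothetical connection (the ODBC keys shown are illustrative):

```
{:ok, conn} =
DBConnection.start_link(Snowpack.Protocol,
connection: [dsn: "snowflake", uid: "user", pwd: "secret"]
)
```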
"""
use DBConnection
alias Snowpack.{ODBC, Result, Telemetry, TypeCache, TypeParser}
defstruct pid: nil, snowflake: :idle, conn_opts: []
@typedoc """
Process state.
Includes:
* `:pid`: the pid of the ODBC process
* `:snowflake`: the transaction state. Can be `:idle` (not in a transaction).
* `:conn_opts`: the options used to set up the connection.
"""
@type state :: %__MODULE__{
pid: pid(),
snowflake: :idle,
conn_opts: Keyword.t()
}
@type opts :: Keyword.t()
@type query :: Snowpack.Query.t()
@type params :: [{:odbc.odbc_data_type(), :odbc.value()}]
@type result :: Result.t()
@type status :: :idle | :error
@spec connect(opts) :: {:ok, state} | {:error, Exception.t()}
def connect(opts) do
conn_opts = Keyword.fetch!(opts, :connection)
conn_str =
Enum.reduce(conn_opts, "", fn {key, value}, acc ->
acc <> "#{key}=#{value};"
end)
{:ok, pid} = ODBC.start_link(conn_str, opts)
{:ok,
%__MODULE__{
pid: pid,
conn_opts: opts,
snowflake: :idle
}}
end
@spec disconnect(err :: String.t() | Exception.t(), state) :: :ok
def disconnect(_err, %{pid: pid} = _state) do
:ok = ODBC.disconnect(pid)
end
@spec reconnect(opts, state) :: {:ok, state}
def reconnect(new_opts, state) do
disconnect("Reconnecting", state)
connect(new_opts)
end
@spec checkout(state) ::
{:ok, state}
| {:disconnect, Exception.t(), state}
def checkout(state) do
{:ok, state}
end
@spec checkin(state) ::
{:ok, state}
| {:disconnect, Exception.t(), state}
def checkin(state) do
{:ok, state}
end
@spec handle_prepare(query, opts, state) ::
{:ok, query, state}
| {:error | :disconnect, Exception.t(), state}
def handle_prepare(query, _opts, state) do
{:ok, query, state}
end
@spec handle_execute(query, params, opts, state) ::
{:ok, query(), result(), state}
| {:error | :disconnect, Exception.t(), state}
def handle_execute(query, params, opts, state) do
{status, message, new_state} = do_query(query, params, opts, state)
execute_return(status, query, message, new_state, opts)
end
@spec handle_close(query, opts, state) :: {:ok, result, state}
def handle_close(_query, _opts, state) do
{:ok, %Result{}, state}
end
@spec ping(state :: any()) ::
{:ok, new_state :: any()}
| {:disconnect, Exception.t(), new_state :: any()}
def ping(state) do
query = %Snowpack.Query{name: "ping", statement: "SELECT /* snowpack:heartbeat */ 1;"}
case do_query(query, [], [], state) do
{:ok, _, new_state} -> {:ok, new_state}
{:error, reason, new_state} -> {:disconnect, reason, new_state}
other -> other
end
end
@spec handle_status(opts, state) :: {DBConnection.status(), state}
def handle_status(_, %{snowflake: {status, _}} = s), do: {status, s}
def handle_status(_, %{snowflake: status} = s), do: {status, s}
# NOT IMPLEMENTED
@spec handle_begin(opts, state) ::
{:ok, result, state}
| {status, state}
| {:disconnect, Exception.t(), state}
def handle_begin(_opts, _state) do
throw("not implemeted")
end
@spec handle_commit(opts, state) ::
{:ok, result, state}
| {status, state}
| {:disconnect, Exception.t(), state}
def handle_commit(_opts, _state) do
throw("not implemeted")
end
@spec handle_rollback(opts, state) ::
{:ok, result(), state}
| {status, state}
| {:disconnect, Exception.t(), state}
def handle_rollback(_opts, _state) do
throw("not implemeted")
end
@spec handle_declare(any, any, any, any) :: none
def handle_declare(_query, _params, _opts, _state) do
throw("not implemeted")
end
@spec handle_first(any, any, any, any) :: none
def handle_first(_query, _cursor, _opts, _state) do
throw("not implemeted")
end
@spec handle_next(any, any, any, any) :: none
def handle_next(_query, _cursor, _opts, _state) do
throw("not implemeted")
end
@spec handle_deallocate(any, any, any, any) :: none
def handle_deallocate(_query, _cursor, _opts, _state) do
throw("not implemeted")
end
@spec handle_fetch(any, any, any, any) :: none
def handle_fetch(_query, _cursor, _opts, _state) do
throw("not implemeted")
end
defp do_query(query, params, opts, state) do
metadata = %{params: params, query: query.statement}
start_time = Telemetry.start(:query, metadata)
try do
result =
case TypeCache.get_column_types(query.statement) do
{:ok, column_types} ->
query_result = ODBC.query(state.pid, query.statement, params, opts)
Tuple.append(query_result, %{column_types: column_types})
nil ->
with {:selected, columns, rows, [{query_id}]} <-
ODBC.query(state.pid, query.statement, params, opts, true),
{:ok, column_types} <-
TypeCache.fetch_column_types(state.pid, query_id, to_string(query.statement)) do
{:selected, columns, rows, %{column_types: column_types}}
end
end
case result do
{:error, %Snowpack.Error{odbc_code: :connection_exception} = error} ->
metadata = Map.put(metadata, :error, error)
Telemetry.stop(:query, start_time, metadata)
{:disconnect, error, state}
{:error, error} ->
metadata = Map.put(metadata, :error, error)
Telemetry.stop(:query, start_time, metadata)
{:error, error, state}
{:selected, columns, rows, %{column_types: column_types}} ->
typed_rows = TypeParser.parse_rows(column_types, columns, rows)
num_rows = Enum.count(typed_rows)
metadata = Map.merge(metadata, %{result: :selected, num_rows: num_rows})
Telemetry.stop(:query, start_time, metadata)
{:ok,
%Result{
columns: Enum.map(columns, &to_string(&1)),
rows: typed_rows,
num_rows: num_rows
}, state}
{:updated, num_rows} ->
metadata = Map.merge(metadata, %{result: :updated, num_rows: num_rows})
Telemetry.stop(:query, start_time, metadata)
{:ok, %Result{num_rows: num_rows}, state}
{:updated, :undefined, [{_query_id}]} ->
metadata = Map.merge(metadata, %{result: :updated, num_rows: 1})
Telemetry.stop(:query, start_time, metadata)
{:ok, %Result{num_rows: 1}, state}
end
catch
kind, error ->
Telemetry.exception(:query, start_time, kind, error, __STACKTRACE__, metadata)
:erlang.raise(kind, error, __STACKTRACE__)
end
end
defp execute_return(status, _query, message, state, mode: _savepoint) do
{status, message, state}
end
defp execute_return(status, query, message, state, _opts) do
case status do
:ok -> {status, query, message, state}
_ -> {status, message, state}
end
end
end
|
lib/snowpack/protocol.ex
| 0.862323 | 0.449091 |
protocol.ex
|
starcoder
|
defmodule Sqlitex.Statement do
alias Sqlitex.Row
@moduledoc """
Provides an interface for working with SQLite prepared statements.
Care should be taken when using prepared statements directly - they are not
immutable objects like most things in Elixir. Sharing a statement between
different processes can cause problems if the processes accidentally
interleave operations on the statement. It's a good idea to create different
statements per process, or to wrap the statements up in a GenServer to prevent
interleaving operations.
## Example
```
iex(2)> {:ok, db} = Sqlitex.open(":memory:")
iex(3)> Sqlitex.query(db, "CREATE TABLE data (id, name);")
{:ok, []}
iex(4)> {:ok, statement} = Sqlitex.Statement.prepare(db, "INSERT INTO data VALUES (?, ?);")
iex(5)> Sqlitex.Statement.bind_values(statement, [1, "hello"])
iex(6)> Sqlitex.Statement.exec(statement)
:ok
iex(7)> {:ok, statement} = Sqlitex.Statement.prepare(db, "SELECT * FROM data;")
iex(8)> Sqlitex.Statement.fetch_all(statement, db_timeout: 1_000)
{:ok, [[id: 1, name: "hello"]]}
iex(9)> Sqlitex.close(db)
:ok
```
## RETURNING Clause Support
SQLite does not support the RETURNING extension to INSERT, DELETE, and UPDATE
commands. (See https://www.postgresql.org/docs/9.6/static/sql-insert.html for
a description of the Postgres implementation of this clause.)
Ecto 2.0+ relies on being able to capture this information, so we have invented
our own implementation with the following syntax:
```
;--RETURNING ON [INSERT | UPDATE | DELETE] <table>,<col>,<col>,...
```
When the `prepare/2` and `prepare!/2` functions are given a query that contains
the above returning clause, they separate this clause from the end of the query
and store it separately in the `Statement` struct. Only the portion of the query
preceding the returning clause is passed to SQLite's prepare function.
Later, when such a statement struct is passed to `fetch_all/3` or `fetch_all!/3`
the returning clause is parsed and the query is performed with the following
additional logic:
```
SAVEPOINT sp_<random>;
CREATE TEMP TABLE temp.t_<random> (<returning>);
CREATE TEMP TRIGGER tr_<random> AFTER UPDATE ON main.<table> BEGIN
INSERT INTO t_<random> SELECT NEW.<returning>;
END;
UPDATE ...; -- whatever the original statement was
DROP TRIGGER tr_<random>;
SELECT <returning> FROM temp.t_<random>;
DROP TABLE temp.t_<random>;
RELEASE sp_<random>;
```
A more detailed description of the motivations for making this change is here:
https://github.com/jazzyb/sqlite_ecto/wiki/Sqlite.Ecto's-Pseudo-Returning-Clause
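
A hypothetical round trip using the clause (assumes a `data` table with `id`
and `name` columns; the returned `id` depends on the table contents):

```
{:ok, stmt} = Sqlitex.Statement.prepare(db, "INSERT INTO data (name) VALUES (?1) ;--RETURNING ON INSERT data,id")
{:ok, stmt} = Sqlitex.Statement.bind_values(stmt, ["hello"])
{:ok, rows} = Sqlitex.Statement.fetch_all(stmt)
# rows is a list like [[id: 1]]
```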
"""
defstruct database: nil,
statement: nil,
returning: nil,
column_names: [],
column_types: []
alias Sqlitex.Config
@doc """
Prepare a Sqlitex.Statement
## Parameters
* `db` - The database to prepare the statement for.
* `sql` - The SQL of the statement to prepare.
Also accepts the following keyword options:
* `db_timeout` - The time in ms allowed for the statement to run. Defaults to 5000, or the :db_timeout value in Application env.
## Returns
* `{:ok, statement}` on success
* See `:esqlite3.prepare` for errors.
"""
def prepare(db, sql, opts \\ []) do
timeout = Config.db_timeout(opts)
with {:ok, stmt} <- do_prepare(db, sql, timeout),
{:ok, stmt} <- get_column_names(stmt, timeout),
{:ok, stmt} <- get_column_types(stmt, timeout),
{:ok, stmt} <- extract_returning_clause(stmt, sql),
do: {:ok, stmt}
end
@doc """
Same as `prepare/3` but raises a Sqlitex.Statement.PrepareError on error.
Returns a new statement otherwise.
"""
def prepare!(db, sql, opts \\ []) do
case prepare(db, sql, opts) do
{:ok, statement} -> statement
{:error, reason} -> raise Sqlitex.Statement.PrepareError, reason: reason
end
end
@doc """
Binds values to a Sqlitex.Statement.
## Parameters
* `statement` - The statement to bind values into.
* `values` - A list of values to bind into the statement.
Also accepts the following keyword options:
* `db_timeout` - The time in ms allowed for the statement to run. Defaults to 5000, or the :db_timeout value in Application env.
## Returns
* `{:ok, statement}` on success
* See `:esqlite3.prepare` for errors.
## Value transformations
Some values will be transformed before insertion into the database.
* `nil` - Converted to :undefined
* `true` - Converted to 1
* `false` - Converted to 0
* `datetime` - Converted into a string. See datetime_to_string
* `%Decimal` - Converted into a number.
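For example (a sketch, assuming `stmt` expects three bound parameters):

```
{:ok, stmt} = Sqlitex.Statement.bind_values(stmt, [nil, true, {2018, 1, 15}])
# binds :undefined, 1, and "2018-01-15" respectively
```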
"""
def bind_values(statement, values, opts \\ []) do
case :esqlite3.bind(statement.statement, translate_bindings(values), Config.db_timeout(opts)) do
{:error, _} = error -> error
:ok -> {:ok, statement}
end
end
@doc """
Same as `bind_values/3` but raises a Sqlitex.Statement.BindValuesError on error.
Returns the statement otherwise.
"""
def bind_values!(statement, values, opts \\ []) do
case bind_values(statement, values, opts) do
{:ok, statement} -> statement
{:error, reason} -> raise Sqlitex.Statement.BindValuesError, reason: reason
end
end
@doc """
Fetches all rows using a statement.
Should be called after the statement has been bound.
## Parameters
* `statement` - The statement to run.
Also accepts the following keyword options:
* `db_timeout` - The time in ms allowed for the statement to run. Defaults to 5000, or the :db_timeout value in Application env.
* `db_chunk_size` - The internal bulk size. Defaults to 5000, or the :db_chunk_size value in Application env.
* `into` - The collection to put the results into. Defaults to an empty list.
## Returns
* `{:ok, results}`
* `{:error, error}`
"""
def fetch_all(statement, opts \\ []) do
case raw_fetch_all(statement, opts) do
{:error, _} = other -> other
raw_data ->
into = Keyword.get(opts, :into, [])
{:ok, Row.from(statement.column_types, statement.column_names, raw_data, into)}
end
end
defp raw_fetch_all(%__MODULE__{returning: nil, statement: statement}, opts) do
:esqlite3.fetchall(statement, Config.db_chunk_size(opts), Config.db_timeout(opts))
end
defp raw_fetch_all(statement, opts) do
returning_query(statement, opts)
end
@doc """
Same as `fetch_all/3` but raises a Sqlitex.Statement.FetchAllError on error.
Returns the results otherwise.
"""
def fetch_all!(statement, opts) do
case fetch_all(statement, opts) do
{:ok, results} -> results
{:error, reason} -> raise Sqlitex.Statement.FetchAllError, reason: reason
end
end
@doc """
Runs a statement that returns no results.
Should be called after the statement has been bound.
## Parameters
* `statement` - The statement to run.
Also accepts the following keyword options:
* `db_timeout` - The time in ms allowed for the statement to run. Defaults to 5000, or the :db_timeout value in Application env.
## Returns
* `:ok`
* `{:error, error}`
"""
def exec(statement, opts \\ []) do
case :esqlite3.step(statement.statement, Config.db_timeout(opts)) do
# esqlite3.step returns some odd values, so let's translate them:
:"$done" -> :ok
:"$busy" -> {:error, {:busy, "Sqlite database is busy"}}
other -> other
end
end
@doc """
Same as `exec/2` but raises a Sqlitex.Statement.ExecError on error.
Returns :ok otherwise.
"""
def exec!(statement, opts \\ []) do
case exec(statement, opts) do
:ok -> :ok
{:error, reason} -> raise Sqlitex.Statement.ExecError, reason: reason
end
end
defp do_prepare(db, sql, timeout) do
case :esqlite3.prepare(sql, db, timeout) do
{:ok, statement} ->
{:ok, %Sqlitex.Statement{database: db, statement: statement}}
other -> other
end
end
defp get_column_names(%Sqlitex.Statement{statement: sqlite_statement} = statement, timeout) do
names =
sqlite_statement
|> :esqlite3.column_names(timeout)
|> Tuple.to_list
{:ok, %Sqlitex.Statement{statement | column_names: names}}
end
defp get_column_types(%Sqlitex.Statement{statement: sqlite_statement} = statement, timeout) do
types =
sqlite_statement
|> :esqlite3.column_types(timeout)
|> Tuple.to_list
{:ok, %Sqlitex.Statement{statement | column_types: types}}
end
defp translate_bindings(params) do
Enum.map(params, fn
nil -> :undefined
true -> 1
false -> 0
date = {_yr, _mo, _da} -> date_to_string(date)
time = {_hr, _mi, _se, _usecs} -> time_to_string(time)
datetime = {{_yr, _mo, _da}, {_hr, _mi, _se, _usecs}} -> datetime_to_string(datetime)
%Decimal{sign: sign, coef: coef, exp: exp} -> sign * coef * :math.pow(10, exp)
other -> other
end)
end
defp date_to_string({yr, mo, da}) do
Enum.join [zero_pad(yr, 4), "-", zero_pad(mo, 2), "-", zero_pad(da, 2)]
end
def time_to_string({hr, mi, se, usecs}) do
Enum.join [zero_pad(hr, 2), ":", zero_pad(mi, 2), ":", zero_pad(se, 2), ".", zero_pad(usecs, 6)]
end
defp datetime_to_string({date = {_yr, _mo, _da}, time = {_hr, _mi, _se, _usecs}}) do
Enum.join [date_to_string(date), " ", time_to_string(time)]
end
defp zero_pad(num, len) do
str = Integer.to_string num
String.duplicate("0", len - String.length(str)) <> str
end
# --- Returning clause support
@pseudo_returning_statement ~r(\s*;--RETURNING\s+ON\s+)i
defp extract_returning_clause(statement, sql) do
if Regex.match?(@pseudo_returning_statement, sql) do
[_, returning_clause] = Regex.split(@pseudo_returning_statement, sql, parts: 2)
case parse_return_contents(returning_clause) do
{_table, cols, _command, _ref} = info ->
{:ok, %{statement | returning: info,
column_names: Enum.map(cols, &String.to_atom/1),
column_types: Enum.map(cols, fn _ -> nil end)}}
err ->
err
end
else
{:ok, statement}
end
end
defp parse_return_contents(<<"INSERT ", values::binary>>) do
[table | cols] = String.split(values, ",")
{table, cols, "INSERT", "NEW"}
end
defp parse_return_contents(<<"UPDATE ", values::binary>>) do
[table | cols] = String.split(values, ",")
{table, cols, "UPDATE", "NEW"}
end
defp parse_return_contents(<<"DELETE ", values::binary>>) do
[table | cols] = String.split(values, ",")
{table, cols, "DELETE", "OLD"}
end
defp parse_return_contents(_) do
{:error, :invalid_returning_clause}
end
defp returning_query(%__MODULE__{database: db} = stmt, opts) do
sp = "sp_#{random_id()}"
{:ok, _} = db_exec(db, "SAVEPOINT #{sp}")
case returning_query_in_savepoint(sp, stmt, opts) do
{:error, _} = error ->
rollback(db, sp)
error
result ->
{:ok, _} = db_exec(db, "RELEASE #{sp}")
result
end
end
defp returning_query_in_savepoint(sp, %__MODULE__{database: db,
statement: statement,
returning: {table, cols, cmd, ref}}, opts)
do
temp_table = "t_#{random_id()}"
temp_fields = Enum.join(cols, ", ")
trigger_name = "tr_#{random_id()}"
trigger_fields = Enum.map_join(cols, ", ", &"#{ref}.#{&1}")
trigger = """
CREATE TEMP TRIGGER #{trigger_name} AFTER #{cmd} ON main.#{table} BEGIN
INSERT INTO #{temp_table} SELECT #{trigger_fields};
END;
"""
column_names = Enum.join(cols, ", ")
with {:ok, _} = db_exec(db, "CREATE TEMP TABLE #{temp_table} (#{temp_fields})"),
{:ok, _} = db_exec(db, trigger),
result = :esqlite3.fetchall(statement, Config.db_chunk_size(opts), Config.db_timeout(opts)),
{:ok, rows} = db_exec(db, "SELECT #{column_names} FROM #{temp_table}"),
{:ok, _} = db_exec(db, "DROP TRIGGER IF EXISTS #{trigger_name}"),
{:ok, _} = db_exec(db, "DROP TABLE IF EXISTS #{temp_table}")
do
if is_list(result), do: rows, else: result
end
catch
e ->
rollback(db, sp)
raise e
end
defp rollback(db, sp) do
{:ok, _} = db_exec(db, "ROLLBACK TO SAVEPOINT #{sp}")
{:ok, _} = db_exec(db, "RELEASE #{sp}")
end
@spec db_exec(Sqlitex.connection, iodata()) :: {:ok, [tuple()]}
defp db_exec(db, sql) do
case :esqlite3.q(sql, db) do
{:error, _} = error ->
error
result ->
{:ok, result}
end
end
defp random_id, do: :rand.uniform |> Float.to_string |> String.slice(2..10)
end
|
deps/sqlitex/lib/sqlitex/statement.ex
| 0.913351 | 0.781289 |
statement.ex
|
starcoder
|
defmodule AWS.SWF do
@moduledoc """
Amazon Simple Workflow Service
The Amazon Simple Workflow Service (Amazon SWF) makes it easy to build
applications that use Amazon's cloud to coordinate work across distributed
components. In Amazon SWF, a *task* represents a logical unit of work that
is performed by a component of your workflow. Coordinating tasks in a
workflow involves managing intertask dependencies, scheduling, and
concurrency in accordance with the logical flow of the application.
Amazon SWF gives you full control over implementing tasks and coordinating
them without worrying about underlying complexities such as tracking their
progress and maintaining their state.
This documentation serves as reference only. For a broader overview of the
Amazon SWF programming model, see the [Amazon SWF Developer
Guide](http://docs.aws.amazon.com/amazonswf/latest/developerguide/).
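
## Example

A hypothetical call (assumes `client` is an `AWS.Client` configured with
credentials and a region; the input map mirrors the SWF JSON API):

```
AWS.SWF.count_open_workflow_executions(client, %{"domain" => "my-domain"})
```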
"""
@doc """
Returns the number of closed workflow executions within the given domain
that meet the specified filtering criteria.
<note>This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.</note> **Access
Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>Constrain the following
parameters by using a `Condition` element with the appropriate keys. <ul>
<li>`tagFilter.tag`: String constraint. The key is
`swf:tagFilter.tag`.</li> <li>`typeFilter.name`: String constraint. The key
is `swf:typeFilter.name`.</li> <li>`typeFilter.version`: String constraint.
The key is `swf:typeFilter.version`.</li> </ul> </li> </ul> If the caller
does not have sufficient permissions to invoke the action, or the parameter
values fall outside the specified constraints, the action fails. The
associated event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def count_closed_workflow_executions(client, input, options \\ []) do
request(client, "CountClosedWorkflowExecutions", input, options)
end
@doc """
Returns the number of open workflow executions within the given domain that
meet the specified filtering criteria.
<note>This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.</note> **Access
Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>Constrain the following
parameters by using a `Condition` element with the appropriate keys. <ul>
<li>`tagFilter.tag`: String constraint. The key is
`swf:tagFilter.tag`.</li> <li>`typeFilter.name`: String constraint. The key
is `swf:typeFilter.name`.</li> <li>`typeFilter.version`: String constraint.
The key is `swf:typeFilter.version`.</li> </ul> </li> </ul> If the caller
does not have sufficient permissions to invoke the action, or the parameter
values fall outside the specified constraints, the action fails. The
associated event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def count_open_workflow_executions(client, input, options \\ []) do
request(client, "CountOpenWorkflowExecutions", input, options)
end
@doc """
Returns the estimated number of activity tasks in the specified task list.
The count returned is an approximation and is not guaranteed to be exact.
If you specify a task list that no activity task was ever scheduled in then
0 will be returned.
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>Constrain the `taskList.name`
parameter by using a **Condition** element with the `swf:taskList.name` key
to allow the action to access only certain task lists.</li> </ul> If the
caller does not have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails.
The associated event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def count_pending_activity_tasks(client, input, options \\ []) do
request(client, "CountPendingActivityTasks", input, options)
end
@doc """
Returns the estimated number of decision tasks in the specified task list.
The count returned is an approximation and is not guaranteed to be exact.
If you specify a task list that no decision task was ever scheduled in then
0 will be returned.
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>Constrain the `taskList.name`
parameter by using a **Condition** element with the `swf:taskList.name` key
to allow the action to access only certain task lists.</li> </ul> If the
caller does not have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails.
The associated event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def count_pending_decision_tasks(client, input, options \\ []) do
request(client, "CountPendingDecisionTasks", input, options)
end
@doc """
Deprecates the specified *activity type*. After an activity type has been
deprecated, you cannot create new tasks of that activity type. Tasks of
this type that were scheduled before the type was deprecated will continue
to run.
<note>This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.</note> **Access
Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>Constrain the following
parameters by using a `Condition` element with the appropriate keys. <ul>
<li>`activityType.name`: String constraint. The key is
`swf:activityType.name`.</li> <li>`activityType.version`: String
constraint. The key is `swf:activityType.version`.</li> </ul> </li> </ul>
If the caller does not have sufficient permissions to invoke the action, or
the parameter values fall outside the specified constraints, the action
fails. The associated event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def deprecate_activity_type(client, input, options \\ []) do
request(client, "DeprecateActivityType", input, options)
end
@doc """
Deprecates the specified domain. After a domain has been deprecated it
cannot be used to create new workflow executions or register new types.
However, you can still use visibility actions on this domain. Deprecating a
domain also deprecates all activity and workflow types registered in the
domain. Executions that were started before the domain was deprecated will
continue to run.
<note>This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.</note> **Access
Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>You cannot use an IAM policy
to constrain this action's parameters.</li> </ul> If the caller does not
have sufficient permissions to invoke the action, or the parameter values
fall outside the specified constraints, the action fails. The associated
event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def deprecate_domain(client, input, options \\ []) do
request(client, "DeprecateDomain", input, options)
end
@doc """
Deprecates the specified *workflow type*. After a workflow type has been
deprecated, you cannot create new executions of that type. Executions that
were started before the type was deprecated will continue to run. A
deprecated workflow type may still be used when calling visibility actions.
<note>This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.</note> **Access
Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>Constrain the following
parameters by using a `Condition` element with the appropriate keys. <ul>
<li>`workflowType.name`: String constraint. The key is
`swf:workflowType.name`.</li> <li>`workflowType.version`: String
constraint. The key is `swf:workflowType.version`.</li> </ul> </li> </ul>
If the caller does not have sufficient permissions to invoke the action, or
the parameter values fall outside the specified constraints, the action
fails. The associated event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def deprecate_workflow_type(client, input, options \\ []) do
request(client, "DeprecateWorkflowType", input, options)
end
@doc """
Returns information about the specified activity type. This includes
configuration settings provided when the type was registered and other
general information about the type.
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>Constrain the following
parameters by using a `Condition` element with the appropriate keys. <ul>
<li>`activityType.name`: String constraint. The key is
`swf:activityType.name`.</li> <li>`activityType.version`: String
constraint. The key is `swf:activityType.version`.</li> </ul> </li> </ul>
If the caller does not have sufficient permissions to invoke the action, or
the parameter values fall outside the specified constraints, the action
fails. The associated event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def describe_activity_type(client, input, options \\ []) do
request(client, "DescribeActivityType", input, options)
end
@doc """
Returns information about the specified domain, including description and
status.
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>You cannot use an IAM policy
to constrain this action's parameters.</li> </ul> If the caller does not
have sufficient permissions to invoke the action, or the parameter values
fall outside the specified constraints, the action fails. The associated
event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def describe_domain(client, input, options \\ []) do
request(client, "DescribeDomain", input, options)
end
@doc """
Returns information about the specified workflow execution including its
type and some statistics.
<note>This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.</note> **Access
Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>You cannot use an IAM policy
to constrain this action's parameters.</li> </ul> If the caller does not
have sufficient permissions to invoke the action, or the parameter values
fall outside the specified constraints, the action fails. The associated
event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def describe_workflow_execution(client, input, options \\ []) do
request(client, "DescribeWorkflowExecution", input, options)
end
@doc """
Returns information about the specified *workflow type*. This includes
configuration settings specified when the type was registered and other
information such as creation date, current status, and so on.
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>Constrain the following
parameters by using a `Condition` element with the appropriate keys. <ul>
<li>`workflowType.name`: String constraint. The key is
`swf:workflowType.name`.</li> <li>`workflowType.version`: String
constraint. The key is `swf:workflowType.version`.</li> </ul> </li> </ul>
If the caller does not have sufficient permissions to invoke the action, or
the parameter values fall outside the specified constraints, the action
fails. The associated event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def describe_workflow_type(client, input, options \\ []) do
request(client, "DescribeWorkflowType", input, options)
end
@doc """
Returns the history of the specified workflow execution. The results may be
split into multiple pages. To retrieve subsequent pages, make the call
again using the `nextPageToken` returned by the initial call.
<note>This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.</note> **Access
Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>You cannot use an IAM policy
to constrain this action's parameters.</li> </ul> If the caller does not
have sufficient permissions to invoke the action, or the parameter values
fall outside the specified constraints, the action fails. The associated
event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
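
## Example

A hedged sketch of paging through a history; the domain, identifiers, and
the shape of `client` are illustrative assumptions, not values defined by
this module:

```elixir
input = %{
  "domain" => "my-domain",
  "execution" => %{"workflowId" => "order-1234", "runId" => run_id}
}

{:ok, page, _http} = AWS.SWF.get_workflow_execution_history(client, input)

# If "nextPageToken" is present, pass it back to fetch the next page.
{:ok, _next_page, _http} =
  AWS.SWF.get_workflow_execution_history(
    client,
    Map.put(input, "nextPageToken", page["nextPageToken"])
  )
```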
"""
def get_workflow_execution_history(client, input, options \\ []) do
request(client, "GetWorkflowExecutionHistory", input, options)
end
@doc """
Returns information about all activities registered in the specified domain
that match the specified name and registration status. The result includes
information like creation date, current status of the activity, etc. The
results may be split into multiple pages. To retrieve subsequent pages,
make the call again using the `nextPageToken` returned by the initial call.
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>You cannot use an IAM policy
to constrain this action's parameters.</li> </ul> If the caller does not
have sufficient permissions to invoke the action, or the parameter values
fall outside the specified constraints, the action fails. The associated
event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def list_activity_types(client, input, options \\ []) do
request(client, "ListActivityTypes", input, options)
end
@doc """
Returns a list of closed workflow executions in the specified domain that
meet the filtering criteria. The results may be split into multiple pages.
To retrieve subsequent pages, make the call again using the `nextPageToken`
returned by the initial call.
<note>This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.</note> **Access
Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>Constrain the following
parameters by using a `Condition` element with the appropriate keys. <ul>
<li>`tagFilter.tag`: String constraint. The key is
`swf:tagFilter.tag`.</li> <li>`typeFilter.name`: String constraint. The key
is `swf:typeFilter.name`.</li> <li>`typeFilter.version`: String constraint.
The key is `swf:typeFilter.version`.</li> </ul> </li> </ul> If the caller
does not have sufficient permissions to invoke the action, or the parameter
values fall outside the specified constraints, the action fails. The
associated event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def list_closed_workflow_executions(client, input, options \\ []) do
request(client, "ListClosedWorkflowExecutions", input, options)
end
@doc """
Returns the list of domains registered in the account. The results may be
split into multiple pages. To retrieve subsequent pages, make the call
again using the `nextPageToken` returned by the initial call.
<note> This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.</note> **Access
Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains. The element must be set to
`arn:aws:swf::AccountID:domain/*`, where *AccountID* is the account ID,
with no dashes.</li> <li>Use an `Action` element to allow or deny
permission to call this action.</li> <li>You cannot use an IAM policy to
constrain this action's parameters.</li> </ul> If the caller does not have
sufficient permissions to invoke the action, or the parameter values fall
outside the specified constraints, the action fails. The associated event
attribute's **cause** parameter will be set to OPERATION_NOT_PERMITTED. For
details and example IAM policies, see [Using IAM to Manage Access to Amazon
SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def list_domains(client, input, options \\ []) do
request(client, "ListDomains", input, options)
end
@doc """
Returns a list of open workflow executions in the specified domain that
meet the filtering criteria. The results may be split into multiple pages.
To retrieve subsequent pages, make the call again using the `nextPageToken`
returned by the initial call.
<note> This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.</note> **Access
Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>Constrain the following
parameters by using a `Condition` element with the appropriate keys. <ul>
<li>`tagFilter.tag`: String constraint. The key is
`swf:tagFilter.tag`.</li> <li>`typeFilter.name`: String constraint. The key
is `swf:typeFilter.name`.</li> <li>`typeFilter.version`: String constraint.
The key is `swf:typeFilter.version`.</li> </ul> </li> </ul> If the caller
does not have sufficient permissions to invoke the action, or the parameter
values fall outside the specified constraints, the action fails. The
associated event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def list_open_workflow_executions(client, input, options \\ []) do
request(client, "ListOpenWorkflowExecutions", input, options)
end
@doc """
Returns information about workflow types in the specified domain. The
results may be split into multiple pages that can be retrieved by making
the call repeatedly.
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>You cannot use an IAM policy
to constrain this action's parameters.</li> </ul> If the caller does not
have sufficient permissions to invoke the action, or the parameter values
fall outside the specified constraints, the action fails. The associated
event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def list_workflow_types(client, input, options \\ []) do
request(client, "ListWorkflowTypes", input, options)
end
@doc """
Used by workers to get an `ActivityTask` from the specified activity
`taskList`. This initiates a long poll, where the service holds the HTTP
connection open and responds as soon as a task becomes available. The
maximum time the service holds on to the request before responding is 60
seconds. If no task is available within 60 seconds, the poll will return an
empty result. An empty result, in this context, means that an `ActivityTask`
is returned, but that the value of `taskToken` is an empty string. If a task
is returned, the worker should use its type to identify and process it
correctly.
<important>Workers should set their client-side socket timeout to at least
70 seconds (10 seconds higher than the maximum time the service may hold the
poll request).</important> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>Constrain the `taskList.name`
parameter by using a **Condition** element with the `swf:taskList.name` key
to allow the action to access only certain task lists.</li> </ul> If the
caller does not have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails.
The associated event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
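
## Example

A hedged sketch (domain and task list names are assumptions). The
`recv_timeout` HTTPoison option is set above the 60-second long-poll
window, per the guidance above:

```elixir
{:ok, task, _http} =
  AWS.SWF.poll_for_activity_task(
    client,
    %{"domain" => "my-domain", "taskList" => %{"name" => "workers"}},
    recv_timeout: 70_000
  )

# An empty "taskToken" means the poll expired with no work available.
case task["taskToken"] do
  "" -> :no_work
  token -> {:work, token, task["input"]}
end
```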
"""
def poll_for_activity_task(client, input, options \\ []) do
request(client, "PollForActivityTask", input, options)
end
@doc """
Used by deciders to get a `DecisionTask` from the specified decision
`taskList`. A decision task may be returned for any open workflow execution
that is using the specified task list. The task includes a paginated view
of the history of the workflow execution. The decider should use the
workflow type and the history to determine how to properly handle the task.
This action initiates a long poll, where the service holds the HTTP
connection open and responds as soon as a task becomes available. If no
decision task is available in the specified task list before the timeout of
60 seconds expires, an empty result is returned. An empty result, in this
context, means that a `DecisionTask` is returned, but that the value of
`taskToken` is an empty string.
<important>Deciders should set their client-side socket timeout to at least
70 seconds (10 seconds higher than the timeout).</important>
<important>Because the number of workflow history events for a single
workflow execution might be very large, the result returned might be split
up across a number of pages. To retrieve subsequent pages, make additional
calls to `PollForDecisionTask` using the `nextPageToken` returned by the
initial call. Note that you do **not** call `GetWorkflowExecutionHistory`
with this `nextPageToken`. Instead, call `PollForDecisionTask`
again.</important> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>Constrain the `taskList.name`
parameter by using a **Condition** element with the `swf:taskList.name` key
to allow the action to access only certain task lists.</li> </ul> If the
caller does not have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails.
The associated event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
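
## Example

A hedged sketch of fetching a decision task and a further history page;
all names are assumptions. Note that subsequent pages come from
`PollForDecisionTask` itself:

```elixir
input = %{"domain" => "my-domain", "taskList" => %{"name" => "deciders"}}

{:ok, task, _http} =
  AWS.SWF.poll_for_decision_task(client, input, recv_timeout: 70_000)

{:ok, _more_events, _http} =
  AWS.SWF.poll_for_decision_task(
    client,
    Map.put(input, "nextPageToken", task["nextPageToken"]),
    recv_timeout: 70_000
  )
```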
"""
def poll_for_decision_task(client, input, options \\ []) do
request(client, "PollForDecisionTask", input, options)
end
@doc """
Used by activity workers to report to the service that the `ActivityTask`
represented by the specified `taskToken` is still making progress. The
worker can also (optionally) specify details of the progress, for example
percent complete, using the `details` parameter. This action can also be
used by the worker as a mechanism to check if cancellation is being
requested for the activity task. If a cancellation is being attempted for
the specified task, then the boolean `cancelRequested` flag returned by the
service is set to `true`.
This action resets the `taskHeartbeatTimeout` clock. The
`taskHeartbeatTimeout` is specified in `RegisterActivityType`.
This action does not in itself create an event in the workflow execution
history. However, if the task times out, the workflow execution history
will contain an `ActivityTaskTimedOut` event that contains the information
from the last heartbeat generated by the activity worker.
<note>The `taskStartToCloseTimeout` of an activity type is the maximum
duration of an activity task, regardless of the number of
`RecordActivityTaskHeartbeat` requests received. The
`taskStartToCloseTimeout` is also specified in
`RegisterActivityType`.</note> <note>This operation is only useful for
long-lived activities to report liveliness of the task and to determine if
a cancellation is being attempted. </note> <important>If the
`cancelRequested` flag returns `true`, a cancellation is being attempted.
If the worker can cancel the activity, it should respond with
`RespondActivityTaskCanceled`. Otherwise, it should ignore the cancellation
request.</important> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>You cannot use an IAM policy
to constrain this action's parameters.</li> </ul> If the caller does not
have sufficient permissions to invoke the action, or the parameter values
fall outside the specified constraints, the action fails. The associated
event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
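
## Example

A hedged sketch of a worker heartbeat that honours a cancellation request
(the task token and progress details are assumptions):

```elixir
{:ok, reply, _http} =
  AWS.SWF.record_activity_task_heartbeat(client, %{
    "taskToken" => task_token,
    "details" => "42% complete"
  })

if reply["cancelRequested"] do
  # The worker can cancel: acknowledge with RespondActivityTaskCanceled.
  AWS.SWF.respond_activity_task_canceled(client, %{"taskToken" => task_token})
end
```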
"""
def record_activity_task_heartbeat(client, input, options \\ []) do
request(client, "RecordActivityTaskHeartbeat", input, options)
end
@doc """
Registers a new *activity type* along with its configuration settings in
the specified domain.
<important>A `TypeAlreadyExists` fault is returned if the type already
exists in the domain. You cannot change any configuration settings of the
type after its registration, and it must be registered as a new
version.</important> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>Constrain the following
parameters by using a `Condition` element with the appropriate keys. <ul>
<li> `defaultTaskList.name`: String constraint. The key is
`swf:defaultTaskList.name`.</li> <li> `name`: String constraint. The key is
`swf:name`.</li> <li> `version`: String constraint. The key is
`swf:version`.</li> </ul> </li> </ul> If the caller does not have
sufficient permissions to invoke the action, or the parameter values fall
outside the specified constraints, the action fails. The associated event
attribute's **cause** parameter will be set to OPERATION_NOT_PERMITTED. For
details and example IAM policies, see [Using IAM to Manage Access to Amazon
SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def register_activity_type(client, input, options \\ []) do
request(client, "RegisterActivityType", input, options)
end
@doc """
Registers a new domain.
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>You cannot use an IAM policy to control domain access for this
action. The name of the domain being registered is available as the
resource of this action.</li> <li>Use an `Action` element to allow or deny
permission to call this action.</li> <li>You cannot use an IAM policy to
constrain this action's parameters.</li> </ul> If the caller does not have
sufficient permissions to invoke the action, or the parameter values fall
outside the specified constraints, the action fails. The associated event
attribute's **cause** parameter will be set to OPERATION_NOT_PERMITTED. For
details and example IAM policies, see [Using IAM to Manage Access to Amazon
SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
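
## Example

A hedged sketch (the name and retention period are assumptions; note that
`workflowExecutionRetentionPeriodInDays` is passed as a string):

```elixir
{:ok, _result, _http} =
  AWS.SWF.register_domain(client, %{
    "name" => "my-domain",
    "description" => "example domain",
    "workflowExecutionRetentionPeriodInDays" => "30"
  })
```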
"""
def register_domain(client, input, options \\ []) do
request(client, "RegisterDomain", input, options)
end
@doc """
Registers a new *workflow type* and its configuration settings in the
specified domain.
The retention period for the workflow history is set by the
`RegisterDomain` action.
<important>If the type already exists, then a `TypeAlreadyExists` fault is
returned. You cannot change the configuration settings of a workflow type
once it is registered and it must be registered as a new
version.</important> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>Constrain the following
parameters by using a `Condition` element with the appropriate keys. <ul>
<li> `defaultTaskList.name`: String constraint. The key is
`swf:defaultTaskList.name`.</li> <li> `name`: String constraint. The key is
`swf:name`.</li> <li> `version`: String constraint. The key is
`swf:version`.</li> </ul> </li> </ul> If the caller does not have
sufficient permissions to invoke the action, or the parameter values fall
outside the specified constraints, the action fails. The associated event
attribute's **cause** parameter will be set to OPERATION_NOT_PERMITTED. For
details and example IAM policies, see [Using IAM to Manage Access to Amazon
SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def register_workflow_type(client, input, options \\ []) do
request(client, "RegisterWorkflowType", input, options)
end
@doc """
Records a `WorkflowExecutionCancelRequested` event in the currently running
workflow execution identified by the given domain, workflowId, and runId.
This logically requests the cancellation of the workflow execution as a
whole. It is up to the decider to take appropriate actions when it receives
an execution history with this event.
<note>If the runId is not specified, the `WorkflowExecutionCancelRequested`
event is recorded in the history of the current open workflow execution
with the specified workflowId in the domain.</note> <note>Because this
action allows the workflow to properly clean up and gracefully close, it
should be used instead of `TerminateWorkflowExecution` when
possible.</note> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>You cannot use an IAM policy
to constrain this action's parameters.</li> </ul> If the caller does not
have sufficient permissions to invoke the action, or the parameter values
fall outside the specified constraints, the action fails. The associated
event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def request_cancel_workflow_execution(client, input, options \\ []) do
request(client, "RequestCancelWorkflowExecution", input, options)
end
@doc """
Used by workers to tell the service that the `ActivityTask` identified by
the `taskToken` was successfully canceled. Additional `details` can be
optionally provided using the `details` argument.
These `details` (if provided) appear in the `ActivityTaskCanceled` event
added to the workflow history.
<important>Only use this operation if the `cancelRequested` flag of a
`RecordActivityTaskHeartbeat` request returns `true` and if the activity
can be safely undone or abandoned.</important> A task is considered open
from the time that it is scheduled until it is closed. Therefore a task is
reported as open while a worker is processing it. A task is closed after it
has been specified in a call to `RespondActivityTaskCompleted`,
`RespondActivityTaskCanceled`, `RespondActivityTaskFailed`, or the task has
[timed
out](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types).
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>You cannot use an IAM policy
to constrain this action's parameters.</li> </ul> If the caller does not
have sufficient permissions to invoke the action, or the parameter values
fall outside the specified constraints, the action fails. The associated
event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def respond_activity_task_canceled(client, input, options \\ []) do
request(client, "RespondActivityTaskCanceled", input, options)
end
@doc """
Used by workers to tell the service that the `ActivityTask` identified by
the `taskToken` completed successfully with a `result` (if provided). The
`result` appears in the `ActivityTaskCompleted` event in the workflow
history.
<important> If the requested task does not complete successfully, use
`RespondActivityTaskFailed` instead. If the worker finds that the task is
canceled through the `cancelRequested` flag returned by
`RecordActivityTaskHeartbeat`, it should cancel the task, clean up and then
call `RespondActivityTaskCanceled`.</important> A task is considered open
from the time that it is scheduled until it is closed. Therefore a task is
reported as open while a worker is processing it. A task is closed after it
has been specified in a call to `RespondActivityTaskCompleted`,
`RespondActivityTaskCanceled`, `RespondActivityTaskFailed`, or the task has
[timed
out](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types).
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>You cannot use an IAM policy
to constrain this action's parameters.</li> </ul> If the caller does not
have sufficient permissions to invoke the action, or the parameter values
fall outside the specified constraints, the action fails. The associated
event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def respond_activity_task_completed(client, input, options \\ []) do
request(client, "RespondActivityTaskCompleted", input, options)
end
@doc """
Used by workers to tell the service that the `ActivityTask` identified by
the `taskToken` has failed with `reason` (if specified). The `reason` and
`details` appear in the `ActivityTaskFailed` event added to the workflow
history.
A task is considered open from the time that it is scheduled until it is
closed. Therefore a task is reported as open while a worker is processing
it. A task is closed after it has been specified in a call to
`RespondActivityTaskCompleted`, `RespondActivityTaskCanceled`,
`RespondActivityTaskFailed`, or the task has [timed
out](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types).
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>You cannot use an IAM policy
to constrain this action's parameters.</li> </ul> If the caller does not
have sufficient permissions to invoke the action, or the parameter values
fall outside the specified constraints, the action fails. The associated
event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def respond_activity_task_failed(client, input, options \\ []) do
request(client, "RespondActivityTaskFailed", input, options)
end
@doc """
Used by deciders to tell the service that the `DecisionTask` identified by
the `taskToken` has successfully completed. The `decisions` argument
specifies the list of decisions made while processing the task.
A `DecisionTaskCompleted` event is added to the workflow history. The
`executionContext` specified is attached to the event in the workflow
execution history.
**Access Control**
If an IAM policy grants permission to use `RespondDecisionTaskCompleted`,
it can express permissions for the list of decisions in the `decisions`
parameter. Each of the decisions has one or more parameters, much like a
regular API call. To allow for policies to be as readable as possible, you
can express permissions on decisions as if they were actual API calls,
including applying conditions to some parameters. For more information, see
[Using IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def respond_decision_task_completed(client, input, options \\ []) do
request(client, "RespondDecisionTaskCompleted", input, options)
end
@doc """
Records a `WorkflowExecutionSignaled` event in the workflow execution
history and creates a decision task for the workflow execution identified
by the given domain, workflowId and runId. The event is recorded with the
specified user defined signalName and input (if provided).
<note> If a runId is not specified, then the `WorkflowExecutionSignaled`
event is recorded in the history of the current open workflow with the
matching workflowId in the domain.</note> <note> If the specified workflow
execution is not open, this method fails with `UnknownResource`.</note>
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>You cannot use an IAM policy
to constrain this action's parameters.</li> </ul> If the caller does not
have sufficient permissions to invoke the action, or the parameter values
fall outside the specified constraints, the action fails. The associated
event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def signal_workflow_execution(client, input, options \\ []) do
request(client, "SignalWorkflowExecution", input, options)
end
@doc """
Starts an execution of the workflow type in the specified domain using the
provided `workflowId` and input data.
This action returns the newly started workflow execution.
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>Constrain the following
parameters by using a `Condition` element with the appropriate keys. <ul>
<li> `tagList.member.0`: The key is `swf:tagList.member.0`.</li> <li>
`tagList.member.1`: The key is `swf:tagList.member.1`.</li> <li>
`tagList.member.2`: The key is `swf:tagList.member.2`.</li> <li>
`tagList.member.3`: The key is `swf:tagList.member.3`.</li> <li>
`tagList.member.4`: The key is `swf:tagList.member.4`.</li> <li>`taskList`:
String constraint. The key is `swf:taskList.name`.</li>
<li>`workflowType.name`: String constraint. The key is
`swf:workflowType.name`.</li> <li>`workflowType.version`: String
constraint. The key is `swf:workflowType.version`.</li> </ul> </li> </ul>
If the caller does not have sufficient permissions to invoke the action, or
the parameter values fall outside the specified constraints, the action
fails. The associated event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
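
## Example

A hedged sketch; `client` is assumed to be a map carrying the fields that
`request/4` and `AWS.Request.sign_v4/5` use (credentials, region,
endpoint, proto, port), and all identifiers are illustrative:

```elixir
{:ok, started, _http} =
  AWS.SWF.start_workflow_execution(client, %{
    "domain" => "my-domain",
    "workflowId" => "order-1234",
    "workflowType" => %{"name" => "ProcessOrder", "version" => "1.0"},
    "taskList" => %{"name" => "deciders"},
    "input" => ~s({"orderId": 1234})
  })

started["runId"] # => the run ID of the new execution
```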
"""
def start_workflow_execution(client, input, options \\ []) do
request(client, "StartWorkflowExecution", input, options)
end
@doc """
Records a `WorkflowExecutionTerminated` event and forces closure of the
workflow execution identified by the given domain, runId, and workflowId.
The child policy, registered with the workflow type or specified when
starting this execution, is applied to any open child workflow executions
of this workflow execution.
<important> If the identified workflow execution was in progress, it is
terminated immediately.</important> <note> If a runId is not specified,
then the `WorkflowExecutionTerminated` event is recorded in the history of
the current open workflow with the matching workflowId in the
domain.</note> <note> You should consider using the
`RequestCancelWorkflowExecution` action instead because it allows the
workflow to gracefully close while `TerminateWorkflowExecution` does
not.</note> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li>Use a `Resource` element with the domain name to limit the action
to only specified domains.</li> <li>Use an `Action` element to allow or
deny permission to call this action.</li> <li>You cannot use an IAM policy
to constrain this action's parameters.</li> </ul> If the caller does not
have sufficient permissions to invoke the action, or the parameter values
fall outside the specified constraints, the action fails. The associated
event attribute's **cause** parameter will be set to
OPERATION_NOT_PERMITTED. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
"""
def terminate_workflow_execution(client, input, options \\ []) do
request(client, "TerminateWorkflowExecution", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
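# Build a signed AWS JSON-1.0 request: pin the service to SWF, resolve the
# regional endpoint, SigV4-sign the encoded payload, then POST it and decode
# the JSON response (or surface the error "__type" and "message" on failure).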
client = %{client | service: "swf"}
host = get_host("swf", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.0"},
{"X-Amz-Target", "SimpleWorkflowService.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/swf.ex
| 0.928959 | 0.641591 |
swf.ex
|
starcoder
|
defmodule GroupManager.Data.LocalClock do
require Record
require Chatter.NetID
alias Chatter.NetID
alias Chatter.Serializer
Record.defrecord :local_clock,
member: nil,
time_val: 0
@type t :: record( :local_clock,
member: NetID.t,
time_val: integer )
@type local_clock_list :: list(t)
@spec new(NetID.t) :: t
def new(id)
when NetID.is_valid(id)
do
local_clock(member: id)
end
@spec new(NetID.t, integer) :: t
def new(id, time)
when NetID.is_valid(id) and
is_integer(time) and
time >= 0 and
time <= 0xffffffff
do
local_clock(member: id) |> local_clock(time_val: time)
end
defmacro is_valid(data) do
case Macro.Env.in_guard?(__CALLER__) do
true ->
quote do
is_tuple(unquote(data)) and tuple_size(unquote(data)) == 3 and
:erlang.element(1, unquote(data)) == :local_clock and
# member
NetID.is_valid(:erlang.element(2, unquote(data))) and
# time_val
is_integer(:erlang.element(3, unquote(data))) and
:erlang.element(3, unquote(data)) >= 0 and
:erlang.element(3, unquote(data)) <= 0xffffffff
end
false ->
quote bind_quoted: binding() do
is_tuple(data) and tuple_size(data) == 3 and
:erlang.element(1, data) == :local_clock and
# member
NetID.is_valid(:erlang.element(2, data)) and
# time_val
is_integer(:erlang.element(3, data)) and
:erlang.element(3, data) >= 0 and
:erlang.element(3, data) <= 0xffffffff
end
end
end
@spec valid?(t) :: boolean
def valid?(data)
when is_valid(data)
do
true
end
def valid?(_), do: false
@spec validate_list(list(t)) :: :ok | :error
def validate_list([]), do: :ok
def validate_list([head|rest])
do
case valid?(head) do
true -> validate_list(rest)
false -> :error
end
end
def validate_list(_), do: :error
@spec validate_list!(list(t)) :: :ok
def validate_list!([]), do: :ok
def validate_list!([head|rest])
when is_valid(head)
do
validate_list!(rest)
end
@spec next(t) :: t
def next(clock)
when is_valid(clock)
do
local_clock(clock, time_val: local_clock(clock, :time_val)+1)
end
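# A hedged usage sketch (assuming `a` is a valid Chatter.NetID value):
#
#   c1 = LocalClock.new(a)        # time_val starts at 0
#   c2 = LocalClock.next(c1)      # time_val: 1
#   LocalClock.merge([c1], c2)    # => [c2] - the highest time_val per member wins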
@spec time_val(t) :: integer
def time_val(clock)
when is_valid(clock)
do
local_clock(clock, :time_val)
end
@spec member(t) :: NetID.t
def member(clock)
when is_valid(clock)
do
local_clock(clock, :member)
end
@spec merge(local_clock_list, t) :: local_clock_list
def merge(lhs, rhs)
when is_list(lhs) and
is_valid(rhs)
do
merge(lhs, [rhs])
end
@spec merge(local_clock_list, local_clock_list) :: local_clock_list
def merge(lhs, rhs)
when is_list(lhs) and
is_list(rhs)
do
# optimize this ???
dict = Enum.map(lhs ++ rhs, fn(x) -> {member(x), time_val(x)} end)
|> Enum.reduce(%{}, fn({m, t}, acc) ->
Map.update(acc, m, t, fn(prev_time) ->
max(t, prev_time)
end)
end)
keys = Map.keys(dict) |> Enum.sort
Enum.map(keys, fn(k) ->
local_clock(member: k)
|> local_clock(time_val: Map.get(dict, k))
end)
end
@spec max_clock(t, t) :: t
def max_clock(lhs, rhs)
when is_valid(lhs) and
is_valid(rhs) and
local_clock(lhs, :member) == local_clock(rhs, :member)
do
if( local_clock(lhs, :time_val) > local_clock(rhs, :time_val) )
do
lhs
else
rhs
end
end
@spec encode_with(t, map) :: binary
def encode_with(clock, id_map)
when is_valid(clock) and
is_map(id_map)
do
id = Map.fetch!(id_map, local_clock(clock, :member))
<< Serializer.encode_uint(id) :: binary,
Serializer.encode_uint(local_clock(clock, :time_val)) :: binary >>
end
@spec decode_with(binary, map) :: {t, binary}
def decode_with(bin, id_map)
when is_binary(bin) and
byte_size(bin) > 0 and
is_map(id_map)
do
{id, rest} = Serializer.decode_uint(bin)
{time, rest} = Serializer.decode_uint(rest)
net_id = Map.fetch!(id_map, id)
{ new(net_id, time) , rest }
end
@spec encode_list_with(list(t), map) :: binary
def encode_list_with(elems, id_map)
when is_list(elems) and
is_map(id_map)
do
:ok = validate_list!(elems)
bin_size = elems |> length |> Serializer.encode_uint
bin_list = elems |> Enum.reduce(<<>>, fn(x,acc) ->
acc <> encode_with(x, id_map)
end)
<< bin_size :: binary,
bin_list :: binary >>
end
@spec decode_list_with(binary, map) :: {list(t), binary}
def decode_list_with(bin, id_map)
do
{count, remaining} = Serializer.decode_uint(bin)
{list, remaining} = decode_list_with_(remaining, count, [], id_map)
{Enum.reverse(list), remaining}
end
defp decode_list_with_(<<>>, _count, acc, _map), do: {acc, <<>>}
defp decode_list_with_(binary, 0, acc, _map), do: {acc, binary}
defp decode_list_with_(msg, count, acc, map)
when is_binary(msg) and
is_integer(count) and
count > 0 and
is_list(acc) and
is_map(map)
do
{id, remaining} = decode_with(msg, map)
decode_list_with_(remaining, count-1, [id | acc], map)
end
end
|
lib/group_manager/data/local_clock.ex
| 0.672439 | 0.446374 |
local_clock.ex
|
starcoder
|
defexception ExUnit.AssertionError, message: "assertion failed"
defmodule ExUnit.Assertions do
@moduledoc """
This module contains a set of assertion functions that are
imported by default into your test cases.
In general, a developer will want to use the general
`assert` macro in tests. The macro tries to be smart
and provide good reporting whenever there is a failure.
For example, `assert some_fun() == 10` will fail (assuming
`some_fun()` returns 13):
Expected 13 to be equal to (==) 10
This module also provides other small convenient functions
like `assert_in_delta` and `assert_raise` to easily handle other
common cases, such as checking a float number or handling exceptions.
"""
@doc """
Asserts the `expected` value is true.
`assert` in general tries to be smart and provide a good
reporting whenever there is a failure. For example,
`assert 10 > 15` is going to fail with a message:
Expected 10 to be more than 15
## Examples
assert true
"""
defmacro assert(expected) do
translate_assertion(expected, fn ->
quote do
value = unquote(expected)
assert value, "Expected #{inspect value} to be true"
end
end)
end
@doc """
Refutes the `expected` value is true.
`refute` in general tries to be smart and provide a good
reporting whenever there is a failure.
## Examples
refute false
"""
defmacro refute(expected) do
contents = translate_assertion({ :!, 0, [expected] }, fn ->
quote do
value = unquote(expected)
assert !value, "Expected #{inspect value} to be false"
end
end)
{ :!, 0, [contents] }
end
## START HELPERS
defmacrop negation?(op) do
quote do: (var!(op) == :! or var!(op) == :not)
end
defp translate_assertion({ :=, _, [expected, received] }, _else) do
quote do
try do
unquote(expected) = unquote(received)
rescue
x in [MatchError] ->
raise ExUnit.AssertionError, message: x.message
end
end
end
defp translate_assertion({ :==, _, [left, right] }, _else) do
assert_operator :==, left, right, "be equal to (==)"
end
defp translate_assertion({ :<, _, [left, right] }, _else) do
assert_operator :<, left, right, "be less than"
end
defp translate_assertion({ :>, _, [left, right] }, _else) do
assert_operator :>, left, right, "be more than"
end
defp translate_assertion({ :<=, _, [left, right] }, _else) do
assert_operator :<=, left, right, "be less than or equal to"
end
defp translate_assertion({ :>=, _, [left, right] }, _else) do
assert_operator :>=, left, right, "be more than or equal to"
end
defp translate_assertion({ :===, _, [left, right] }, _else) do
assert_operator :===, left, right, "be equal to (===)"
end
defp translate_assertion({ :!==, _, [left, right] }, _else) do
assert_operator :!==, left, right, "be not equal to (!==)"
end
defp translate_assertion({ :!=, _, [left, right] }, _else) do
assert_operator :!=, left, right, "be not equal to (!=)"
end
defp translate_assertion({ :=~, _, [left, right] }, _else) do
assert_operator :=~, left, right, "match (=~)"
end
defp translate_assertion({ :in, _, [left, right] }, _else) do
quote do
left = unquote(left)
right = unquote(right)
assert(Enum.find(right, &1 == left), "Expected #{inspect left} to be in #{inspect right}")
end
end
## Negative versions
defp translate_assertion({ op, _, [{ :=, _, [expected, received] }] }, _else) when negation?(op) do
quote do
try do
unquote(expected) = x = unquote(received)
flunk "Unexpected right side #{inspect x} match"
rescue
x in [MatchError] -> true
end
end
end
defp translate_assertion({ op, _, [{ :=~, _, [left, right] }] }, _else) when negation?(op) do
quote do
left = unquote(left)
right = unquote(right)
assert(!(left =~ right), "Expected #{inspect left} to not match #{inspect right}")
end
end
defp translate_assertion({ op, _, [{ :in, _, [left, right] }] }, _else) when negation?(op) do
quote do
left = unquote(left)
right = unquote(right)
assert(!Enum.find(right, &1 == left), "Expected #{inspect left} to not be in #{inspect right}")
end
end
## Fallback
defp translate_assertion(_expected, fallback) do
fallback.()
end
defp assert_operator(operator, expected, actual, text) do
quote do
left = unquote(expected)
right = unquote(actual)
assert unquote(operator).(left, right),
"Expected #{inspect left} to #{unquote(text)} #{inspect right}"
end
end
## END HELPERS
@doc """
Asserts the `expected` value is true.
If it fails, raises the given `message`.
## Examples
assert false, "it will never be true"
"""
def assert(expected, message) when is_binary(message) do
unless expected, do: flunk message
true
end
@doc """
Asserts a message was received and is in the current process mailbox.
The given `expected` content must be a match pattern.
Timeout is set to 0, so there is no waiting time.
## Examples
self <- :hello
assert_received :hello
You can also match against specific patterns:
self <- { :hello, "world" }
assert_received { :hello, _ }
"""
defmacro assert_received(expected, message // nil) do
binary = Macro.to_binary(expected)
quote do
receive do
unquote(expected) = received -> received
after
0 -> flunk unquote(message) || "Expected to have received message matching #{unquote binary}"
end
end
end
@doc """
Asserts the `exception` is raised during `function` execution with
the `expected_message`. Returns the rescued exception, fails otherwise.
## Examples
assert_raise ArithmeticError, "bad argument in arithmetic expression", fn ->
1 + "test"
end
"""
def assert_raise(exception, expected_message, function) do
error = assert_raise(exception, function)
assert error.message == expected_message
error
end
@doc """
Asserts the `exception` is raised during `function` execution.
Returns the rescued exception, fails otherwise.
## Examples
assert_raise ArithmeticError, fn ->
1 + "test"
end
"""
def assert_raise(exception, function) do
try do
function.()
flunk "Expected #{inspect exception} exception but nothing was raised"
rescue
error in [exception] -> error
error ->
name = error.__record__(:name)
if name == ExUnit.AssertionError do
raise(error)
else
flunk "Expected exception #{inspect exception}, got #{inspect name} (#{error.message})"
end
end
end
@doc """
Asserts the `expected` and `received` are within `delta`.
## Examples
assert_in_delta 1.1, 1.5, 0.2
assert_in_delta 10, 15, 4
"""
def assert_in_delta(expected, received, delta, message // nil) do
diff = abs(expected - received)
message = message ||
"Expected |#{inspect expected} - #{inspect received}| (#{inspect diff}) to be < #{inspect delta}"
assert diff < delta, message
end
@doc """
Asserts the given `expression` will throw a value.
Returns the thrown value or fails otherwise.
## Examples
assert catch_throw(throw 1) == 1
"""
defmacro catch_throw(expression) do
do_catch(:throw, expression)
end
@doc """
Asserts the given `expression` will exit.
Returns the exit status/message or fails otherwise.
## Examples
assert catch_exit(exit 1) == 1
"""
defmacro catch_exit(expression) do
do_catch(:exit, expression)
end
@doc """
Asserts the given `expression` will cause an error.
Returns the error or fails otherwise.
## Examples
assert catch_error(error 1) == 1
"""
defmacro catch_error(expression) do
do_catch(:error, expression)
end
defp do_catch(kind, expr) do
quote do
try do
unquote(expr)
flunk "Expected to catch #{unquote(kind)}, got nothing"
rescue
ExUnit.AssertionError = e -> raise(e)
catch
unquote(kind), what_we_got -> what_we_got
end
end
end
@doc """
Asserts the `not_expected` value is nil or false.
In case it is a truthy value, raises the given message.
## Examples
refute true, "This will obviously fail"
"""
def refute(not_expected, message) do
not assert(!not_expected, message)
end
@doc """
Asserts a message was not received (i.e. it is not in the current process mailbox).
The `not_expected` contents must be a match pattern.
Timeout is set to 0, so there is no waiting time.
## Examples
self <- :hello
refute_received :bye
"""
defmacro refute_received(not_expected, message // nil) do
binary = Macro.to_binary(not_expected)
quote do
receive do
unquote(not_expected) = actual ->
flunk unquote(message) || "Expected to not have received message matching #{unquote binary}, got #{inspect actual}"
after
0 -> false
end
end
end
@doc """
Asserts the `expected` and `received` are not within `delta`.
## Examples
refute_in_delta 1.1, 1.2, 0.2
refute_in_delta 10, 11, 2
"""
def refute_in_delta(expected, received, delta, message // nil) do
diff = abs(expected - received)
message = message ||
"Expected |#{inspect expected} - #{inspect received}| (#{inspect diff}) to not be < #{inspect delta}"
refute diff < delta, message
end
@doc """
Fails with a message.
## Examples
flunk "This should raise an error"
"""
def flunk(message // "Epic Fail!") do
raise ExUnit.AssertionError, message: message
end
end
|
lib/ex_unit/lib/ex_unit/assertions.ex
| 0.914706 | 0.903166 |
assertions.ex
|
starcoder
|
defrecord Flect.Compiler.Syntax.Token, type: nil,
value: "",
location: nil do
@moduledoc """
Represents a token from a Flect source code document.
`type` is an atom describing the kind of token. `value` is a binary
containing the raw string value of the token. `location` is a
`Flect.Compiler.Syntax.Location` indicating where in the source code
the token originates.
`type` can be one of:
* `:line_comment`
* `:block_comment`
* `:plus`
* `:minus`
* `:minus_angle_close`
* `:star`
* `:slash`
* `:percent`
* `:ampersand`
* `:ampersand_ampersand`
* `:pipe`
* `:pipe_pipe`
* `:pipe_angle_close`
* `:caret`
* `:tilde`
* `:exclamation`
* `:exclamation_assign`
* `:exclamation_assign_assign`
* `:paren_open`
* `:paren_close`
* `:brace_open`
* `:brace_close`
* `:bracket_open`
* `:bracket_close`
* `:comma`
* `:period`
* `:period_period`
* `:at`
* `:colon`
* `:colon_colon`
* `:semicolon`
* `:assign`
* `:assign_assign`
* `:assign_assign_assign`
* `:angle_open`
* `:angle_open_assign`
* `:angle_open_pipe`
* `:angle_open_angle_open`
* `:angle_close`
* `:angle_close_assign`
* `:angle_close_angle_close`
* `:string`
* `:character`
* `:directive`
* `:identifier`
* `:mod`
* `:use`
* `:pub`
* `:priv`
* `:trait`
* `:impl`
* `:struct`
* `:union`
* `:enum`
* `:type`
* `:fn`
* `:ext`
* `:ref`
* `:glob`
* `:tls`
* `:mut`
* `:imm`
* `:let`
* `:as`
* `:if`
* `:else`
* `:cond`
* `:match`
* `:loop`
* `:while`
* `:for`
* `:break`
* `:goto`
* `:return`
* `:safe`
* `:unsafe`
* `:asm`
* `:true`
* `:false`
* `:null`
* `:new`
* `:assert`
* `:in`
* `:meta`
* `:test`
* `:macro`
* `:quote`
* `:unquote`
* `:yield`
* `:fixed`
* `:pragma`
* `:scope`
* `:move`
* `:float`
* `:integer`
* `:f32`
* `:f64`
* `:i8`
* `:u8`
* `:i16`
* `:u16`
* `:i32`
* `:u32`
* `:i64`
* `:u64`
* `:i`
* `:u`
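As an illustration, a hypothetical identifier token (where `loc` stands for
a `Flect.Compiler.Syntax.Location`):

    Flect.Compiler.Syntax.Token.new(type: :identifier,
                                    value: "foo",
                                    location: loc)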
"""
record_type(type: atom(),
value: String.t(),
location: Flect.Compiler.Syntax.Location.t())
end
|
lib/compiler/syntax/token.ex
| 0.863722 | 0.938181 |
token.ex
|
starcoder
|
defmodule DeadLetter.Carrier do
@moduledoc """
Defines the behaviour that clients must implement
in order to properly dispatch dead letter messages
to the waiting message queue service.
"""
@doc """
Start a DeadLetter carrier and link to the current process.
"""
@callback start_link(term()) :: GenServer.on_start()
@doc """
Return a child specification for the DeadLetter carrier for
inclusion in an application supervision tree.
"""
@callback child_spec(term()) :: Supervisor.child_spec()
@doc """
Send the desired message to the message queue processing
dead letters for the system.
"""
@callback send(term()) :: :ok | {:error, term()}
end
defmodule DeadLetter.Carrier.Test do
@moduledoc """
Test implementation of the `DeadLetter.Carrier` behaviour.
Simply stores the message in an internal queue via the Erlang
`:queue` module with a configurable cap.
Ideal for testing or very small systems.
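
## Example

A hypothetical round trip (the message shape is an assumption):

    {:ok, _pid} = DeadLetter.Carrier.Test.start_link(size: 10)
    :ok = DeadLetter.Carrier.Test.send(%{app: "my_app", reason: "bad payload"})
    {:ok, %{app: "my_app"}} = DeadLetter.Carrier.Test.receive()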
"""
use GenServer
@behaviour DeadLetter.Carrier
@default_size 2_000
@name :dead_letter_carrier
@doc """
Start the default carrier and link to the calling process.
"""
@impl DeadLetter.Carrier
def start_link(opts) do
GenServer.start_link(__MODULE__, opts, name: @name)
end
@doc """
Initialize the default carrier and set up the queue.
"""
@impl GenServer
def init(opts) do
size = Keyword.get(opts, :size, @default_size)
{:ok, %{size: size, queue: :queue.new()}}
end
@doc """
Add a processed message to the end of the queue.
"""
@impl DeadLetter.Carrier
def send(message) do
GenServer.cast(@name, {:send, message})
end
@doc """
Remove the first processed message from the front of the queue.
"""
def receive() do
{:ok, GenServer.call(@name, :receive)}
end
@impl GenServer
def handle_cast({:send, message}, state) do
new_queue = :queue.in(message, state.queue)
{:noreply, %{state | queue: ensure_queue_size(new_queue, state.size)}}
end
@impl GenServer
def handle_call(:receive, _from, state) do
{value, new_queue} =
case :queue.out(state.queue) do
{{:value, head}, queue} -> {head, queue}
{:empty, queue} -> {:empty, queue}
end
{:reply, value, %{state | queue: new_queue}}
end
defp ensure_queue_size(queue, size) do
case :queue.len(queue) > size do
true ->
{_value, new_queue} = :queue.out(queue)
new_queue
false ->
queue
end
end
end
|
apps/dead_letter/lib/dead_letter/carrier.ex
| 0.833731 | 0.462837 |
carrier.ex
|
starcoder
|
defmodule OAuth2TokenManager do
@moduledoc """
Manages OAuth2 tokens and OpenID Connect claims and ID tokens
## Options
- `:auto_introspect`: if set to `true`, access and refresh tokens are automatically inspected
when they are registered, so as to gather additional useful information about them. The
authorization server may not be configured to allow a client to inspect its own tokens.
Defaults to `true`
- `:min_introspect_interval`: the minimum time interval in seconds to introspect a token on
the authorization server. Defaults to `30`
- `:min_userinfo_refresh_interval`: the minimum time interval in seconds to request the
userinfo endpoint of the authorization server when requesting claims. Defaults to `30`
- `:oauth2_metadata_updater_opts`: options pased to `Oauth2MetadataUpdater`
- `:revoke_on_delete`: when set to `true`, the calls to
`OAuth2TokenManager.AccessToken.delete/4` and `OAuth2TokenManager.RefreshToken.delete/4`
automatically trigger token revocation on the authorization server. Defaults to `true`
- `:server_metadata`: additional server metadata that takes precedence over that which is
returned from the autorization server
- `:tesla_middlewares`: Tesla middlewares added to requests
- `:tesla_auth_middleware_opts`: options added to the Tesla authentication middleware
selected for client authentication. See also `TeslaOAuth2ClientAuth`
## Client configuration
Client configuration is passed as a parameter to some functions. It must contain at least:
- `"client_id"`: the client id of the client
- `"client_secret"` for use with the client secret basic authentication scheme. The client
authentication scheme is determined by the `"token_endpoint_auth_method"` and defaults to
`"client_secret_basic"` if not set. This is used on the following endpoints:
- `"token_endpoint"`
- `"introspection_endpoint"`
- `"revocation_endpoint"`
When not using the defaults, the client might also have the following configuration fields set:
- `"token_endpoint_auth_method"`
- `"userinfo_signed_response_alg"`
- `"userinfo_encrypted_response_alg"`
- `"userinfo_encrypted_response_enc"`
- `"jwks"`
- `"jwks_uri"`
## Environment options
- `OAuth2TokenManager.Store`: the token store implementation. Defaults to
`OAuth2TokenManager.Store.Local`
- `:tesla_middlewares`: allows adding Tesla middlewares for all request. Example:
config :oauth2_token_manager, :tesla_middlewares, [Tesla.Middleware.Logger]
## Examples
```elixir
iex> cc
%{"client_id" => "client1", "client_secret" => "clientpassword1"}
iex> OAuth2TokenManager.AccessToken.get("https://repentant-brief-fishingcat.gigalixirapp.com", "<KEY>", cc, nil)
{:ok, {"0mUB13mvdDkrsUECnMhK-EGKvL0", "bearer"}}
iex> OAuth2TokenManager.AccessToken.introspect("<KEY>", "https://repentant-brief-fishingcat.gigalixirapp.com", cc)
{:ok,
%{
"active" => true,
"client_id" => "client1",
"exp" => 1590345951,
"iat" => 1590345771,
"iss" => "https://repentant-brief-fishingcat.gigalixirapp.com",
"scope" => ["interbank_transfer", "openid", "read_account_information",
"read_balance"],
"sub" => "<KEY>"
}}
iex> OAuth2TokenManager.AccessToken.get("https://repentant-brief-fishingcat.gigalixirapp.com", "<KEY>", cc, ["read_balance", "read_account_information"])
{:ok, {"4kWo-<KEY>", "bearer"}}
iex> OAuth2TokenManager.AccessToken.introspect("<KEY>", "https://repentant-brief-fishingcat.gigalixirapp.com", cc)
{:ok,
%{
"active" => true,
"client_id" => "client1",
"exp" => 1590346428,
"iat" => 1590345828,
"iss" => "https://repentant-brief-fishingcat.gigalixirapp.com",
"scope" => ["read_account_information", "read_balance"],
"sub" => "cThpjg2-HzfS_7fvNkCYeEUBkCUpmKFSjzb6iebl5TU"
}}
iex> OAuth2TokenManager.Claims.get_claims("https://repentant-brief-fishingcat.gigalixirapp.com", "<KEY>", cc)
{:ok, %{"sub" => "cThpjg2-HzfS_7fvNkCYeEUBkCUpmKFSjzb6iebl5TU"}}
iex> OAuth2TokenManager.Claims.get_id_token("https://repentant-brief-fishingcat.gigalixirapp.com", "<KEY>")
{:ok,
"<KEY>"}
iex> OAuth2TokenManager.AccessToken.delete("<KEY>", "https://repentant-brief-fishingcat.gigalixirapp.com", cc)
:ok
```
"""
@type access_token :: String.t()
@type access_token_type :: String.t()
@typedoc """
User claims, usually those returned by the userinfo endpoint
"""
@type claims :: %{optional(String.t()) => any()}
@typedoc """
Client configuration as per RFC7591
Used fields include:
- `"client_id"` (mandatory)
- `"jwks"` and `"jwks_uri"` for ID token decryption
- `"token_endpoint_auth_method"` to determine which authentication method use to access the
token endpoint
"""
@type client_config :: %{optional(String.t()) => any()}
@type client_id :: String.t()
@type endpoint :: :token | :revocation | :introspection | :userinfo
@typedoc """
ID token in its JWE or JWS form
"""
@type id_token :: String.t()
@type issuer :: String.t()
@type opts() :: [opt()]
@type opt ::
{:auto_introspect, boolean()}
| {:min_introspect_interval, non_neg_integer()}
| {:min_userinfo_refresh_interval, non_neg_integer()}
| {:oauth2_metadata_updater_opts, Keyword.t()}
| {:revoke_on_delete, boolean()}
| {:server_metadata, server_metadata()}
| {:tesla_middlewares, Tesla.Client.middleware()}
| {:tesla_auth_middleware_opts, Keyword.t()}
@type refresh_token :: String.t()
@typedoc """
OAuth2 AS / OpenID Connect OP server metadata as per RFC 8414
When set, its values take precedence over the discovery document published on the AS / OP.
"""
@type server_metadata :: %{optional(String.t()) => any()}
@type scope :: String.t()
@type subject :: String.t()
@typedoc """
Token metadata
Known fields from [RFC7662](https://tools.ietf.org/html/rfc7662#section-2) are:
- `"active"`
- `"scope"`
- `"client_id"`
- `"username"`
- `"token_type"`
- `"exp"`
- `"iat"`
- `"nbf"`
- `"sub"`
- `"aud"`
- `"iss"`
- `"jti"`
"""
@type token_metadata :: %{optional(String.t()) => any()}
@typedoc """
The token type, for instance `"Bearer"`
"""
@type token_type :: String.t()
@default_opts [
auto_introspect: true,
min_userinfo_refresh_interval: 30,
min_introspect_interval: 30,
revoke_on_delete: true
]
@doc """
Determines if a token is valid from a token's metadata
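## Example

A sketch with hypothetical metadata (field names per RFC7662):

```elixir
OAuth2TokenManager.token_valid?(%{"active" => true, "exp" => 1})
# => false, since the expiration is in the past

OAuth2TokenManager.token_valid?(%{"active" => true})
# => true, as neither "exp" nor "nbf" constrains the token
```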
"""
@spec token_valid?(
token_metadata()
| {access_token(), token_type(), token_metadata(), non_neg_integer()}
| {refresh_token(), token_metadata, non_neg_integer()}
) :: boolean()
def token_valid?({_at, _token_type, token_metadata, _updated_at}) do
token_valid?(token_metadata)
end
def token_valid?({_rt, token_metadata, _updated_at}) do
token_valid?(token_metadata)
end
def token_valid?(%{"valid" => false}) do
false
end
def token_valid?(%{} = at_metadata) do
exp = at_metadata["exp"]
nbf = at_metadata["nbf"]
cond do
is_integer(exp) and exp < now() ->
false
is_integer(nbf) and nbf > now() ->
false
true ->
true
end
end
defp now, do: System.system_time(:second)
@doc false
def opts_set_default(opts), do: Keyword.merge(@default_opts, opts)
end
|
lib/oauth2_token_manager.ex
| 0.889745 | 0.712082 |
oauth2_token_manager.ex
|
starcoder
|
defmodule Omise.Charge do
@moduledoc ~S"""
Provides Charge API interfaces.
<https://www.omise.co/charges-api>
"""
use Omise.HTTPClient, endpoint: "charges"
defstruct object: "charge",
id: nil,
livemode: nil,
location: nil,
amount: nil,
currency: nil,
description: nil,
metadata: nil,
status: nil,
capture: nil,
authorized: nil,
reversed: nil,
captured: nil,
expired: nil,
capturable: nil,
disputable: nil,
refundable: nil,
reversible: nil,
paid: nil,
branch: nil,
device: nil,
transaction: nil,
source_of_fund: nil,
refunded: nil,
refunds: %Omise.List{data: [%Omise.Refund{}]},
return_uri: nil,
offsite: nil,
offline: nil,
installment_terms: nil,
reference: nil,
authorize_uri: nil,
failure_code: nil,
failure_message: nil,
card: %Omise.Card{},
customer: nil,
ip: nil,
fee: nil,
fee_vat: nil,
interest: nil,
interest_vat: nil,
net: nil,
platform_fee: nil,
funding_amount: nil,
funding_currency: nil,
refunded_amount: nil,
dispute: %Omise.Dispute{},
created: nil,
expired_at: nil,
expires_at: nil,
link: nil,
schedule: nil,
terminal: nil,
zero_interest_installments: nil,
source: %Omise.Source{}
@type t :: %__MODULE__{
object: String.t(),
id: String.t(),
livemode: boolean,
location: String.t(),
amount: String.t(),
currency: String.t(),
description: String.t(),
metadata: map,
status: String.t(),
capture: boolean,
authorized: boolean,
reversed: boolean,
captured: boolean,
expired: boolean,
capturable: boolean,
disputable: boolean,
refundable: boolean,
reversible: boolean,
paid: boolean,
branch: String.t(),
device: String.t(),
transaction: String.t(),
source_of_fund: String.t(),
refunded: String.t(),
refunds: Omise.List.t(),
return_uri: String.t(),
offsite: String.t(),
offline: map,
installment_terms: integer,
reference: String.t(),
authorize_uri: String.t(),
failure_code: String.t(),
failure_message: String.t(),
card: Omise.Card.t(),
customer: String.t(),
ip: String.t(),
fee: integer,
fee_vat: integer,
interest: integer,
interest_vat: integer,
net: integer,
platform_fee: integer,
funding_amount: integer,
funding_currency: String.t(),
refunded_amount: integer,
dispute: Omise.Dispute.t(),
created: String.t(),
expired_at: String.t(),
expires_at: String.t(),
link: String.t(),
schedule: String.t(),
terminal: String.t(),
zero_interest_installments: boolean,
source: Omise.Source.t()
}
@doc ~S"""
List all charges.
Returns `{:ok, charges}` if the request is successful, `{:error, error}` otherwise.
## Query Parameters:
* `offset` - (optional, default: 0) The offset of the first record returned.
* `limit` - (optional, default: 20, maximum: 100) The maximum amount of records returned.
* `from` - (optional, default: 1970-01-01T00:00:00Z, format: ISO 8601) The UTC date and time limiting the beginning of returned records.
* `to` - (optional, default: current UTC Datetime, format: ISO 8601) The UTC date and time limiting the end of returned records.
## Examples
Omise.Charge.list
Omise.Charge.list(limit: 10)
"""
@spec list(Keyword.t(), Keyword.t()) :: {:ok, Omise.List.t()} | {:error, Omise.Error.t()}
def list(params \\ [], opts \\ []) do
opts = Keyword.merge(opts, as: %Omise.List{data: [%__MODULE__{}]})
get(@endpoint, params, opts)
end
@doc ~S"""
Retrieve a charge.
## Examples
Omise.Charge.retrieve("chrg_test_4xso2s8ivdej29pqnhz")
"""
@spec retrieve(String.t(), Keyword.t()) :: {:ok, t} | {:error, Omise.Error.t()}
def retrieve(id, opts \\ []) do
opts = Keyword.merge(opts, as: %__MODULE__{})
get("#{@endpoint}/#{id}", [], opts)
end
@doc ~S"""
Create a charge.
Returns `{:ok, charge}` if the request is successful, `{:error, error}` otherwise.
## Request Parameters:
* `customer` - (required or optional) A valid `CUSTOMER_ID` that has at least one card already associated.
By default the default card of the customer will be used.
This parameter is required unless passing a `TOKEN_ID` in the card parameter.
* `card` - (required or optional) A valid unused `TOKEN_ID` or `CARD_ID`.
In the case of the `CARD_ID` the customer parameter must be present and be the owner of the card.
For the `TOKEN_ID`, the customer must not be passed.
* `amount` - (required) The amount in the smallest subunits of the currency used.
For thb (Thai Baht) you'll need to pass the amount in satangs.
* `currency` - (required) The currency in which you want the charge to be done. The default and only valid value is thb.
* `description` - (optional) A custom description for the charge. This value can be searched for in your dashboard.
* `capture` - (optional) Whether or not you want the charge to be captured right away, when not specified it is set to true.
* `return_uri` - (optional) The url where we will return the customer after the charge has been authorized with 3-D Secure.
## Examples
# Charge a card using a token.
Omise.Charge.create(
amount: 1000_00,
currency: "thb",
card: "tokn_test_51w6fvilnsxalda4cih"
)
# Charge a card using a customer.
Omise.Charge.create(
amount: 1000_00,
currency: "thb",
customer: "<KEY>"
)
# Charge a card using a customer and a card.
Omise.Charge.create(
amount: 1000_00,
currency: "thb",
customer: "<KEY>",
card: "card_test_51w6jblhhpzmc2g8bcm"
)
# Create an internet banking charge
Omise.Charge.create(
amount: 1000_00,
currency: "thb",
return_uri: "https://example.com/orders/123/complete",
source: "src_test_59vbms154ab4pe4jh2i"
)
# Create a bill payment charge
Omise.Charge.create(
amount: 1000_00,
currency: "thb",
source: "src_test_59vb8av645gxw48glui"
)
"""
@spec create(Keyword.t(), Keyword.t()) :: {:ok, t} | {:error, Omise.Error.t()}
def create(params, opts \\ []) do
opts = Keyword.merge(opts, as: %__MODULE__{})
post(@endpoint, params, opts)
end
@doc ~S"""
Update a charge.
Returns `{:ok, charge}` if the request is successful, `{:error, error}` otherwise.
## Request Parameters:
* `description` - (optional) A custom description for the charge. This value can be searched for in your dashboard.
## Examples
Omise.Charge.update("chrg_test_4xso2s8ivdej29pqnhz",
description: "The funny thing is that when I am okay, oh it makes me wish for rain")
"""
@spec update(String.t(), Keyword.t(), Keyword.t()) :: {:ok, t} | {:error, Omise.Error.t()}
def update(id, params, opts \\ []) do
opts = Keyword.merge(opts, as: %__MODULE__{})
put("#{@endpoint}/#{id}", params, opts)
end
@doc ~S"""
Capture a charge.
Returns `{:ok, charge}` if the request is successful, `{:error, error}` otherwise.
***NOTE***:
If you have created a charge and passed `capture=false` you'll have an authorized only charge that you can capture at a later time.
You can hold it for as long as permitted by the issuing bank. This delay may vary between cards from 1 to 30 days.
## Examples
Omise.Charge.capture("chrg_test_4xso2s8ivdej29pqnhz")
"""
@spec capture(String.t(), Keyword.t()) :: {:ok, t} | {:error, Omise.Error.t()}
def capture(id, opts \\ []) do
opts = Keyword.merge(opts, as: %__MODULE__{})
post("#{@endpoint}/#{id}/capture", [], opts)
end
@doc ~S"""
Reverse an uncaptured charge.
Returns `{:ok, charge}` if the request is successful, `{:error, error}` otherwise.
***NOTE***:
If you have created a charge and passed `capture=false`,
you'll have an authorized only charge that can be reversed at a later time, releasing the held money.
## Examples
Omise.Charge.reverse("chrg_test_4xso2s8ivdej29pqnhz")
"""
@spec reverse(String.t(), Keyword.t()) :: {:ok, t} | {:error, Omise.Error.t()}
def reverse(id, opts \\ []) do
opts = Keyword.merge(opts, as: %__MODULE__{})
post("#{@endpoint}/#{id}/reverse", [], opts)
end
@doc ~S"""
Search all the charges.
Returns `{:ok, charges}` if the request is successful, `{:error, error}` otherwise.
## Query Parameters:
<https://www.omise.co/search-query-and-filters>
## Examples
Omise.Charge.search(filters: [paid: true])
Omise.Charge.search(query: "omise")
"""
@spec search(Keyword.t(), Keyword.t()) :: {:ok, Omise.Search.t()} | {:error, Omise.Error.t()}
def search(params \\ [], opts \\ []) do
Omise.Search.execute("charge", params, opts)
end
@doc ~S"""
Create a refund.
Returns `{:ok, refund}` if the request is successful, `{:error, error}` otherwise.
## Request Parameters:
* `amount` - The amount in the smallest subunits of the currency used.
So for thb (Thai Baht) you'll need to pass the amount in satangs.
## Examples
Omise.Charge.refund("chrg_test_520jim7x8u6t4si58va", amount: 100_00)
"""
@spec refund(String.t(), Keyword.t(), Keyword.t()) :: {:ok, Omise.Refund.t()} | {:error, Omise.Error.t()}
def refund(id, params, opts \\ []) do
opts = Keyword.merge(opts, as: %Omise.Refund{})
post("#{@endpoint}/#{id}/refunds", params, opts)
end
@doc ~S"""
List all refunds.
Returns `{:ok, refunds}` if the request is successful, `{:error, error}` otherwise.
## Query Parameters:
* `offset` - (optional, default: 0) The offset of the first record returned.
* `limit` - (optional, default: 20, maximum: 100) The maximum amount of records returned.
* `from` - (optional, default: 1970-01-01T00:00:00Z, format: ISO 8601) The UTC date and time limiting the beginning of returned records.
* `to` - (optional, default: current UTC Datetime, format: ISO 8601) The UTC date and time limiting the end of returned records.
## Examples
Omise.Charge.list_refunds("chrg_test_52oo08bwpgnwb95rye8")
"""
@spec list_refunds(String.t(), Keyword.t(), Keyword.t()) :: {:ok, Omise.List.t()} | {:error, Omise.Error.t()}
def list_refunds(id, params \\ [], opts \\ []) do
opts = Keyword.merge(opts, as: %Omise.List{data: [%Omise.Refund{}]})
get("#{@endpoint}/#{id}/refunds", params, opts)
end
@doc ~S"""
Retrieve a refund.
Returns `{:ok, refund}` if the request is successful, `{:error, error}` otherwise.
## Examples
Omise.Charge.retrieve_refund("chrg_test_520jim7x8u6t4si58va", "rfnd_test_4zgf1d7jcw5kr123puq")
"""
@spec retrieve_refund(String.t(), String.t(), Keyword.t()) :: {:ok, t} | {:error, Omise.Error.t()}
def retrieve_refund(id, refund_id, opts \\ []) do
opts = Keyword.merge(opts, as: %Omise.Refund{})
get("#{@endpoint}/#{id}/refunds/#{refund_id}", [], opts)
end
@doc ~S"""
List all charge schedules.
Returns `{:ok, schedules}` if the request is successful, `{:error, error}` otherwise.
## Query Parameters:
* `offset` - (optional, default: 0) The offset of the first record returned.
* `limit` - (optional, default: 20, maximum: 100) The maximum amount of records returned.
* `from` - (optional, default: 1970-01-01T00:00:00Z, format: ISO 8601) The UTC date and time limiting the beginning of returned records.
* `to` - (optional, default: current UTC Datetime, format: ISO 8601) The UTC date and time limiting the end of returned records.
## Examples
Omise.Charge.list_schedules
"""
@spec list_schedules(Keyword.t(), Keyword.t()) :: {:ok, Omise.List.t()} | {:error, Omise.Error.t()}
def list_schedules(params \\ [], opts \\ []) do
opts = Keyword.merge(opts, as: %Omise.List{data: [%Omise.Schedule{}]})
get("#{@endpoint}/schedules", params, opts)
end
end
|
lib/omise/charge.ex
| 0.917778 | 0.472562 |
charge.ex
|
starcoder
|
defmodule Andy.Rover.Actuation do
@moduledoc "Provides the configurations of all rover actuators to be activated"
require Logger
alias Andy.{ActuatorConfig, MotorSpec, LEDSpec, SoundSpec, Activation, Script}
import Andy.Utils
@default_wait 500
@doc "Give the configurations of all Rover actuators"
def actuator_configs() do
[
ActuatorConfig.new(
name: :locomotion,
type: :motor,
# to find and name motors from specs
specs: [
%MotorSpec{name: :left_wheel, port: "outA"},
%MotorSpec{name: :right_wheel, port: "outB"}
],
# scripted actions to be taken upon receiving intents
activations: [
%Activation{
intent: :go_forward,
script: going_forward()
},
%Activation{
intent: :go_backward,
script: going_backward()
},
%Activation{
intent: :turn_right,
script: turning_right()
},
%Activation{
intent: :turn_left,
script: turning_left()
},
%Activation{
intent: :turn,
script: turning()
},
%Activation{
intent: :stop,
script: stopping()
},
%Activation{
intent: :move,
script: moving()
},
%Activation{
intent: :panic,
script: panicking()
},
%Activation{
intent: :wait,
script: waiting()
}
]
),
ActuatorConfig.new(
name: :manipulation,
type: :motor,
specs: [
%MotorSpec{name: :mouth, port: "outC"}
],
activations: [
%Activation{
intent: :eat,
script: eating()
}
]
),
ActuatorConfig.new(
name: :leds,
type: :led,
specs: [
%LEDSpec{name: :lb, position: :left, color: :blue}
],
activations: [
%Activation{
intent: :blue_lights,
script: blue_lights()
}
]
),
ActuatorConfig.new(
name: :sounds,
type: :sound,
specs: [
%SoundSpec{
name: :loud_speech,
type: :speech,
props: %{
volume: :loud,
speed: :normal,
voice: get_voice()
}
}
],
activations: [
%Activation{
intent: :say,
script: say()
}
]
)
]
end
# locomotion
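# Example (hypothetical intent): a :go_forward intent whose value is
# %{speed: :normal, time: 2} yields a script that sets both wheels to
# -1 * normal_rps() and runs them for 2_000 ms.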
defp going_forward() do
fn intent, motors ->
rps_speed = speed(intent.value.speed)
how_long = round(intent.value.time * 1000)
Script.new(:going_forward, motors)
|> Script.add_step(:right_wheel, :set_speed, [:rps, rps_speed * -1])
|> Script.add_step(:left_wheel, :set_speed, [:rps, rps_speed * -1])
|> Script.add_step(:all, :run_for, [how_long])
end
end
defp speed(kind) do
case kind do
:very_fast -> very_fast_rps()
:fast -> fast_rps()
:normal -> normal_rps()
:slow -> slow_rps()
:very_slow -> very_slow_rps()
:zero -> 0
end
end
defp going_backward() do
fn intent, motors ->
rps_speed = speed(intent.value.speed)
how_long = round(intent.value.time * 1000)
Script.new(:going_backward, motors)
|> Script.add_step(:right_wheel, :set_speed, [:rps, rps_speed])
|> Script.add_step(:left_wheel, :set_speed, [:rps, rps_speed])
|> Script.add_step(:all, :run_for, [how_long])
end
end
defp turning_right() do
fn intent, motors ->
how_long = round(intent.value * 1000)
Script.new(:turning_right, motors)
|> Script.add_step(:left_wheel, :set_speed, [:rps, -1 * speed(:normal)])
|> Script.add_step(:right_wheel, :set_speed, [:rps, speed(:normal)])
|> Script.add_step(:all, :run_for, [how_long])
end
end
defp turning_left() do
fn intent, motors ->
how_long = round(intent.value * 1000)
Script.new(:turning_left, motors)
|> Script.add_step(:right_wheel, :set_speed, [:rps, -1 * speed(:normal)])
|> Script.add_step(:left_wheel, :set_speed, [:rps, speed(:normal)])
|> Script.add_step(:all, :run_for, [how_long])
end
end
defp turning() do
fn intent, motors ->
Logger.info("Turning with intent #{inspect(intent)}")
how_long = round(intent.value.turn_time * 1000)
direction = intent.value.turn_direction
toggle =
case direction do
:right -> 1
:left -> -1
end
Script.new(:turning, motors)
|> Script.add_step(:left_wheel, :set_speed, [:rps, -1 * speed(:normal) * toggle])
|> Script.add_step(:right_wheel, :set_speed, [:rps, speed(:normal) * toggle])
|> Script.add_step(:all, :run_for, [how_long])
end
end
defp moving() do
fn intent, motors ->
forward_rps_speed = speed(intent.value.forward_speed)
forward_time_ms = round(intent.value.forward_time * 1000)
turn_direction = intent.value.turn_direction
turn_time_ms = round(intent.value.turn_time * 1000)
script = Script.new(:moving, motors)
script =
case turn_direction do
:right ->
script
|> Script.add_step(:left_wheel, :set_speed, [:rps, -1 * speed(:normal)])
|> Script.add_step(:right_wheel, :set_speed, [:rps, speed(:normal)])
:left ->
script
|> Script.add_step(:left_wheel, :set_speed, [:rps, speed(:normal)])
|> Script.add_step(:right_wheel, :set_speed, [:rps, -1 * speed(:normal)])
end
script
|> Script.add_step(:all, :run_for, [turn_time_ms])
|> Script.add_wait(@default_wait)
|> Script.add_step(:right_wheel, :set_speed, [:rps, forward_rps_speed * -1])
|> Script.add_step(:left_wheel, :set_speed, [:rps, forward_rps_speed * -1])
|> Script.add_step(:all, :run_for, [forward_time_ms])
end
end
defp panicking() do
fn intent, motors ->
script = Script.new(:panicking, motors)
back_off_speed = intent.value.back_off_speed
back_off_time = intent.value.back_off_time
turn_time = intent.value.turn_time
repeats = intent.value.repeats
backward_rps_speed = speed(back_off_speed)
backward_time_ms = round(1000 * back_off_time)
turn_time_ms = round(1000 * turn_time)
Enum.reduce(
1..repeats,
script,
fn _n, acc ->
turn_direction = Enum.random([:right, :left, :none])
# Bind the result so the turn steps become part of the script
acc =
case turn_direction do
:right ->
acc
|> Script.add_step(:left_wheel, :set_speed, [:rps, -1 * speed(:normal)])
|> Script.add_step(:right_wheel, :set_speed, [:rps, speed(:normal)])
:left ->
acc
|> Script.add_step(:right_wheel, :set_speed, [:rps, -1 * speed(:normal)])
|> Script.add_step(:left_wheel, :set_speed, [:rps, speed(:normal)])
:none ->
acc
|> Script.add_step(:right_wheel, :set_speed, [:rps, 0])
|> Script.add_step(:left_wheel, :set_speed, [:rps, 0])
end
acc
|> Script.add_step(:all, :run_for, [turn_time_ms])
|> Script.add_wait(@default_wait)
|> Script.add_step(:right_wheel, :set_speed, [:rps, backward_rps_speed])
|> Script.add_step(:left_wheel, :set_speed, [:rps, backward_rps_speed])
|> Script.add_step(:all, :run_for, [backward_time_ms])
|> Script.add_wait(@default_wait)
end
)
end
end
defp stopping() do
fn _intent, motors ->
Script.new(:stopping, motors)
|> Script.add_step(:all, :coast)
|> Script.add_step(:all, :reset)
end
end
def waiting() do
fn intent, motors ->
sleep = (intent.value.time * 1_000) |> round()
Script.new(:waiting, motors)
|> Script.add_wait(sleep)
end
end
# manipulation
defp eating() do
fn _intent, motors ->
Script.new(:eating, motors)
|> Script.add_step(:mouth, :set_speed, [:rps, 1])
|> Script.add_step(:mouth, :run_for, [2000])
end
end
# light
defp blue_lights() do
fn intent, leds ->
value =
case intent.value do
:on -> 255
:off -> 0
end
Script.new(:blue_lights, leds)
|> Script.add_step(:lb, :set_brightness, [value])
end
end
# Sounds
defp say() do
fn intent, sound_players ->
Script.new(:say, sound_players)
|> Script.add_step(:loud_speech, :speak, [intent.value])
end
end
end
|
lib/andy/platforms/rover/actuation.ex
| 0.782746 | 0.476762 |
actuation.ex
|
starcoder
|
defmodule Bigtable.ReadRows do
@moduledoc """
Provides functions to build `Google.Bigtable.V2.ReadRowsRequest` and submit them to Bigtable.
"""
alias Bigtable.{ChunkReader, Utils}
alias Google.Bigtable.V2
alias V2.Bigtable.Stub
@doc """
Builds a `Google.Bigtable.V2.ReadRowsRequest` with a provided table name.
## Examples
iex> table_name = "projects/[project_id]/instances/[instance_id]/tables/[table_name]"
iex> Bigtable.ReadRows.build(table_name)
%Google.Bigtable.V2.ReadRowsRequest{
app_profile_id: "",
filter: nil,
rows: nil,
rows_limit: 0,
table_name: "projects/[project_id]/instances/[instnace_id]/tables/[table_name]"
}
"""
@spec build(binary()) :: V2.ReadRowsRequest.t()
def build(table_name) when is_binary(table_name) do
V2.ReadRowsRequest.new(table_name: table_name, app_profile_id: "")
end
@doc """
Builds a `Google.Bigtable.V2.ReadRowsRequest` with the configured table name.
## Examples
iex> Bigtable.ReadRows.build()
%Google.Bigtable.V2.ReadRowsRequest{
app_profile_id: "",
filter: nil,
rows: nil,
rows_limit: 0,
table_name: "projects/dev/instances/dev/tables/test"
}
"""
@spec build() :: V2.ReadRowsRequest.t()
def build do
build(Bigtable.Utils.configured_table_name())
end
@doc """
Submits a `Google.Bigtable.V2.ReadRowsRequest` to Bigtable.
Can be called with either a `Google.Bigtable.V2.ReadRowsRequest` or a table name to read all rows from a non-configured table.
Returns `{:ok, chunk_reader_result}` in case of success, `{:error, error}` otherwise.
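## Example

A sketch (the table path is a placeholder):

request = Bigtable.ReadRows.build("projects/[project_id]/instances/[instance_id]/tables/[table_name]")
{:ok, result} = Bigtable.ReadRows.read(request)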
"""
@spec read(V2.ReadRowsRequest.t()) ::
{:error, any()}
| {:ok, ChunkReader.chunk_reader_result()}
def read(%V2.ReadRowsRequest{} = request) do
result =
request
|> Utils.process_request(&Stub.read_rows/3, stream: true)
case result do
{:error, _} ->
result
{:ok, response} ->
process_response(response)
end
end
@spec read(binary()) ::
{:error, any()}
| {:ok, ChunkReader.chunk_reader_result()}
def read(table_name) when is_binary(table_name) do
request = build(table_name)
request
|> read()
end
@doc """
Submits a `Google.Bigtable.V2.ReadRowsRequest` to Bigtable.
Without arguments, `Bigtable.ReadRows.read` will read all rows from the configured table.
Returns `{:ok, chunk_reader_result}` in case of success, `{:error, error}` otherwise.
"""
@spec read() ::
{:error, any()}
| {:ok, ChunkReader.chunk_reader_result()}
def read do
request = build()
request
|> read
end
defp process_response(response) do
{:ok, cr} = ChunkReader.open()
response
|> Enum.filter(&contains_chunks?/1)
|> Enum.flat_map(fn {:ok, resp} -> resp.chunks end)
|> Enum.reduce({:ok, %{}}, fn chunk, accum ->
if match?({:error, _}, accum) do
accum
else
ChunkReader.process(cr, chunk)
end
end)
ChunkReader.close(cr)
end
defp contains_chunks?({:ok, response}), do: !Enum.empty?(response.chunks)
end
|
lib/data/read_rows.ex
| 0.859325 | 0.487185 |
read_rows.ex
|
starcoder
|
defmodule RigMetrics.EventsMetrics do
@moduledoc """
Metrics instrumenter for the Rig Events (consume, produce).
"""
use Prometheus.Metric
# Consumer
@counter name: :rig_consumed_events_total,
help: "Total count of consumed events.",
labels: [:source, :topic]
@counter name: :rig_consumed_events_forwarded_total,
help:
"Total count of consumed events forwarded to any frontend channel (Websocket, Server-Sent Events, Long Polling).",
labels: [:type]
@counter name: :rig_consumed_events_failed_total,
help: "Total count of events failed to be consumed.",
labels: [:source, :topic]
@histogram name: :rig_consumed_event_processing_duration_milliseconds,
labels: [:source, :topic],
buckets: [1, 2, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000],
help: "Event consumer processing execution time in milliseconds."
# Producer
@counter name: :rig_produced_events_failed_total,
help: "Total count of events failed to be produced.",
labels: [:target, :topic]
@counter name: :rig_produced_events_total,
help: "Total count of produced events.",
labels: [:target, :topic]
# To be called at app startup.
def setup do
events = [
[:events, :count_forwarded],
[:events, :count_consumed],
[:events, :count_consume_failed],
[:events, :histogram_consumer_time],
[:events, :count_produced],
[:events, :count_produce_failed]
]
# Attach defined events to a telemetry callback
:telemetry.attach_many("events-metrics", events, &__MODULE__.handle_event/4, nil)
end
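# Example (hypothetical call site): call once during application start,
# before any events are consumed or produced:
#
#     RigMetrics.EventsMetrics.setup()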
# ---
@doc "Increases the Prometheus counter rig_consumed_events_forwarded_total"
def count_forwarded_event(type) do
:telemetry.execute(
[:events, :count_forwarded],
%{},
%{type: type}
)
Counter.inc(name: :rig_consumed_events_forwarded_total, labels: [type])
end
@doc "Increases the Prometheus counters rig_consumed_events_total, rig_consumed_events_failed_total"
def count_failed_event(source, topic) do
:telemetry.execute(
[:events, :count_consumed],
%{},
%{source: source, topic: topic}
)
:telemetry.execute(
[:events, :count_consume_failed],
%{},
%{source: source, topic: topic}
)
Counter.inc(name: :rig_consumed_events_total, labels: [source, topic])
Counter.inc(name: :rig_consumed_events_failed_total, labels: [source, topic])
end
@doc "Increases the Prometheus counters rig_consumed_events_total and observes histogram rig_consumed_event_processing_duration_milliseconds"
def measure_event_processing(source, topic, time) do
:telemetry.execute(
[:events, :count_consumed],
%{},
%{source: source, topic: topic}
)
:telemetry.execute(
[:events, :histogram_consumer_time],
%{},
%{source: source, topic: topic, time: time}
)
Counter.inc(name: :rig_consumed_events_total, labels: [source, topic])
Histogram.observe(
[name: :rig_consumed_event_processing_duration_milliseconds, labels: [source, topic]],
time
)
end
@doc "Increases the Prometheus counter rig_produced_events_total"
def count_produced_event(target, topic) do
:telemetry.execute(
[:events, :count_produced],
%{},
%{target: target, topic: topic}
)
Counter.inc(name: :rig_produced_events_total, labels: [target, topic])
end
@doc "Increases the Prometheus counter rig_produced_events_failed_total"
def count_failed_produce_event(target, topic) do
:telemetry.execute(
[:events, :count_produce_failed],
%{},
%{target: target, topic: topic}
)
Counter.inc(name: :rig_produced_events_failed_total, labels: [target, topic])
end
end
|
lib/rig_metrics/events_metrics.ex
| 0.736685 | 0.5592 |
events_metrics.ex
|
starcoder
|
defmodule GiantSquid do
def read_input(filename) do
lines = File.stream!(filename) |> Enum.map(&String.trim_trailing/1)
[move_line, _blank | lines] = lines
moves =
move_line
|> String.split(",")
|> Enum.map(&String.to_integer/1)
{moves, read_boards(lines)}
end
defp read_boards(lines) do
# 6 is the step to skip empty lines between boards
Enum.chunk_every(lines, 5, 6)
|> Enum.map(&read_board/1)
end
# Given the five lines of a board, builds a map from each number to
# its {{column, row}, marked} tuple
defp read_board(lines) do
lines
|> Enum.with_index()
|> Enum.reduce(%{}, fn {line, row}, acc ->
new_row =
String.split(line)
|> Enum.with_index()
|> Enum.reduce(%{}, fn {number, column}, acc ->
# A number cannot be repeated within the same board
Map.put(acc, String.to_integer(number), {{column, row}, false})
end)
Map.merge(acc, new_row)
end)
end
def mark(board, number) do
if Map.has_key?(board, number) do
Map.update!(board, number, fn {coord, _marked} -> {coord, true} end)
else
board
end
end
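# For example, mark(%{7 => {{0, 0}, false}}, 7) returns %{7 => {{0, 0}, true}},
# while marking a number absent from the board returns the board unchanged.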
defp marked_coordinates(board) do
board
|> Enum.map(fn
{_number, {coord, true}} -> coord
{_number, {_coord, false}} -> nil
end)
|> Enum.filter(fn
nil -> false
{_,_} -> true
end)
end
def unmarked_numbers(board) do
board
|> Enum.map(fn
{_number, {_coord, true}} -> nil
{number, {_coord, false}} -> number
end)
|> Enum.filter(fn
nil -> false
_ -> true
end)
end
def winner?(board) do
marked = MapSet.new(marked_coordinates(board))
paths()
|> Enum.any?(fn path ->
MapSet.subset?(MapSet.new(path), marked)
end)
end
def find_first_winner({moves, boards}) do
find_winner(moves, boards, :first, nil, nil)
end
defp find_winner([], _boards, :last, winner, last_called) do
{winner, last_called}
end
defp find_winner(_moves, _boards, :first, winner, last_called)
when winner != nil do
{winner, last_called}
end
defp find_winner([move | rest], boards, first_last, winner, last_called) do
boards = Enum.map(boards, fn board -> mark(board, move) end)
{new_winner, new_boards} = pop_winners(boards)
{new_winner, new_last_called} =
case new_winner do
nil -> {winner, last_called}
_ -> {new_winner, move}
end
find_winner(rest, new_boards, first_last, new_winner, new_last_called)
end
defp pop_winners(boards) do
pop_winners(boards, nil)
end
defp pop_winners(boards, winner) do
winner_index = Enum.find_index(boards, fn board -> winner?(board) end)
case winner_index do
nil ->
{winner, boards} # No more winners, end recursion
winner_index ->
{new_winner, new_boards} = List.pop_at(boards, winner_index)
pop_winners(new_boards, new_winner)
end
end
def final_score({board, last_called}) do
unmarked_sum =
board
|> unmarked_numbers()
|> Enum.sum()
unmarked_sum * last_called
end
defp paths() do
vertical = for row <- 0..4, do: (for col <- 0..4, do: {row, col})
horizontal = for col <- 0..4, do: (for row <- 0..4, do: {row, col})
vertical ++ horizontal
end
def find_last_winner({moves, boards}) do
find_winner(moves, boards, :last, nil, nil)
end
end
# Part 1
GiantSquid.read_input("test_input")
|> GiantSquid.find_first_winner()
|> GiantSquid.final_score()
|> IO.inspect
GiantSquid.read_input("input")
|> GiantSquid.find_first_winner()
|> GiantSquid.final_score()
|> IO.inspect
# Part 2
GiantSquid.read_input("test_input")
|> GiantSquid.find_last_winner()
|> GiantSquid.final_score()
|> IO.inspect
GiantSquid.read_input("input")
|> GiantSquid.find_last_winner()
|> IO.inspect
|> GiantSquid.final_score()
|> IO.inspect
|
day_4/giant_squid.ex
| 0.766031 | 0.405625 |
giant_squid.ex
|
starcoder
|
defmodule Simplepq do
alias Simplepq.Queue
@moduledoc """
Simple queue that is stored on disk.
Warning! At this moment the functions don't handle the case where the file doesn't exist. Errors can be raised.
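## Example

A minimal round trip (the file path is a placeholder):

{:ok, queue} = Simplepq.create("/tmp/messages.queue")
{:ok, queue} = Simplepq.add(queue, "hello")
{:ok, "hello"} = Simplepq.get(queue)
{:ok, queue} = Simplepq.ack(queue)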
"""
@doc """
Creates a file on the filesystem and creates a `Simplepq.Queue` associated with this file.
Returns `{:ok, queue}` in case of success, else {:error, :file.posix()}.
"""
@spec create(String.t) :: {:ok, queue::Simplepq.Queue} | {:error, :file.posix()}
def create(file_path) when is_bitstring(file_path) do
new_equeue = %Queue{file_path: file_path, equeue: {}}
update_queue(new_equeue, :queue.new())
end
@doc """
Opens queue from file `file_path`.
Returns `{:ok, queue}` in case of success, or `{:error, :bad_file}` when the file
cannot be converted to a term or the term is not a queue.
"""
def open(file_path) do
binary_queue = File.read!(file_path)
try do
# Can raise ArgumentError
term_from_file = :erlang.binary_to_term(binary_queue)
if :queue.is_queue(term_from_file) do
{:ok, %Queue{file_path: file_path, equeue: term_from_file}}
else
{:error, :bad_file}
end
rescue
_ in ArgumentError -> {:error, :bad_file}
end
end
@doc """
Adds an element to the end of the queue.
Returns `{:ok, queue}` if element added, else {:error, :file.posix()}.
"""
@spec add(Simplepq.Queue, String.t) :: {:ok, queue::Simplepq.Queue} | {:error, :file.posix()}
def add(%Simplepq.Queue{equeue: equeue} = queue, message) do
equeue = :queue.in(message, equeue)
update_queue(queue, equeue)
end
@doc """
Reads the first element from the queue without removing it.
Returns `{:ok, message}` if the element exists, or `{:error, :empty}` when the queue is empty.
"""
@spec get(Simplepq.Queue) :: {:ok, message::String.t} | {:error, :empty}
def get(%Queue{equeue: equeue}) do
if :queue.is_empty(equeue) do
{:error, :empty}
else
{:ok, :queue.get(equeue)}
end
end
@doc """
Removes the first element from the queue (acknowledging it). This method doesn't return the
element and doesn't have a special return for the case when the queue is empty; it just
returns the queue.
Returns `{:ok, queue}` if the element was successfully removed or the queue is empty,
else `{:error, reason}` when writing the file failed.
"""
@spec ack(Simplepq.Queue) :: {:ok, queue::Simplepq.Queue} | {:error, :file.posix()}
def ack(%Queue{equeue: equeue} = queue) do
{result , equeue} = :queue.out(equeue)
case result do
{:value, _} -> update_queue(queue, equeue)
_ -> {:ok, queue}
end
end
@doc """
Takes the first element from the queue and places it at the end of the queue.
Returns `{:ok, queue}` if the element was successfully moved or the queue is empty,
else `{:error, reason}` when writing the file failed.
"""
@spec reject(Simplepq.Queue) :: {:ok, queue::Simplepq.Queue} | {:error, :file.posix()}
def reject(%Queue{equeue: equeue} = queue) do
{result , equeue} = :queue.out(equeue)
case result do
{:value, value} -> update_queue(queue, :queue.in(value, equeue))
_ -> {:ok, queue}
end
end
@doc """
Returns the number of elements in the queue.
"""
@spec length(Simplepq.Queue) :: number
def length(%Queue{equeue: equeue}) do
:queue.len(equeue)
end
@spec update_queue(Simplepq.Queue, tuple) :: {:ok, queue::Simplepq.Queue} | {:error, :file.posix()}
defp update_queue(%Queue{file_path: file_path} = queue, equeue) do
case File.write(file_path, :erlang.term_to_binary(equeue)) do
:ok -> {:ok, %{queue | equeue: equeue}}
{:error, reason} -> {:error, reason}
end
end
end
|
lib/simplepq.ex
| 0.832066 | 0.436082 |
simplepq.ex
|
starcoder
|
defmodule Mix.Tasks.Hex.Organization do
use Mix.Task
@shortdoc "Manages Hex.pm organizations"
@moduledoc """
Manages the list of authorized Hex.pm organizations.
Organizations is a feature of Hex.pm to host and manage private packages. See
<https://hex.pm/docs/private> for more information.
By default you will be authorized to all your applications when running
`mix hex.user auth` and this is the recommended approach. This task is mainly
provided for a CI and build systems where access to an organization is needed
without authorizing a user.
By authorizing a new organization a new key is created for fetching packages
from the organizations repository and the repository key is stored on the
local machine.
To use a package from an organization add `organization: "my_organization"` to the
dependency declaration in `mix.exs`:
{:plug, "~> 1.0", organization: "my_organization"}
## Authorize an organization
This command will generate an API key used to authenticate access to the organization.
See the `hex.user` tasks to list and control all your active API keys.
mix hex.organization auth ORGANIZATION [--key KEY] [--key-name KEY_NAME]
## Deauthorize and remove an organization
mix hex.organization deauth NAME
## Generate a repository authentication key
This command is useful to pre-generate keys for use with `mix hex.organization auth ORGANIZATION --key KEY`
on CI servers or similar systems. It returns the hash of the generated key that you can pass to
`auth ORGANIZATION --key KEY`. This key allows read-only access to the repository
mix hex.organization key ORGANIZATION [--key-name KEY_NAME]
## List all authorized organizations
This command will only list organizations you have authorized with this task, it will not
list organizations you have access to by having authorized with `mix hex.user auth`.
mix hex.organization list
## Command line options
* `--key KEY` - Hash of key used to authenticate HTTP requests to repository, if
omitted will generate a new key with your account credentials. This flag
is useful if you have a key pre-generated with `mix hex.organization key`
and want to authenticate on a CI server or similar system
* `--key-name KEY_NAME` - By default Hex will base the key name on your machine's
hostname and the organization name, use this option to give your own name.
"""
@switches [key: :string, key_name: :string]
def run(args) do
Hex.start()
{opts, args} = Hex.OptionParser.parse!(args, switches: @switches)
case args do
["auth", name] ->
auth(name, opts)
["deauth", name] ->
deauth(name)
["key", name] ->
key(name, opts)
["list"] ->
list()
_ ->
Mix.raise("""
Invalid arguments, expected one of:
mix hex.organization auth ORGANIZATION
mix hex.organization deauth ORGANIZATION
mix hex.organization key ORGANIZATION
mix hex.organization list
""")
end
end
defp auth(name, opts) do
key = opts[:key]
if opts[:key], do: test_key(key, name)
key = key || Mix.Tasks.Hex.generate_organization_key(name, opts[:key_name])
Mix.Tasks.Hex.auth_organization("hexpm:#{name}", key)
end
defp deauth(name) do
Hex.State.fetch!(:repos)
|> Map.delete("hexpm:#{name}")
|> Hex.Config.update_repos()
end
defp key(name, opts) do
Hex.Shell.info(Mix.Tasks.Hex.generate_organization_key(name, opts[:key_name]))
end
defp list() do
Enum.each(Hex.State.fetch!(:repos), fn {name, _repo} ->
case String.split(name, ":", parts: 2) do
["hexpm", name] ->
Hex.Shell.info(name)
_ ->
:ok
end
end)
end
defp test_key(key, name) do
case Hex.API.Auth.get("repository", name, key: key) do
{:ok, {code, _body, _}} when code in 200..299 ->
:ok
other ->
Hex.Utils.print_error_result(other)
Mix.raise("Failed to authenticate against organization repository with given key")
end
end
end
|
lib/mix/tasks/hex.organization.ex
| 0.828072 | 0.478712 |
hex.organization.ex
|
starcoder
|
defmodule Commanded.EventStore.Adapters.EventStore do
@moduledoc """
[EventStore](https://github.com/commanded/eventstore) adapter for
[Commanded](https://github.com/commanded/commanded).
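## Example

A configuration sketch (the config key follows Commanded's adapter convention
and is an assumption here):

config :commanded, event_store_adapter: Commanded.EventStore.Adapters.EventStore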
"""
alias Commanded.EventStore.Adapters.EventStore.Mapper
@behaviour Commanded.EventStore
@all_stream "$all"
@impl Commanded.EventStore
def child_spec, do: []
@impl Commanded.EventStore
def append_to_stream(stream_uuid, expected_version, events) do
EventStore.append_to_stream(
stream_uuid,
expected_version,
Enum.map(events, &Mapper.to_event_data/1)
)
end
@impl Commanded.EventStore
def stream_forward(stream_uuid, start_version \\ 0, read_batch_size \\ 1_000) do
case EventStore.stream_forward(stream_uuid, start_version, read_batch_size) do
{:error, error} -> {:error, error}
stream -> Stream.map(stream, &Mapper.from_recorded_event/1)
end
end
@impl Commanded.EventStore
def subscribe(:all), do: subscribe(@all_stream)
@impl Commanded.EventStore
def subscribe(stream_uuid) do
EventStore.subscribe(stream_uuid, mapper: &Mapper.from_recorded_event/1)
end
@impl Commanded.EventStore
def subscribe_to(:all, subscription_name, subscriber, start_from) do
EventStore.subscribe_to_all_streams(
subscription_name,
subscriber,
subscription_options(start_from)
)
end
@impl Commanded.EventStore
def subscribe_to(stream_uuid, subscription_name, subscriber, start_from) do
EventStore.subscribe_to_stream(
stream_uuid,
subscription_name,
subscriber,
subscription_options(start_from)
)
end
@impl Commanded.EventStore
def ack_event(subscription, %Commanded.EventStore.RecordedEvent{} = event) do
%Commanded.EventStore.RecordedEvent{event_number: event_number} = event
EventStore.ack(subscription, event_number)
end
@impl Commanded.EventStore
def unsubscribe(subscription) do
EventStore.Subscriptions.Subscription.unsubscribe(subscription)
end
@impl Commanded.EventStore
def delete_subscription(:all, subscription_name) do
EventStore.delete_subscription(@all_stream, subscription_name)
end
@impl Commanded.EventStore
def delete_subscription(stream_uuid, subscription_name) do
EventStore.delete_subscription(stream_uuid, subscription_name)
end
@impl Commanded.EventStore
def read_snapshot(source_uuid) do
with {:ok, snapshot_data} <- EventStore.read_snapshot(source_uuid) do
{:ok, Mapper.from_snapshot_data(snapshot_data)}
end
end
@impl Commanded.EventStore
def record_snapshot(%Commanded.EventStore.SnapshotData{} = snapshot) do
snapshot
|> Mapper.to_snapshot_data()
|> EventStore.record_snapshot()
end
@impl Commanded.EventStore
def delete_snapshot(source_uuid) do
EventStore.delete_snapshot(source_uuid)
end
defp subscription_options(start_from) do
[
start_from: start_from,
mapper: &Mapper.from_recorded_event/1
]
end
end
|
lib/event_store_adapter.ex
| 0.778186 | 0.429968 |
event_store_adapter.ex
|
starcoder
|
defmodule Patch.Mock.Value do
@moduledoc """
Interface for generating mock values.
In a test this module is imported into the test and so using this module directly is not
necessary.
"""
alias Patch.Apply
alias Patch.Mock.Values
@type t ::
Values.Callable.t()
| Values.CallableStack.t()
| Values.Cycle.t()
| Values.Raises.t()
| Values.Scalar.t()
| Values.Sequence.t()
| Values.Throws.t()
| term()
@value_modules [
Values.Callable,
Values.CallableStack,
Values.Cycle,
Values.Raises,
Values.Scalar,
Values.Sequence,
Values.Throws
]
@doc """
Create a new `Values.Callable` to be used as the mock value.
When a patched function has a `Values.Callable` as its mock value, it will invoke the callable
with the arguments to the patched function on every invocation to generate a new value to
return.
```elixir
patch(Example, :example, callable(fn arg -> {:patched, arg} end))
assert Example.example(1) == {:patched, 1} # passes
assert Example.example(2) == {:patched, 2} # passes
assert Example.example(3) == {:patched, 3} # passes
```
Any function literal will automatically be promoted into a `Values.Callable` unless it is
wrapped in a `scalar/1` call.
See `callable/2` for more configuration options. `callable/1` calls use the default
configuration options
- dispatch: :apply
- evaluate: :passthrough
"""
@spec callable(target :: function()) :: Values.Callable.t()
def callable(target) do
callable(target, [])
end
@doc """
`callable/2` allows the test author to provide additional configuration.
There are two options
## `:dispatch`
Controls how the arguments are dispatched to the callable.
- `:apply` is the default. It will call the function with the same arity as the incoming call.
- `:list` will always call the callable with a single argument, a list of all the incoming
arguments.
### Apply Example
```elixir
patch(Example, :example, callable(fn a, b, c -> {:patched, a, b, c} end, :apply))
assert Example.example(1, 2, 3) == {:patched, 1, 2, 3} # passes
```
### List Example
```elixir
patch(Example, :example, callable(fn
[a, b, c] ->
{:patched, a, b, c}
[a] ->
{:patched, a}
end, :list))
assert Example.example(1, 2, 3) == {:patched, 1, 2, 3} # passes
assert Example.example(1) == {:patched, 1} # passes
```
## `:evaluate`
Controls how the callable is evaluated.
- `:passthrough` is the default. It will pass through to the original function if the provided
callable fails to pattern match to the incoming call
- `:strict` will bubble up any `BadArityError` or `FunctionClauseErrors`.
## Legacy Configuration Behavior (may be deprecated)
This function accepts either a single atom, in which case it will assign that to the `:dispatch`
configuration and use the default `:evaluate` option.
The following calls are equivalent
```elixir
# Using legacy configuration convention
patch(Example, :example, callable(fn args -> {:patched, args}, :apply))
# Using explicit options without evaluate
patch(Example, :example, callable(fn args -> {:patched, args}, dispatch: :apply))
# Using fully specified explicit options
patch(Example, :example, callable(fn args -> {:patched, args}, dispatch: :apply, evaluate: :passthrough))
```
## Multiple Arities
`dispatch: :list` used to be the preferred way to deal with multiple arities, here's an example.
```elixir
patch(Example, :example, callable(fn
[a] ->
{:patched, a}
[a, b, c] ->
{:patched, a, b, c}
end, dispatch: :list))
assert Example.example(1) == {:patched, 1}
assert Example.example(1, 2, 3) == {:patched, 1, 2, 3}
```
Patch now has "Stacked Callables" so the preferred method is to use the equivalent code
```elixir
patch(Example, :example, fn a -> {:patched, a} end)
patch(Example, :example, fn a, b, c -> {:patched, a, b, c} end)
assert Example.example(1) == {:patched, 1}
assert Example.example(1, 2, 3) == {:patched, 1, 2, 3}
```
"""
@spec callable(target :: function(), dispatch :: Values.Callable.dispatch_mode()) :: Values.Callable.t()
def callable(target, dispatch) when is_atom(dispatch) do
callable(target, dispatch: dispatch)
end
@spec callable(target :: function(), options :: [Values.Callable.option()]) :: Values.Callable.t()
def callable(target, options) when is_list(options) do
Values.Callable.new(target, options)
end
@doc """
Create a new `Values.Cycle` to be used as the mock value.
When a patched function has a `Values.Cycle` as its mock value, it will provide the first value
in the cycle and then move the first value to the end of the cycle on every invocation.
Consider a function patched with `cycle([1, 2, 3])` via the following code
```elixir
patch(Example, :example, cycle([1, 2, 3]))
```
| Invocation | Cycle Before Call | Return Value | Cycle After Call |
|------------|-------------------|--------------|------------------|
| 1 | [1, 2, 3] | 1 | [2, 3, 1] |
| 2 | [2, 3, 1] | 2 | [3, 1, 2] |
| 3 | [3, 1, 2] | 3 | [1, 2, 3] |
| 4 | [1, 2, 3] | 1 | [2, 3, 1] |
| 5 | [2, 3, 1] | 2 | [3, 1, 2] |
| 6 | [3, 1, 2] | 3 | [1, 2, 3] |
| 7 | [1, 2, 3] | 1 | [2, 3, 1] |
We could continue the above table forever since the cycle will repeat endlessly. Cycles can
contain `callable/1,2`, `raise/1,2` and `throw/1` mock values.
"""
@spec cycle(values :: [term()]) :: Values.Cycle.t()
defdelegate cycle(values), to: Values.Cycle, as: :new
@doc """
Creates a new `Values.Scalar` to be used as the mock value.
When a patched function has a `Values.Scalar` as its mock value, it will provide the scalar
value on every invocation
```elixir
patch(Example, :example, scalar(:patched))
assert Example.example() == :patched # passes
assert Example.example() == :patched # passes
assert Example.example() == :patched # passes
```
When patching with any term that isn't a function, it will automatically be promoted into a
`Values.Scalar`.
```elixir
patch(Example, :example, :patched)
assert Example.example() == :patched # passes
assert Example.example() == :patched # passes
assert Example.example() == :patched # passes
```
Since functions are always automatically promoted to `Values.Callable`, if a function is meant
as a scalar value it **must** be wrapped in a call to `scalar/1`.
```elixir
patch(Example, :get_name_normalizer, scalar(&String.downcase/1))
assert Example.get_name_normalizer == &String.downcase/1 # passes
```
"""
@spec scalar(value :: term()) :: Values.Scalar.t()
defdelegate scalar(value), to: Values.Scalar, as: :new
@doc """
Creates a new `Values.Sequence` to be used as a mock value.
When a patched function has a `Values.Sequence` as its mock value, it will provide the first
value in the sequence as the return value and then discard the first value. Once the sequence
is down to a final value it will be retained and returned on every subsequent invocation.
Consider a function patched with `sequence([1, 2, 3])` via the following code
```elixir
patch(Example, :example, sequence([1, 2, 3]))
```
| Invocation | Sequence Before Call | Return Value | Sequence After Call |
|------------|----------------------|--------------|---------------------|
| 1 | [1, 2, 3] | 1 | [2, 3] |
| 2 | [2, 3] | 2 | [3] |
| 3 | [3] | 3 | [3] |
| 4 | [3] | 3 | [3] |
| 5 | [3] | 3 | [3] |
We could continue the above table forever since the sequence will continue to return the last
value endlessly. Sequences can contain `callable/1,2`, `raise/1,2` and `throw/1` mock values.
There is one special behavior of sequence, and that's an empty sequence, which always returns
the value `nil` on every invocation.
If the test author would like to simulate an exhaustible sequence, one that returns a set number
of items and then responds to every other call with `nil`, they can simply add a `nil` as the
last element in the sequence
```elixir
patch(Example, :example, sequence([1, 2, 3, nil])
```
| Invocation | Sequence Before Call | Return Value | Sequence After Call |
|------------|----------------------|--------------|---------------------|
| 1 | [1, 2, 3, nil] | 1 | [2, 3, nil] |
| 2 | [2, 3, nil] | 2 | [3, nil] |
| 3 | [3, nil] | 3 | [nil] |
| 4 | [nil] | nil | [nil] |
| 5 | [nil] | nil | [nil] |
"""
@spec sequence(values :: [term()]) :: Values.Sequence.t()
defdelegate sequence(values), to: Values.Sequence, as: :new
@doc """
Guard that checks whether a value is a proper Values module
"""
defguard is_value(module) when module in @value_modules
@doc """
Creates a special value that raises a RuntimeError with the given message.
```elixir
patch(Example, :example, raises("patched"))
assert_raise RuntimeError, "patched", fn ->
Example.example()
end
```
"""
@spec raises(message :: String.t()) :: Values.Raises.t()
defdelegate raises(message), to: Values.Raises, as: :new
@doc """
Creates a special value that raises the given exception with the provided attributes.
```elixir
patch(Example, :example, raises(ArgumentError, message: "patched"))
assert_raise ArgumentError, "patched", fn ->
Example.example()
end
```
"""
@spec raises(exception :: module(), attributes :: Keyword.t()) :: Values.Raises.t()
defdelegate raises(exception, attributes), to: Values.Raises, as: :new
@doc """
Creates a special value that throws the provided value when evaluated.
```elixir
patch(Example, :example, throws(:patched))
assert catch_throw(Example.example()) == :patched
```
"""
@spec throws(value :: term()) :: Values.Throws.t()
defdelegate throws(value), to: Values.Throws, as: :new
@doc """
Advances the given value.
Sequences and Cycles both have meaningful advances; for all other value types this acts as a no-op.
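A sketch:

```elixir
advance(cycle([1, 2, 3]))  # returns a cycle that will yield 2 next
advance(scalar(:patched))  # no-op, the scalar is returned unchanged
```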
"""
@spec advance(value :: t()) :: t()
def advance(%module{} = value) when is_value(module) do
module.advance(value)
end
def advance(value) do
value
end
@doc """
Generate the next return value and advance the underlying value.
"""
@spec next(value :: t(), arguments :: [term()]) :: {:ok, t(), term()} | :error
def next(%Values.Scalar{} = value, arguments) do
Values.Scalar.next(value, arguments)
end
def next(%module{} = value, arguments) when is_value(module) do
with {:ok, next, return_value} <- module.next(value, arguments) do
{:ok, _, return_value} = next(return_value, arguments)
{:ok, next, return_value}
end
end
def next(callable, arguments) when is_function(callable) do
with {:ok, result} <- Apply.safe(callable, arguments) do
{:ok, callable, result}
end
end
def next(scalar, _arguments) do
{:ok, scalar, scalar}
end
end
|
lib/patch/mock/value.ex
| 0.953966 | 0.920896 |
value.ex
|
starcoder
|
defmodule YukiHelper.CLI do
@moduledoc """
Provides the following commands:
```console
yuki help
yuki config
yuki lang.list
yuki test
yuki testcase.list
yuki testcase.download
```
For the usage of each command, refer to the help command.
```console
yuki help COMMAND
```
"""
@commands [
"config",
"lang.list",
"test",
"testcase.list",
"testcase.download"
]
@version Mix.Project.config()[:version]
@name Mix.Project.config()[:name]
@doc """
Entry point when an escript is started.
"""
@spec main([String.t()]) :: none()
def main([]), do: main(["help"])
def main(["--version"]) do
IO.puts("#{@name} v#{@version}")
end
def main(["help" = command]) do
opts = Application.get_env(:mix, :colors)
opts = [width: 80, enabled: IO.ANSI.enabled?()] ++ opts
print_doc(command, @moduledoc, opts)
end
def main(["help", command]) when command in @commands do
doc = module(command).doc()
opts = Application.get_env(:mix, :colors)
opts = [width: 80, enabled: IO.ANSI.enabled?()] ++ opts
print_doc(command, doc, opts)
end
def main([command | args]) when command in @commands do
Mix.Task.run("yuki.#{command}", args)
end
def main(args) do
opts = Application.get_env(:mix, :colors)
opts = [width: 80, enabled: IO.ANSI.enabled?()] ++ opts
"""
`#{Enum.join(args, " ")}` is not a supported command.
Please refer to the `help` command.
"""
|> IO.ANSI.Docs.print("text/markdown", opts)
end
defp print_doc(command, doc, opts) do
IO.ANSI.Docs.print_headings(["yuki #{command}"], opts)
IO.ANSI.Docs.print(doc, "text/markdown", opts)
end
defp module(command)
defp module("config"), do: Mix.Tasks.Yuki.Config
defp module("lang.list"), do: Mix.Tasks.Yuki.Lang.List
defp module("test"), do: Mix.Tasks.Yuki.Test
defp module("testcase.list"), do: Mix.Tasks.Yuki.Testcase.List
defp module("testcase.download"), do: Mix.Tasks.Yuki.Testcase.Download
end
|
lib/yuki_helper/cli.ex
| 0.569733 | 0.606702 |
cli.ex
|
starcoder
|
defmodule Hades.Helpers do
@moduledoc """
Provides some useful functions for internally handling `NMAP` outputs.
"""
import SweetXml
require Logger
alias Hades.Command
alias Hades.Argument
@doc """
This functions parses the given file that is located at `filepath` and
returns a `t:map/0` filled with the `hosts` and `time` content that was saved
from `NMAP` into the `XML` file.
Returns `t:map/0`.
## Example
iex> Hades.Helpers.parse_xml("test/mocs/test.xml")
%{
hosts: [
%{hostname: "FelixsMACNCHEESEPRO.root.box", ip: "192.168.120.42", ports: []}
],
time: %{
elapsed: 0.03,
endstr: "Fri Feb 7 09:55:09 2020",
startstr: "Fri Feb 7 09:55:09 2020",
unix: 1581065709
}
}
"""
@spec parse_xml(String.t()) :: map
def parse_xml(filepath) do
File.read!(filepath)
|> xmap(
hosts: [
~x"//host"l,
ip: ~x"./address/@addr"s,
hostname: ~x"./hostnames/hostname/@name[1]"s,
ports: [
~x"//port"l,
port: ~x"./@portid"i,
product: ~x"./service/@product"s,
name: ~x"./service/@name"s,
version: ~x"./service/@version"s,
script: ~x"./script/@id"sl,
output: ~x"./script/@output"l,
state: ~x"./state/@state"s
]
],
time: [
~x"//nmaprun",
startstr: ~x"./@startstr"s,
endstr: ~x"./runstats/finished/@timestr"s,
elapsed: ~x"./runstats/finished/@elapsed"f,
unix: ~x"./runstats/finished/@time"i
]
)
end
@doc """
With this function it is possible to validate given IP addresses.
Returns `{:ok, ip_address}` if the given `ip_address` was validated successfully.
## Example
iex> Hades.Helpers.check_ip_address("192.168.120.1")
{:ok, "192.168.120.1"}
"""
# TODO: Also provide a technique to test for submask and FQDN's
@spec check_ip_address(String.t()) :: {atom, String.t()}
def check_ip_address(ip_address) do
case :inet.parse_address(String.to_charlist(ip_address)) do
{:ok, _ip} ->
{:ok, ip_address}
{:error, :einval} ->
{:error, "No valid IP-Address given."}
end
end
@doc """
Macro used to generate the argument functions that are described inside
the `Hades.Arguments` modules.
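## Example

A sketch: an `options_map` entry `{:sV, arg}` generates `arg_sV/0` when
`arg.argument` is unset, or `arg_sV/1` when the argument takes a value:

Hades.Arguments.ServiceVersionDetection.arg_sV()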
"""
defmacro option_functions(options_map) do
quote bind_quoted: [options_map: options_map] do
Enum.each(options_map, fn {k, v} ->
function_name = String.to_atom("arg_" <> Atom.to_string(k))
if v.argument do
@doc """
#{v.desc}
"""
def unquote(function_name)(argument),
do: Map.put(unquote(Macro.escape(v)), :argument, argument)
else
@doc """
#{v.desc}
"""
def unquote(function_name)(), do: unquote(Macro.escape(v))
end
end)
end
end
@doc """
If the context is not directly specified, simply returns `:ok`.
Returns `:ok`.
## Example
iex> Hades.Helpers.validate_contexts!(:unspecified, [:scan_type, :option])
:ok
"""
def validate_contexts!(:unspecified, _), do: :ok
@doc """
Checks if the given context is in the required ones.
Raises `raise(ArgumentError)` if the given context is not in the required ones.
## Examples
iex> Hades.Helpers.validate_contexts!(:option, [:scan_type, :option])
nil
iex> Hades.Helpers.validate_contexts!(:undefined, [:scan_type, :option])
** (ArgumentError) argument error
"""
def validate_contexts!(context, required) do
unless Enum.member?(required, context), do: raise(ArgumentError)
end
@doc """
Prepares the command to be executed, by converting the `%Command{}` into
proper parameters to be fed to NMAP.
Under normal circumstances `Hades.scan/1` should be used, use `prepare`
only when converted args are needed.
Returns `{nmap_args_string, the_aimed_target}`.
## Example
iex> command = Hades.new_command()
...> |> Hades.add_argument(Hades.Arguments.ServiceVersionDetection.arg_sV())
...> |> Hades.add_argument(Hades.Arguments.ServiceVersionDetection.arg_version_all())
...> |> Hades.add_argument(Hades.Arguments.ScriptScan.arg_script("vulners"))
...> |> Hades.add_target("192.168.0.1")
iex> Hades.Helpers.prepare(command)
{"--script vulners --version-all -sV", "192.168.0.1"}
"""
@spec prepare(command :: Command.t()) :: {binary() | nil, list(binary)}
def prepare(%Command{scan_types: scan_types, target: target}) do
if Enum.empty?(scan_types) do
raise ArgumentError, "Must specify at least one scan type"
end
if target == "" do
raise ArgumentError, "Must specify a target"
end
options = Enum.map(scan_types, &arg_for_option/1) |> List.flatten()
{Enum.join(options, " "), target}
end
defp arg_for_option(%Argument{name: name, options: false, argument: nil}) do
~w(#{name} )
end
defp arg_for_option(%Argument{name: name, argument: arg}) when not is_nil(arg) do
~w(#{name} #{arg} )
end
@doc """
Reads hades `timeout` from the config.
If unspecified, returns the default `timeout`, which is currently `300_000` (corresponds to 5 minutes).
This `timeout` is propagated to the function `Task.await()`.
If the specified `timeout` period is exceeded, it is assumed that the process running the NMAP command has timed out.
The `timeout` is specified in `ms`.
Returns `t:integer/0`.
## Example
iex> Hades.Helpers.hades_timeout()
300000
"""
def hades_timeout do
case Application.get_env(:hades, :timeout, nil) do
nil -> 300_000
timeout -> timeout
end
end
@doc """
Reads hades `output_path` from the config.
If there is nothing specified in the config then the default path will be returned.
Returns `t:binary/0`.
## Example
iex> Hades.Helpers.hades_path()
"/var/folders/c1/f0tm33sd3tgg_ds8kyhykyw80000gn/T/briefly-1581/hades-73279-791202-3hades/8b09d31e1a1142869ce8b15faf27ed45.xml"
"""
def hades_path do
case Application.get_env(:hades, :output_path, nil) do
nil ->
{:ok, path} = Briefly.create(directory: true)
path <> "/" <> UUID.uuid4(:hex) <> ".xml"
path ->
path
end
end
end
|
lib/helpers.ex
| 0.848816 | 0.513059 |
helpers.ex
|
starcoder
|
defmodule EspEx.MessageStore.Postgres do
@moduledoc """
This is the real implementation of MessageStore. It will execute the needed
queries on Postgres through Postgrex by calling the functions provided in
[ESP](https://github.com/Carburetor/ESP/tree/master/app/config/functions/stream).
You should be able to infer what to write: it's just a matter of passing the
required arguments to the SQL functions and converting any returned values.
Whenever a stream name is expected, please use the %StreamName struct and
make sure to convert it to a string.
"""
use EspEx.MessageStore
import EspEx.MessageStore,
only: [
is_version: 1,
is_expected_version: 1,
is_batch_size: 1
]
alias EspEx.StreamName
alias EspEx.RawEvent
alias EspEx.RawEvent.Metadata
@wrong_version "Wrong expected version:"
@wrong_list "No messages"
@read_batch_sql """
select * from stream_read_batch(
_stream_name := $1,
_position := $2,
_batch_size := $3
)
"""
@read_last_sql "select * from stream_read_last(_stream_name := $1)"
@write_sql """
select * from stream_write(
_id := $1::uuid,
_stream_name := $2,
_type := $3,
_data := $4,
_metadata := $5,
_expected_version := $6
)
"""
@write_batch_sql """
select * from stream_write_batch($1::minimal_message[], $2, $3)
"""
@version_sql "select * from stream_version(_stream_name := $1)"
@pg_notify_sql "select pg_notify($1, $2)"
@impl EspEx.MessageStore
@doc """
Write has an optional expected_version argument. This argument could be one of:
- nil: no version expected
- no_stream: no message ever written to this stream, the Postgres
stream_version position will return null (max(position) is null if no rows
are present)
- An integer (0+): Representing the expected version
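## Example

A minimal sketch (the stream name and event fields are illustrative):

    raw_event = %EspEx.RawEvent{
      id: Ecto.UUID.generate(),
      stream_name: EspEx.StreamName.from_string("campaign-123"),
      type: "Started",
      data: %{"name" => "Black Friday"}
    }
    EspEx.MessageStore.Postgres.write!(raw_event, :no_stream)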
"""
def write!(%RawEvent{} = raw_event, expected_version \\ nil)
when is_expected_version(expected_version) do
expected_version = to_number_version(expected_version)
params = raw_event_to_params(raw_event)
params = params ++ [expected_version]
query(@write_sql, params).rows
|> rows_to_single_result
rescue
error in Postgrex.Error -> as_known_error!(error)
end
@impl EspEx.MessageStore
@doc """
- `raw_events` list of events to write
- `stream_name` stream where events will be written to (will overwrite
any stream_name provided in the raw_events)
- optional `expected_version` argument. This argument could be one of:
- `nil`: no version expected
- `:no_stream`: no message ever written to this stream, the Postgres
stream_version position will return null (max(position) is null if no
rows are present)
- An integer (0+): Representing the expected version
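## Example

A sketch reusing the event shape from `write!/2` (values are illustrative):

    stream = EspEx.StreamName.from_string("campaign-123")
    EspEx.MessageStore.Postgres.write_batch!([raw_event], stream, :no_stream)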
"""
def write_batch!(
raw_events,
%StreamName{} = stream_name,
expected_version \\ nil
)
when is_list(raw_events) and is_expected_version(expected_version) do
raw_events_params = raw_events_to_params(raw_events)
stream_name = to_string(stream_name)
expected_version = to_number_version(expected_version)
params = [raw_events_params, stream_name, expected_version]
query(@write_batch_sql, params).rows
|> rows_to_single_result
rescue
error in Postgrex.Error -> as_known_error!(error)
end
@impl EspEx.MessageStore
@doc """
Retrieves the last event of the stream identified by `stream_name` (based on greatest position).
"""
def read_last(%StreamName{} = stream_name) do
query(@read_last_sql, [to_string(stream_name)]).rows
|> rows_to_raw_events
|> List.last()
end
@impl EspEx.MessageStore
@doc """
Retrieves events from the stream identified by `stream_name`, in batches of 10 by default.
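## Example

A sketch (the stream name is illustrative):

    stream = EspEx.StreamName.from_string("campaign-123")
    EspEx.MessageStore.Postgres.read_batch(stream, 0, 50)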
"""
def read_batch(%StreamName{} = stream_name, position \\ 0, batch_size \\ 10)
when is_version(position) and is_batch_size(batch_size) do
query(@read_batch_sql, [to_string(stream_name), position, batch_size]).rows
|> rows_to_raw_events
end
@impl EspEx.MessageStore
@doc """
Retrieves the last message position, or nil if none are present
"""
def read_version(%StreamName{} = stream_name) do
query(@version_sql, [to_string(stream_name)]).rows
|> rows_to_single_result
end
@impl EspEx.MessageStore
@doc """
Receives notifications as GenServer casts. Two types of notifications are
received:
- `{:notification, connection_pid, ref, channel, payload}` with a notify
from Postgres (check
[Postgrex documentation](https://hexdocs.pm/postgrex/Postgrex.Notifications.html#listen/3))
- `{:reminder}` which is received every X seconds
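## Example

A sketch of a listening GenServer (the callback shapes follow the two
notification formats listed above):

    {:ok, ref} = EspEx.MessageStore.Postgres.listen(stream_name)

    def handle_cast({:notification, _pid, _ref, _channel, _payload}, state) do
      # a new message was written to the stream
      {:noreply, state}
    end

    def handle_cast({:reminder}, state) do
      # periodic reminder tick
      {:noreply, state}
    end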
"""
def listen(%StreamName{} = stream_name, opts \\ []) do
EspEx.MessageStore.Postgres.Notifications.listen(stream_name, opts)
end
@impl EspEx.MessageStore
@doc """
Stops notifications
"""
def unlisten(ref, opts \\ []) do
EspEx.MessageStore.Postgres.Notifications.unlisten(ref, opts)
end
@doc """
Sends an SQL NOTIFY through postgres
"""
@spec notify(channel :: String.t(), data :: String.t()) :: :ok
def notify(channel, data) do
query(@pg_notify_sql, [channel, data])
:ok
end
defp to_number_version(:no_stream), do: -1
defp to_number_version(nil), do: nil
defp to_number_version(expected_version), do: expected_version
defp query(raw_sql, parameters) do
EspEx.MessageStore.Postgres.Repo
|> Ecto.Adapters.SQL.query!(raw_sql, parameters)
end
defp raw_events_to_params(raw_events) do
Enum.map(raw_events, &raw_event_to_minimal/1)
end
defp raw_event_to_minimal(%RawEvent{
id: id,
type: type,
data: data,
metadata: metadata
}) do
id = uuid_as_uuid(id)
{id, type, data, metadata}
end
defp raw_event_to_params(%RawEvent{
id: id,
stream_name: stream_name,
type: type,
data: data,
metadata: metadata
}) do
id = uuid_as_uuid(id)
[id, to_string(stream_name), type, data, metadata]
end
defp rows_to_single_result([[value]]), do: value
defp rows_to_raw_events(rows) do
rows
|> Enum.map(&row_to_raw_event/1)
end
defp row_to_raw_event([
id,
stream_name,
type,
position,
global_position,
data,
metadata,
time
]) do
id = uuid_as_string(id)
%RawEvent{
id: id,
stream_name: StreamName.from_string(stream_name),
type: type,
position: position,
global_position: global_position,
data: symbolize(data),
metadata: struct(Metadata, metadata),
time: time
}
end
defp symbolize(map) do
map
|> Map.new(fn {k, v} -> {String.to_existing_atom(k), v} end)
end
defp as_known_error!(error) do
message = to_string(error.postgres.message)
cond do
String.starts_with?(message, @wrong_version) ->
raise EspEx.MessageStore.ExpectedVersionError, message: message
String.starts_with?(message, @wrong_list) ->
raise EspEx.MessageStore.EmptyBatchError, message: message
true ->
raise error
end
end
defp uuid_as_uuid(id) do
{:ok, uuid} =
id
|> Ecto.UUID.cast!()
|> Ecto.UUID.dump()
uuid
end
defp uuid_as_string(id) do
Ecto.UUID.cast!(id)
end
end
|
lib/esp_ex/message_store/postgres.ex
| 0.814754 | 0.521167 |
postgres.ex
|
starcoder
|
defmodule Spell do
@moduledoc """
`Spell` is a WAMP client library and an application for managing WAMP peers.
## Examples
See `Crossbar` for how to start a Crossbar.io server for interactive
development.
Once up, you can connect a new peer by calling:
{:ok, peer} = Spell.connect(Crossbar.uri,
realm: Crossbar.realm,
)
## Peer Interface
The WAMP protocol defines peers which communicate by passing messages.
Peers create and communicate over one-to-one bidirectional channels.
Use `Spell.connect` to create and connect a new peer to the WAMP server.
`Spell` delegates common client role functions to provide a single
interface. See the `defdelegate` statements in the source or run
`Spell.__info__(:functions)` for the full list of module functions.
## WAMP Support
Spell supports the client portion of the
[basic WAMP profile, RC4](https://github.com/tavendo/WAMP/blob/master/spec/basic.md).
### Client Roles:
* Publisher: `Spell.Role.Publisher`
* Subscriber: `Spell.Role.Subscriber`
* Caller: `Spell.Role.Caller`
* Callee: `Spell.Role.Callee`
See `Spell.Role` for how to create new roles.
### Transports
* WebSocket: `Spell.Transport.WebSocket`
* RawSocket: `Spell.Transport.RawSocket`
See `Spell.Transport` for how to create new transports.
### Serializers
* JSON: `Spell.Serializer.JSON`
* MessagePack: `Spell.Serializer.MessagePack`
See `Spell.Serializer` for how to create new serializers.
"""
use Application
require Logger
alias Spell.Peer
alias Spell.Message
alias Spell.Role
# Delegate commonly used role functions into `Spell`.
# WARNING: `defdelegate` drops the documentation -- kills the illusion.
defdelegate [cast_goodbye(peer),
cast_goodbye(peer, options),
call_goodbye(peer),
call_goodbye(peer, options)], to: Role.Session
defdelegate [cast_publish(peer, topic),
cast_publish(peer, topic, options),
call_publish(peer, topic),
call_publish(peer, topic, options),
receive_published(peer, request_id)], to: Role.Publisher
defdelegate [cast_subscribe(peer, topic),
cast_subscribe(peer, topic, options),
call_subscribe(peer, topic),
call_subscribe(peer, topic, options),
receive_event(peer, subscription),
cast_unsubscribe(peer, subscription),
call_unsubscribe(peer, subscription),
receive_unsubscribed(peer, unsubscribe)], to: Role.Subscriber
defdelegate [cast_call(peer, procedure),
cast_call(peer, procedure, options),
receive_result(peer, call_id),
call(peer, procedure),
call(peer, procedure, options)], to: Role.Caller
defdelegate [cast_register(peer, procedure),
cast_register(peer, procedure, options),
receive_registered(peer, register_id),
call_register(peer, procedure),
call_register(peer, procedure, options),
cast_unregister(peer, registration),
call_unregister(peer, registration),
receive_unregistered(peer, registration),
cast_yield(peer, invocation),
cast_yield(peer, invocation, options)], to: Role.Callee
# Module Attributes
@supervisor_name __MODULE__.Supervisor
@default_retries 5
@default_retry_interval 1000
@default_roles [Role.Publisher,
Role.Subscriber,
Role.Caller,
Role.Callee]
# Public API
@doc """
Creates and returns a new peer with an open WAMP session at `uri`.
## Options
* `:realm :: String.t` the peer's configured realm
* `:roles = #{inspect(@default_roles)} :: [module | {module, any}]` the
list of roles to start the client with. Each item can be the bare role's
module, or a 2-tuple of the module and init options.
* `:retries = #{@default_retries} :: integer` number of times to
retry connecting
* `:retry_interval = #{@default_retry_interval} :: integer` interval
in milliseconds between retries
* `:timeout = 2000 :: integer` connection timeout for a peer
* `:authentication :: Keyword.t`, defaults to `[]`
* `:id :: String.t` the `authid` to authenticate with
* `:schemes :: Keyword.t` the authentication schemes supported. See
`Spell.Authenticate`.
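## Example

A sketch against a local Crossbar.io router (the URI and realm are
illustrative):

    {:ok, peer} = Spell.connect("ws://localhost:8080/ws",
                                realm: "realm1",
                                roles: [Spell.Role.Publisher,
                                        Spell.Role.Subscriber])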
"""
# TODO: there should be an asynchronous connect which doesn't await the WELCOME
@spec connect(String.t, Keyword.t) :: {:ok, pid}
def connect(uri, options \\ [])
when is_binary(uri) and is_list(options) do
case parse_uri(uri) do
{:ok, %{protocol: :raw_socket, host: host, port: port}} ->
transport = %{module: Spell.Transport.RawSocket,
options: [host: host, port: port]}
init_peer(options, transport)
{:ok, %{protocol: protocol, host: host, port: port, path: path}} when protocol in [:ws, :wss] ->
transport = %{module: Spell.Transport.WebSocket,
options: [host: host, port: port, path: path, protocol: to_string(protocol)]}
init_peer(options, transport)
{:error, reason} -> {:error, reason}
end
end
defp init_peer(options, transport_options) do
case Keyword.put(options, :transport, transport_options) |> normalize_options() do
{:ok, options} ->
{:ok, peer} = Peer.add(options)
case Role.Session.await_welcome(peer) do
{:ok, _welcome} -> {:ok, peer}
{:error, reason} -> {:error, reason}
end
{:error, reason} -> {:error, reason}
end
end
@doc """
Close the peer by sending a GOODBYE message. This call is synchronous; it
blocks until receiving the acknowledging GOODBYE.
"""
@spec close(pid) :: Message.t | {:error, any}
def close(peer, options \\ []) do
case call_goodbye(peer, options) do
{:ok, _goodbye} -> :ok
{:error, reason} -> {:error, reason}
end
end
# Application Callbacks
def start(_type, _args) do
import Supervisor.Spec, warn: false
children = [supervisor(Spell.Peer, [])]
options = [strategy: :one_for_one, name: @supervisor_name]
Supervisor.start_link(children, options)
end
# Private Functions
@spec parse_uri(String.t | char_list) :: {:ok, Map.t} | {:error, any}
defp parse_uri(string) when is_binary(string) do
string |> to_char_list() |> parse_uri()
end
defp parse_uri(chars) when is_list(chars) do
case :http_uri.parse(chars, [scheme_defaults: [ws: 80, wss: 443]]) do
{:ok, {protocol, [], host, port, path, []}} ->
{:ok, %{protocol: protocol,
host: to_string(host),
port: port,
path: to_string(path)}}
{:error, reason} ->
{:error, reason}
end
end
# TODO: This function is a bit of a mess. Validation utils would be nice
@spec normalize_options(Keyword.t) :: tuple
defp normalize_options(options) when is_list(options) do
case Dict.get(options, :roles, @default_roles)
|> Role.normalize_role_options() do
{:ok, role_options} ->
session_options = Keyword.take(options, [:realm, :authentication])
%{transport: Keyword.get(options, :transport),
serializer: Keyword.get(options, :serializer, Spell.Config.serializer),
owner: Keyword.get(options, :owner),
role: %{options: Keyword.put_new(role_options, Role.Session,
session_options),
features: Keyword.get(options, :features,
Role.collect_features(role_options))},
realm: Keyword.get(options, :realm),
retries: Keyword.get(options, :retries, @default_retries),
retry_interval: Keyword.get(options, :retry_interval,
@default_retry_interval)}
|> normalize_options()
{:error, reason} -> {:error, {:role, reason}}
end
end
defp normalize_options(%{transport: nil}) do
{:error, :transport_required}
end
defp normalize_options(%{transport: transport_options} = options)
when is_list(transport_options) do
%{options | transport: %{module: Spell.Config.transport,
options: transport_options}}
|> normalize_options()
end
defp normalize_options(%{transport: transport_module} = options)
when is_atom(transport_module) do
%{options | transport: %{module: transport_module, options: options}}
|> normalize_options()
end
defp normalize_options(%{serializer: serializer_module} = options)
when is_atom(serializer_module) do
%{options | serializer: %{module: serializer_module, options: []}}
|> normalize_options()
end
defp normalize_options(%{realm: nil}) do
{:error, :realm_required}
end
defp normalize_options(%{transport: %{module: transport_module,
options: transport_options},
serializer: %{module: serializer_module,
options: serializer_options},
role: %{options: role_options},
realm: realm} = options)
when is_atom(transport_module) and is_list(transport_options)
and is_atom(serializer_module) and is_list(serializer_options)
and is_list(role_options) and is_binary(realm) do
{:ok, options}
end
defp normalize_options(_options) do
{:error, :bad_options}
end
end
|
lib/spell.ex
| 0.88819 | 0.51751 |
spell.ex
|
starcoder
|
defmodule Game.Format.Template do
@moduledoc """
Template a string with variables
"""
@doc """
Render a template with a context
Variables are denoted with `[key]` in the template string. You can also
include leading spaces that can be collapsed if the variable is nil or does
not exist in the context.
For instance:
~s(You say[ adverb_phrase], {say}"[message]"{/say})
If templated with `%{message: "Hello"}` will output as:
You say, {say}"Hello"{/say}
"""
@spec render(map(), String.t()) :: String.t()
def render(context, string) do
context =
context
|> Map.get(:assigns, %{})
|> Enum.into(%{}, fn {key, val} -> {to_string(key), val} end)
with {:ok, ast} <- VML.parse(string) do
VML.collapse(replace_variables(ast, context))
else
{:error, _module, _error} ->
"{error}Could not parse text.{/error}"
end
end
defp replace_variables([], _context), do: []
defp replace_variables([node | nodes], context) do
[replace_variable(node, context) | replace_variables(nodes, context)]
end
defp replace_variable({:variable, space, name}, context) do
case replace_variable({:variable, name}, context) do
{:string, ""} ->
{:string, ""}
{:string, value} ->
{:string, space <> value}
value when is_list(value) ->
[{:string, space} | value]
end
end
defp replace_variable({:variable, name}, context) do
case Map.get(context, name, "") do
"" ->
{:string, ""}
nil ->
{:string, ""}
value when is_list(value) ->
value
value ->
{:string, value}
end
end
defp replace_variable({:tag, attributes, nodes}, context) do
name = Keyword.get(attributes, :name)
attributes = Keyword.get(attributes, :attributes, [])
attributes =
attributes
|> Enum.map(fn {key, value} ->
{key, replace_variables(value, context)}
end)
{:tag, [name: name, attributes: attributes], replace_variables(nodes, context)}
end
defp replace_variable(node, _context), do: node
end
|
lib/game/format/template.ex
| 0.88639 | 0.410077 |
template.ex
|
starcoder
|
defmodule Ash.DataLayer.Ets do
@moduledoc """
An ETS (Erlang Term Storage) backed Ash Datalayer, for testing.
This is used for testing. *Do not use this data layer in production*
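A sketch of a resource using this data layer (the resource name is
illustrative, and attribute definitions are omitted):

    defmodule MyApp.Post do
      use Ash.Resource, data_layer: Ash.DataLayer.Ets

      ets do
        private? true
      end
    end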
"""
alias Ash.Actions.Sort
alias Ash.Filter.{Expression, Not, Predicate}
alias Ash.Filter.Predicate.{Eq, GreaterThan, In, IsNil, LessThan}
@behaviour Ash.DataLayer
@ets %Ash.Dsl.Section{
name: :ets,
describe: """
A section for configuring the ets data layer
""",
schema: [
private?: [
type: :boolean,
default: false
]
]
}
use Ash.Dsl.Extension, sections: [@ets]
alias Ash.Dsl.Extension
@spec private?(Ash.resource()) :: boolean
def private?(resource) do
Extension.get_opt(resource, [:ets], :private?, false, true)
end
defmodule Query do
@moduledoc false
defstruct [:resource, :filter, :limit, :sort, relationships: %{}, offset: 0, aggregates: []]
end
@impl true
def can?(resource, :async_engine) do
not private?(resource)
end
def can?(_, :composite_primary_key), do: true
def can?(_, :upsert), do: true
def can?(_, :create), do: true
def can?(_, :read), do: true
def can?(_, :update), do: true
def can?(_, :destroy), do: true
def can?(_, :sort), do: true
def can?(_, :filter), do: true
def can?(_, :limit), do: true
def can?(_, :offset), do: true
def can?(_, :boolean_filter), do: true
def can?(_, :transact), do: false
def can?(_, {:filter_predicate, _, %In{}}), do: true
def can?(_, {:filter_predicate, _, %Eq{}}), do: true
def can?(_, {:filter_predicate, _, %LessThan{}}), do: true
def can?(_, {:filter_predicate, _, %GreaterThan{}}), do: true
def can?(_, {:filter_predicate, _, %IsNil{}}), do: true
def can?(_, {:sort, _}), do: true
def can?(_, _), do: false
@impl true
def resource_to_query(resource) do
%Query{
resource: resource
}
end
@impl true
def limit(query, limit, _), do: {:ok, %{query | limit: limit}}
@impl true
def offset(query, offset, _), do: {:ok, %{query | offset: offset}}
@impl true
def filter(query, filter, _resource) do
{:ok, %{query | filter: filter}}
end
@impl true
def sort(query, sort, _resource) do
{:ok, %{query | sort: sort}}
end
@impl true
def add_aggregate(query, aggregate, _) do
{:ok, %{query | aggregates: [aggregate | query.aggregates]}}
end
@impl true
def run_query(
%Query{resource: resource, filter: filter, offset: offset, limit: limit, sort: sort},
_resource
) do
with {:ok, records} <- get_records(resource),
filtered_records <- filter_matches(records, filter) do
offset_records =
filtered_records
|> Sort.runtime_sort(sort)
|> Enum.drop(offset || 0)
limited_records =
if limit do
Enum.take(offset_records, limit)
else
offset_records
end
{:ok, limited_records}
else
{:error, error} -> {:error, error}
end
end
defp get_records(resource) do
with {:ok, table} <- wrap_or_create_table(resource),
{:ok, record_tuples} <- ETS.Set.to_list(table) do
{:ok, Enum.map(record_tuples, &elem(&1, 1))}
end
end
def filter_matches(records, nil), do: records
def filter_matches(records, filter) do
Enum.filter(records, &matches_filter?(&1, filter.expression))
end
defp matches_filter?(_record, nil), do: true
defp matches_filter?(_record, boolean) when is_boolean(boolean), do: boolean
defp matches_filter?(
record,
%Predicate{
predicate: predicate,
attribute: %{name: name},
relationship_path: []
}
) do
matches_predicate?(record, name, predicate)
end
defp matches_filter?(record, %Expression{op: :and, left: left, right: right}) do
matches_filter?(record, left) && matches_filter?(record, right)
end
defp matches_filter?(record, %Expression{op: :or, left: left, right: right}) do
matches_filter?(record, left) || matches_filter?(record, right)
end
defp matches_filter?(record, %Not{expression: expression}) do
not matches_filter?(record, expression)
end
defp matches_filter?(record, %IsNil{field: field, nil?: true}) do
Map.fetch(record, field) == {:ok, nil}
end
defp matches_filter?(record, %IsNil{field: field, nil?: false}) do
Map.fetch(record, field) != {:ok, nil}
end
defp matches_predicate?(record, field, %Eq{value: predicate_value}) do
Map.fetch(record, field) == {:ok, predicate_value}
end
defp matches_predicate?(record, field, %LessThan{value: predicate_value}) do
case Map.fetch(record, field) do
{:ok, value} -> value < predicate_value
:error -> false
end
end
defp matches_predicate?(record, field, %GreaterThan{value: predicate_value}) do
case Map.fetch(record, field) do
{:ok, value} -> value > predicate_value
:error -> false
end
end
defp matches_predicate?(record, field, %In{values: predicate_values}) do
case Map.fetch(record, field) do
{:ok, value} -> value in predicate_values
:error -> false
end
end
defp matches_predicate?(record, field, %IsNil{field: field, nil?: nil?}) do
case Map.fetch(record, field) do
{:ok, nil} -> nil?
{:ok, _} -> !nil?
:error -> nil?
end
end
@impl true
def upsert(resource, changeset) do
create(resource, changeset)
end
@impl true
def create(resource, changeset) do
pkey =
resource
|> Ash.Resource.primary_key()
|> Enum.into(%{}, fn attr ->
{attr, Ash.Changeset.get_attribute(changeset, attr)}
end)
with {:ok, table} <- wrap_or_create_table(resource),
record <- Ash.Changeset.apply_attributes(changeset),
{:ok, _} <- ETS.Set.put(table, {pkey, record}) do
{:ok, record}
else
{:error, error} -> {:error, error}
end
end
@impl true
def destroy(resource, %{data: record}) do
pkey = Map.take(record, Ash.Resource.primary_key(resource))
with {:ok, table} <- wrap_or_create_table(resource),
{:ok, _} <- ETS.Set.delete(table, pkey) do
:ok
else
{:error, error} -> {:error, error}
end
end
@impl true
def update(resource, changeset) do
create(resource, changeset)
end
defp wrap_or_create_table(resource) do
case ETS.Set.wrap_existing(resource) do
{:error, :table_not_found} ->
protection =
if private?(resource) do
:private
else
:public
end
ETS.Set.new(
name: resource,
protection: protection,
ordered: true,
read_concurrency: true
)
{:ok, table} ->
{:ok, table}
{:error, other} ->
{:error, other}
end
end
end
|
lib/ash/data_layer/ets.ex
| 0.847021 | 0.424233 |
ets.ex
|
starcoder
|
defmodule Phoenix.LiveDashboard.Router do
@moduledoc """
Provides LiveView routing for LiveDashboard.
"""
@doc """
Defines a LiveDashboard route.
It expects the `path` the dashboard will be mounted at
and a set of options.
This will also generate a named helper called `live_dashboard_path/2`
which you can use to link directly to the dashboard, such as:
<%= link "Dashboard", to: live_dashboard_path(conn, :home) %>
Note you should only use `link/2` to link to the dashboard (and not
`live_redirect`/`live_link`), as it has to set its own session on first
render.
## Options
* `:live_socket_path` - Configures the socket path. it must match
the `socket "/live", Phoenix.LiveView.Socket` in your endpoint.
* `:csp_nonce_assign_key` - an assign key to find the CSP nonce
value used for assets. Supports either `atom()` or a map of
type `%{optional(:img) => atom(), optional(:script) => atom(), optional(:style) => atom()}`
* `:ecto_repos` - the repositories to show database information.
Currently only PSQL databases are supported
* `:env_keys` - Configures environment variables to display.
It is defined as a list of string keys. If not set, the environment
information will not be displayed.
* `:metrics` - Configures the module to retrieve metrics from.
It can be a `module` or a `{module, function}`. If nothing is
given, the metrics functionality will be disabled.
* `:metrics_history` - Configures a callback for retrieving metric history.
It must be an "MFA" tuple of `{Module, :function, arguments}` such as
metrics_history: {MyStorage, :metrics_history, []}
If not set, metrics will start out empty/blank and only display
data that occurs while the browser page is open.
* `:request_logger_cookie_domain` - Configures the domain the request_logger
cookie will be written to. It can be a string or `:parent` atom.
When a string is given, it will directly set cookie domain to the given
value. When `:parent` is given, it will take the parent domain from current
endpoint host (if host is "www.acme.com" the cookie will be scoped on
"acme.com"). When not set, the cookie will be scoped to current domain.
* `:allow_destructive_actions` - When true, allow destructive actions directly
from the UI. Defaults to `false`. The following destructive actions are
available in the dashboard:
* "Kill process" - a "Kill process" button on the process modal
Note that custom pages given to "Additional pages" may support their own
destructive actions.
* `:additional_pages` - A keyword list of additional pages
## Examples
defmodule MyAppWeb.Router do
use Phoenix.Router
import Phoenix.LiveDashboard.Router
scope "/", MyAppWeb do
pipe_through [:browser]
live_dashboard "/dashboard",
metrics: {MyAppWeb.Telemetry, :metrics},
env_keys: ["APP_USER", "VERSION"],
metrics_history: {MyStorage, :metrics_history, []},
request_logger_cookie_domain: ".acme.com"
end
end
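Custom pages can be added with `:additional_pages` (a sketch; assumes
`MyAppWeb.CustomPage` implements `Phoenix.LiveDashboard.PageBuilder`):

    live_dashboard "/dashboard",
      additional_pages: [custom: MyAppWeb.CustomPage]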
"""
defmacro live_dashboard(path, opts \\ []) do
quote bind_quoted: binding() do
scope path, alias: false, as: false do
import Phoenix.LiveView.Router, only: [live: 4]
opts = Phoenix.LiveDashboard.Router.__options__(opts)
# All helpers are public contracts and cannot be changed
live "/", Phoenix.LiveDashboard.PageLive, :home, opts
live "/:page", Phoenix.LiveDashboard.PageLive, :page, opts
live "/:node/:page", Phoenix.LiveDashboard.PageLive, :page, opts
end
end
end
@doc false
def __options__(options) do
live_socket_path = Keyword.get(options, :live_socket_path, "/live")
metrics =
case options[:metrics] do
nil ->
nil
mod when is_atom(mod) ->
{mod, :metrics}
{mod, fun} when is_atom(mod) and is_atom(fun) ->
{mod, fun}
other ->
raise ArgumentError,
":metrics must be a tuple with {Mod, fun}, " <>
"such as {MyAppWeb.Telemetry, :metrics}, got: #{inspect(other)}"
end
env_keys =
case options[:env_keys] do
nil ->
nil
keys when is_list(keys) ->
keys
other ->
raise ArgumentError,
":env_keys must be a list of strings, got: " <> inspect(other)
end
metrics_history =
case options[:metrics_history] do
nil ->
nil
{module, function, args}
when is_atom(module) and is_atom(function) and is_list(args) ->
{module, function, args}
other ->
raise ArgumentError,
":metrics_history must be a tuple of {module, function, args}, got: " <>
inspect(other)
end
additional_pages =
case options[:additional_pages] do
nil ->
[]
pages when is_list(pages) ->
normalize_additional_pages(pages)
other ->
raise ArgumentError, ":additional_pages must be a keyword, got: " <> inspect(other)
end
request_logger_cookie_domain =
case options[:request_logger_cookie_domain] do
nil ->
nil
domain when is_binary(domain) ->
domain
:parent ->
:parent
other ->
raise ArgumentError,
":request_logger_cookie_domain must be a binary or :parent atom, got: " <>
inspect(other)
end
ecto_repos = options[:ecto_repos]
csp_nonce_assign_key =
case options[:csp_nonce_assign_key] do
nil -> nil
key when is_atom(key) -> %{img: key, style: key, script: key}
%{} = keys -> Map.take(keys, [:img, :style, :script])
end
allow_destructive_actions = options[:allow_destructive_actions] || false
session_args = [
env_keys,
allow_destructive_actions,
metrics,
metrics_history,
additional_pages,
request_logger_cookie_domain,
ecto_repos,
csp_nonce_assign_key
]
[
session: {__MODULE__, :__session__, session_args},
private: %{live_socket_path: live_socket_path, csp_nonce_assign_key: csp_nonce_assign_key},
layout: {Phoenix.LiveDashboard.LayoutView, :dash},
as: :live_dashboard
]
end
defp normalize_additional_pages(pages) do
Enum.map(pages, fn
{path, module} when is_atom(path) and is_atom(module) ->
{path, {module, []}}
{path, {module, args}} when is_atom(path) and is_atom(module) ->
{path, {module, args}}
other ->
msg =
"invalid value in :additional_pages, " <>
"must be a tuple {path, {module, args}} or {path, module}, where path " <>
"is an atom and the module implements Phoenix.LiveDashboard.PageBuilder, got: "
raise ArgumentError, msg <> inspect(other)
end)
end
@doc false
def __session__(
conn,
env_keys,
allow_destructive_actions,
metrics,
metrics_history,
additional_pages,
request_logger_cookie_domain,
ecto_repos,
csp_nonce_assign_key
) do
metrics_session = %{
"metrics" => metrics,
"metrics_history" => metrics_history
}
request_logger_session = %{
"request_logger" => Phoenix.LiveDashboard.RequestLogger.param_key(conn),
"cookie_domain" => request_logger_cookie_domain
}
{pages, requirements} =
[
home: {Phoenix.LiveDashboard.HomePage, %{"env_keys" => env_keys}},
os_mon: {Phoenix.LiveDashboard.OSMonPage, %{}},
metrics: {Phoenix.LiveDashboard.MetricsPage, metrics_session},
request_logger: {Phoenix.LiveDashboard.RequestLoggerPage, request_logger_session},
applications: {Phoenix.LiveDashboard.ApplicationsPage, %{}},
processes: {Phoenix.LiveDashboard.ProcessesPage, %{}},
ports: {Phoenix.LiveDashboard.PortsPage, %{}},
sockets: {Phoenix.LiveDashboard.SocketsPage, %{}},
ets: {Phoenix.LiveDashboard.EtsPage, %{}}
]
|> Enum.concat(ecto_stats(ecto_repos))
|> Enum.concat(additional_pages)
|> Enum.map(fn {key, {module, opts}} ->
{session, requirements} = initialize_page(module, opts)
{{key, {module, session}}, requirements}
end)
|> Enum.unzip()
%{
"pages" => pages,
"allow_destructive_actions" => allow_destructive_actions,
"requirements" => requirements |> Enum.concat() |> Enum.uniq(),
"csp_nonces" => %{
img: conn.assigns[csp_nonce_assign_key[:img]],
style: conn.assigns[csp_nonce_assign_key[:style]],
script: conn.assigns[csp_nonce_assign_key[:script]]
}
}
end
defp ecto_stats(nil), do: [{:ecto_stats, {Phoenix.LiveDashboard.EctoStatsPage, %{repo: nil}}}]
defp ecto_stats(repos) do
for repo <- List.wrap(repos) do
page =
repo
|> Macro.underscore()
|> String.replace("/", "_")
|> Kernel.<>("_info")
|> String.to_atom()
{page, {Phoenix.LiveDashboard.EctoStatsPage, %{repo: repo}}}
end
end
defp initialize_page(module, opts) do
case module.init(opts) do
{:ok, session} ->
{session, []}
{:ok, session, requirements} ->
validate_requirements(module, requirements)
{session, requirements}
end
end
defp validate_requirements(module, requirements) do
Enum.each(requirements, fn
{key, value} when key in [:application, :module, :process] and is_atom(value) ->
:ok
other ->
raise "unknown requirement #{inspect(other)} from #{inspect(module)}"
end)
end
end
|
lib/phoenix/live_dashboard/router.ex
| 0.895074 | 0.552419 |
router.ex
|
starcoder
|
defmodule Exq.Api.Server do
@moduledoc """
The API deals with getting current stats for the UI / API.
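All stats are fetched with synchronous `GenServer` calls. A sketch, assuming
the default server name:

    api = Exq.Api.Server.server_name(nil)
    {:ok, queues} = GenServer.call(api, :queues)
    {:ok, size} = GenServer.call(api, {:queue_size, "default"})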
"""
alias Exq.Support.Config
alias Exq.Redis.JobQueue
alias Exq.Redis.JobStat
use GenServer
defmodule State do
defstruct redis: nil, namespace: nil
end
def start_link(opts \\ []) do
GenServer.start_link(__MODULE__, opts, name: server_name(opts[:name]))
end
## ===========================================================
## GenServer callbacks
## ===========================================================
def init(opts) do
{:ok, %State{redis: opts[:redis], namespace: opts[:namespace]}}
end
def handle_call(:processes, _from, state) do
processes = JobStat.processes(state.redis, state.namespace)
{:reply, {:ok, processes}, state}
end
def handle_call(:busy, _from, state) do
count = JobStat.busy(state.redis, state.namespace)
{:reply, {:ok, count}, state}
end
def handle_call(:nodes, _from, state) do
nodes = JobStat.nodes(state.redis, state.namespace)
{:reply, {:ok, nodes}, state}
end
def handle_call({:stats, key}, _from, state) do
count = JobStat.get_count(state.redis, state.namespace, key)
{:reply, {:ok, count}, state}
end
def handle_call({:stats, key, dates}, _from, state) do
counts = JobStat.get_counts(state.redis, state.namespace, Enum.map(dates, &"#{key}:#{&1}"))
{:reply, {:ok, counts}, state}
end
def handle_call(:queues, _from, state) do
queues = JobQueue.list_queues(state.redis, state.namespace)
{:reply, {:ok, queues}, state}
end
def handle_call({:failed, options}, _from, state) do
jobs = JobQueue.failed(state.redis, state.namespace, options)
{:reply, {:ok, jobs}, state}
end
def handle_call({:retries, options}, _from, state) do
jobs = JobQueue.scheduled_jobs(state.redis, state.namespace, "retry", options)
{:reply, {:ok, jobs}, state}
end
def handle_call(:jobs, _from, state) do
jobs = JobQueue.jobs(state.redis, state.namespace)
{:reply, {:ok, jobs}, state}
end
def handle_call({:jobs, :scheduled, options}, _from, state) do
jobs = JobQueue.scheduled_jobs(state.redis, state.namespace, "schedule", options)
{:reply, {:ok, jobs}, state}
end
def handle_call({:jobs, :scheduled_with_scores}, _from, state) do
jobs = JobQueue.scheduled_jobs_with_scores(state.redis, state.namespace, "schedule")
{:reply, {:ok, jobs}, state}
end
def handle_call({:jobs, queue, options}, _from, state) do
jobs = JobQueue.jobs(state.redis, state.namespace, queue, options)
{:reply, {:ok, jobs}, state}
end
def handle_call(:queue_size, _from, state) do
sizes = JobQueue.queue_size(state.redis, state.namespace)
{:reply, {:ok, sizes}, state}
end
def handle_call({:queue_size, queue}, _from, state) do
size = JobQueue.queue_size(state.redis, state.namespace, queue)
{:reply, {:ok, size}, state}
end
def handle_call(:scheduled_size, _from, state) do
size = JobQueue.scheduled_size(state.redis, state.namespace)
{:reply, {:ok, size}, state}
end
def handle_call(:retry_size, _from, state) do
size = JobQueue.retry_size(state.redis, state.namespace)
{:reply, {:ok, size}, state}
end
def handle_call(:failed_size, _from, state) do
size = JobQueue.failed_size(state.redis, state.namespace)
{:reply, {:ok, size}, state}
end
def handle_call({:find_failed, jid}, _from, state) do
{:ok, job} = JobStat.find_failed(state.redis, state.namespace, jid)
{:reply, {:ok, job}, state}
end
def handle_call({:find_failed, score, jid, options}, _from, state) do
{:ok, job} = JobStat.find_failed(state.redis, state.namespace, score, jid, options)
{:reply, {:ok, job}, state}
end
def handle_call({:find_job, queue, jid}, _from, state) do
response = JobQueue.find_job(state.redis, state.namespace, jid, queue)
{:reply, response, state}
end
def handle_call({:find_scheduled, jid}, _from, state) do
{:ok, job} = JobQueue.find_job(state.redis, state.namespace, jid, :scheduled)
{:reply, {:ok, job}, state}
end
def handle_call({:find_scheduled, score, jid, options}, _from, state) do
{:ok, job} = JobStat.find_scheduled(state.redis, state.namespace, score, jid, options)
{:reply, {:ok, job}, state}
end
def handle_call({:find_retry, jid}, _from, state) do
{:ok, job} = JobQueue.find_job(state.redis, state.namespace, jid, :retry)
{:reply, {:ok, job}, state}
end
def handle_call({:find_retry, score, jid, options}, _from, state) do
{:ok, job} = JobStat.find_retry(state.redis, state.namespace, score, jid, options)
{:reply, {:ok, job}, state}
end
def handle_call({:remove_queue, queue}, _from, state) do
JobStat.remove_queue(state.redis, state.namespace, queue)
{:reply, :ok, state}
end
def handle_call({:remove_job, queue, jid}, _from, state) do
JobQueue.remove_job(state.redis, state.namespace, queue, jid)
{:reply, :ok, state}
end
def handle_call({:remove_enqueued_jobs, queue, raw_jobs}, _from, state) do
JobQueue.remove_enqueued_jobs(state.redis, state.namespace, queue, raw_jobs)
{:reply, :ok, state}
end
def handle_call({:remove_retry, jid}, _from, state) do
JobQueue.remove_retry(state.redis, state.namespace, jid)
{:reply, :ok, state}
end
def handle_call({:remove_retry_jobs, raw_jobs}, _from, state) do
JobQueue.remove_retry_jobs(state.redis, state.namespace, raw_jobs)
{:reply, :ok, state}
end
def handle_call({:dequeue_retry_jobs, raw_jobs}, _from, state) do
result = JobQueue.dequeue_retry_jobs(state.redis, state.namespace, raw_jobs)
{:reply, result, state}
end
def handle_call({:remove_scheduled, jid}, _from, state) do
JobQueue.remove_scheduled(state.redis, state.namespace, jid)
{:reply, :ok, state}
end
def handle_call({:remove_scheduled_jobs, raw_jobs}, _from, state) do
JobQueue.remove_scheduled_jobs(state.redis, state.namespace, raw_jobs)
{:reply, :ok, state}
end
def handle_call({:dequeue_scheduled_jobs, raw_jobs}, _from, state) do
result = JobQueue.dequeue_scheduled_jobs(state.redis, state.namespace, raw_jobs)
{:reply, result, state}
end
def handle_call({:remove_failed, jid}, _from, state) do
JobStat.remove_failed(state.redis, state.namespace, jid)
{:reply, :ok, state}
end
def handle_call({:remove_failed_jobs, raw_jobs}, _from, state) do
JobQueue.remove_failed_jobs(state.redis, state.namespace, raw_jobs)
{:reply, :ok, state}
end
def handle_call(:clear_failed, _from, state) do
JobStat.clear_failed(state.redis, state.namespace)
{:reply, :ok, state}
end
def handle_call({:dequeue_failed_jobs, raw_jobs}, _from, state) do
result = JobQueue.dequeue_failed_jobs(state.redis, state.namespace, raw_jobs)
{:reply, result, state}
end
def handle_call(:clear_processes, _from, state) do
JobStat.clear_processes(state.redis, state.namespace)
{:reply, :ok, state}
end
def handle_call(:clear_scheduled, _from, state) do
JobQueue.delete_queue(state.redis, state.namespace, "schedule")
{:reply, :ok, state}
end
def handle_call(:clear_retries, _from, state) do
JobQueue.delete_queue(state.redis, state.namespace, "retry")
{:reply, :ok, state}
end
def handle_call(:realtime_stats, _from, state) do
{:ok, failures, successes} = JobStat.realtime_stats(state.redis, state.namespace)
{:reply, {:ok, failures, successes}, state}
end
def handle_call({:retry_job, jid}, _from, state) do
{:ok, job} = JobQueue.find_job(state.redis, state.namespace, jid, :retry)
JobQueue.retry_job(state.redis, state.namespace, job)
{:reply, :ok, state}
end
def terminate(_reason, _state) do
:ok
end
def server_name(name) do
name = name || Config.get(:name)
"#{name}.Api" |> String.to_atom()
end
end
|
lib/exq/api/server.ex
| 0.659515 | 0.448426 |
server.ex
|
starcoder
|
defmodule HL7.Composite.Spec do
@moduledoc "Macros and functions used to define HL7 composite fields"
@type option ::
{:separators, [{key :: atom, separator :: byte}]}
| {:trim, boolean}
@doc false
defmacro __using__(_) do
quote do
import unquote(__MODULE__)
end
end
@doc """
Macro that generates the code that allows a module to be used as a composite field for HL7
segments. A `composite` field definition looks like the following block:
composite do
component :number, type: :string
component :date, type: :date
component :source, type: :string
end
*Note*: when defining a composite, the fields have to be in the order they appear in the message.
"""
defmacro composite(do: components) do
caller_module = __CALLER__.module
quote do
Module.register_attribute(unquote(caller_module), :struct_fields, accumulate: true)
Module.register_attribute(unquote(caller_module), :components, accumulate: true)
@before_compile unquote(__MODULE__)
unquote(components)
end
end
@doc """
Macro that generates the code for each individual component within an HL7 composite field.
Each `component` definition looks like the following one:
component :price, type: :float
A `component` has a name that has to be an atom, a `type` and a `default` value. The default
`type` is `:string` and the default `value` is `""` for basic types and an empty struct for
composite types. The supported types are:
* `:string`
* `:integer`
* `:float`
* `:date`: a field containing a date as a `%Date{}` struct that is serialized using the
`YYYYMMDD` format.
* `:datetime`: a field containing a `%NaiveDateTime{}` struct that is serialized using the
`YYYYMMDD[hhmm[ss]]` format.
* an atom corresponding to a composite field's module name. The module must have been built
using the macros from the `HL7.Composite.Spec` module or following the behaviour of an
`HL7.Composite`.
"""
defmacro component(name, args \\ []) do
type = Keyword.get(args, :type, :string)
default = default_for(type, Keyword.get(args, :default))
quote bind_quoted: [name: name, type: type, default: default, module: __CALLER__.module] do
check_component!(name, type, default, module, Module.get_attribute(module, :components))
# Accumulate the components and fields of the struct that will be added to the module so
# that the corresponding functions can be generated in the __before_compile__ function.
@components {name, type}
@struct_fields {name, default}
end
end
defmacro __before_compile__(_env) do
composite_mod = __CALLER__.module
spec =
composite_mod
|> Module.get_attribute(:components)
|> Enum.reverse()
struct_fields =
composite_mod
|> Module.get_attribute(:struct_fields)
|> Enum.reverse()
quote do
defstruct unquote(Macro.escape(struct_fields))
@doc "Return the specification for the composite type."
@spec spec() :: [HL7.Composite.spec()]
def spec(), do: unquote(Macro.escape(spec))
end
end
def quote_base_type(:string), do: quote(context: Elixir, do: binary)
def quote_base_type(:integer), do: quote(context: Elixir, do: integer)
def quote_base_type(:float), do: quote(context: Elixir, do: float)
def quote_base_type(:date), do: quote(context: Elixir, do: Date.t())
def quote_base_type(:datetime), do: quote(context: Elixir, do: NaiveDateTime.t())
def quote_base_type(composite), do: quote(context: Elixir, do: unquote(base_type!(composite)).t)
@doc "Checks that a component definition is correct"
@spec check_component!(
name :: atom,
type :: atom,
default :: any,
module,
components :: [{name :: atom, type :: atom}]
) :: nil | no_return
def check_component!(name, type, default, module, components) do
check_type!(name, type)
check_default!(name, type, default)
unless List.keyfind(components, name, 0) === nil do
raise ArgumentError,
"component #{inspect(name)} is already present in composite '#{module}'"
end
end
@doc "Checks that the type of a component inside a composite field is valid"
def check_type!(name, type) do
unless check_type?(type) do
raise ArgumentError, "invalid type #{inspect(type)} in component #{inspect(name)}"
end
end
def check_type?(composite_type) when is_tuple(composite_type) do
composite_type
|> base_type()
|> check_type?()
end
def check_type?(type) do
check_base_type?(type) or composite_module?(type)
end
def check_base_type?(:string), do: true
def check_base_type?(:integer), do: true
def check_base_type?(:float), do: true
def check_base_type?(:date), do: true
def check_base_type?(:datetime), do: true
def check_base_type?(_type), do: false
@doc """
Function that receives the type used in a segment or composite field
definition and returns its basic type. If a composite type is passed, it will
navigate through its definition and return the basic type (i.e. `:string`;
`:integer`, `:float`; `:date`; `:datetime`) of the corresponding field.
It accepts both basic types and composite types, returning the atom
corresponding to the type or `nil` if the type is invalid.
## Examples
iex> alias HL7.Composite.Spec
...> alias HL7.Composite.Default.CX
...> Spec.base_type({CX, :assigning_authority, :universal_id})
:string
iex> Spec.base_type({CX, :effective_date})
:date
iex> Spec.base_type(:integer)
:integer
iex> Spec.base_type(:invalid_type)
nil
"""
def base_type(composite_type) when is_tuple(composite_type) do
composite_mod = elem(composite_type, 0)
if composite_module?(composite_mod) do
key = elem(composite_type, 1)
case List.keyfind(composite_mod.spec(), key, 0) do
{^key, type} when tuple_size(composite_type) === 2 ->
type
{^key, subcomposite_mod} when tuple_size(composite_type) === 3 ->
base_type({subcomposite_mod, elem(composite_type, 2)})
nil ->
nil
end
else
nil
end
end
def base_type(type)
when type === :string or type === :integer or type === :float or
type === :date or type === :datetime do
type
end
def base_type(_type) do
nil
end
def base_type!(type) do
case base_type(type) do
base_type when base_type !== nil ->
base_type
nil ->
raise ArgumentError, "invalid type #{inspect(type)}"
end
end
def composite_module?(module) when is_atom(module) do
case Code.ensure_compiled(module) do
{:module, module} -> function_exported?(module, :spec, 0)
_ -> false
end
end
def composite_module?(_), do: false
@doc "Checks that the default value assigned to a component inside a composite field is valid"
def check_default!(name, type, default) do
if check_default?(type, default) do
true
else
raise ArgumentError,
"invalid default value #{inspect(default)} for " <>
"#{type} component #{inspect(name)}"
end
end
def check_default?(_type, ""), do: true
def check_default?(:string, default), do: is_binary(default)
def check_default?(:integer, default), do: is_integer(default)
def check_default?(:float, default), do: is_float(default)
def check_default?(:date, date), do: is_date(date)
def check_default?(:datetime, datetime), do: is_datetime(datetime)
def check_default?(_type, _default), do: false
def default_for(_type, default) when is_nil(default) do
quote do: unquote("")
end
def default_for(_type, default) do
quote do: unquote(default)
end
def base_type?(:string), do: true
def base_type?(:integer), do: true
def base_type?(:float), do: true
def base_type?(:date), do: true
def base_type?(:datetime), do: true
def base_type?(_type), do: false
defp is_date(%Date{}), do: true
defp is_date(_date), do: false
defp is_datetime(%NaiveDateTime{}), do: true
defp is_datetime(_datetime), do: false
end
|
lib/ex_hl7/composite/spec.ex
| 0.89391 | 0.486088 |
spec.ex
|
starcoder
|
defmodule Tic_Toc do
@moduledoc """
This is the Tic Toc module.
This module is an implementation for Tic Toc game
It is a basic implementation to get knowledge about
tuples, if conditions and recursivity funtions.
How to use:
1.- The game ask you about the gamers names like this:
Input the name for first player with the X turn :
Input the name for second player with the O turn :
2.- The game shows the dashboard like This
Tic Toc Dashboard
Player One with X turn : Carolina
Player Two with O turn : Enrique
D E F
A 1 2 3
B 4 5 6
C 7 8 9
3.- The game asks the player for their move.
The player selects a number between 1 and 9:
Input the number that you want to play with X turn :
4.- The game updates the dashboard and passes the turn to the next player, and so on.
5.- The game validates the dashboard state after every move to determine the game result.
Author : <NAME>
Date : February 17 2018
Version : 0.2
"""
@doc """
The start function is the entry point of the program.
It does the following:
  gets the players' names
  sets up the tuple that represents an initial dashboard
  starts the game
Returns `:ok` when the program has finished.
"""
def start do
play_list_tuple = {1,2,3,4,5,6,7,8,9}
player_turn_initial= "X"
gamers_name = players_name()
play_list_tuple |> dashboard(gamers_name)
gamers_name |> play(play_list_tuple,player_turn_initial)
end
defp play(player_turn,name_players) do
case player_turn do
"X" -> IO.puts "The winner is : #{elem(name_players,0)}"
"O" -> IO.puts "The winner is : #{elem(name_players,1)}"
_ -> "Unknow player name"
end
end
@doc """
The play function is a recursive function to play the game.
It calls one of:
the play(player_turn, name_players) function to declare a winner, or
the the_game_is_tie() function if the game is a tie.
"""
defp play(name_players,play_list_tuple, player_turn) do
# To validate if the board represent a tie
if (is_tie(play_list_tuple, tuple_size(play_list_tuple))) do
the_game_is_tie()
else
index = get_only_numbers(1,9,"Input the number that you want to play with #{player_turn} turn : ")
dashboard_updated = position_is_free_to_play(play_list_tuple,index - 1,player_turn)
dashboard_updated |> dashboard(name_players)
if dashboard_updated |> validate_all_win_rules(player_turn) do
# to Show the winner
play(player_turn,name_players)
else
# Changing turn to the player
play(name_players,dashboard_updated, switch_user(player_turn))
end
end # End from game is Tie
end
defp switch_user(play_sign) do
if play_sign == "X", do: "O", else: "X"
end
@doc """
The validate_all_win_rules function determines whether the player is the winner.
Returns
true if at least one win rule is satisfied.
false if no win rule is satisfied.
"""
defp validate_all_win_rules(play_list_tuple,player_turn) do
cond do
elem(play_list_tuple,0)== player_turn and elem(play_list_tuple,1) == player_turn and elem(play_list_tuple,2) == player_turn -> true
elem(play_list_tuple,3)== player_turn and elem(play_list_tuple,4) == player_turn and elem(play_list_tuple,5) == player_turn -> true
elem(play_list_tuple,6)== player_turn and elem(play_list_tuple,7) == player_turn and elem(play_list_tuple,8) == player_turn -> true
elem(play_list_tuple,0)== player_turn and elem(play_list_tuple,3) == player_turn and elem(play_list_tuple,6) == player_turn -> true
elem(play_list_tuple,1)== player_turn and elem(play_list_tuple,4) == player_turn and elem(play_list_tuple,7) == player_turn -> true
elem(play_list_tuple,2)== player_turn and elem(play_list_tuple,5) == player_turn and elem(play_list_tuple,8) == player_turn -> true
elem(play_list_tuple,0)== player_turn and elem(play_list_tuple,4) == player_turn and elem(play_list_tuple,8) == player_turn -> true
elem(play_list_tuple,2)== player_turn and elem(play_list_tuple,4) == player_turn and elem(play_list_tuple,6) == player_turn -> true
true -> false
end
end
@doc """
players_name function is useful to get the names of the gamers.
Returns
a tuple with both game names
"""
defp players_name() do
player1 = IO.gets("Input the name for first player with the X turn : ")
player2 = IO.gets("Input the name for second player with the O turn : ")
{player1,player2}
end
@doc """
The dashboard function prints the dashboard to the console,
like this:
Tic Toc Dashboard
Player One with X turn : Carolina
Player Two with O turn : Enrique
D E F
A 1 2 3
B 4 5 6
C 7 8 9
"""
defp dashboard(play_list_tuple, name_players) do
IO.puts "Tic Toc Dashboard"
IO.puts "Player One with X turn : #{elem(name_players,0)} "
IO.puts "Player Two with O turn : #{elem(name_players,1)} "
IO.puts ""
IO.puts " D E F"
IO.puts "A #{elem(play_list_tuple,0)} #{elem(play_list_tuple,1)} #{elem(play_list_tuple,2)}"
IO.puts "B #{elem(play_list_tuple,3)} #{elem(play_list_tuple,4)} #{elem(play_list_tuple,5)}"
IO.puts "C #{elem(play_list_tuple,6)} #{elem(play_list_tuple,7)} #{elem(play_list_tuple,8)}"
end
@doc """
This function validates whether the chosen position is free and updates the tuple;
if the position is not free, the tuple is not updated.
"""
defp position_is_free_to_play(play_list_tuple, index, turn_sign) do
if elem(play_list_tuple,index) != "X" and elem(play_list_tuple,index) != "O" do
tuple_deleted = Tuple.delete_at(play_list_tuple,index)
tuple_played = Tuple.insert_at(tuple_deleted,index , turn_sign)
tuple_played
else
play_list_tuple
end
end
defp the_game_is_tie() do
IO.puts "The game is tie "
end
defp is_tie(_play_list_tuple, 0) do
true
end
@doc """
is_tie(play_list_tuple, index) is a function to determine whether the game is a tie.
This function is called on every move.
It gets values from the dashboard tuple and compares them with the "X" and "O" signs;
if all values of the dashboard tuple are "X" or "O", the game is a tie.
Returns
true if all values are "X" or "O"
false if at least one value is different from "X" or "O"
"""
defp is_tie(play_list_tuple, index) do
if elem(play_list_tuple, index - 1 ) != "X" and elem(play_list_tuple, index - 1) != "O" do
false
else
is_tie(play_list_tuple, index - 1)
end
end
defp get_only_numbers(num) do
num
end
@doc """
get_only_numbers(min,max,message) is a recursive function to get only numbers from the console input device.
Receives
This function receives three parameters:
min - minimum value of the range specified
max - maximum value of the range specified
message - message to display to the user on console
Returns
It returns a valid number within the range specified by the parameters.
"""
defp get_only_numbers(min,max,message) do
ent = IO.gets message
case Integer.parse(ent) do
:error -> get_only_numbers(min,max,message)
{num,_} -> if min <= num and num <= max , do: get_only_numbers(num), else: get_only_numbers(min,max,message)
end
end
end # End defmodule
|
tic_toc_v2.ex
| 0.60964 | 0.651258 |
tic_toc_v2.ex
|
starcoder
|
defmodule Elsa.Consumer.Worker do
@moduledoc """
Defines the worker GenServer that is managed by the DynamicSupervisor.
Workers are instantiated and assigned to a specific topic/partition.
They process messages according to the message handler module passed in
from the manager, then call the ack function to notify the cluster that
the messages have been successfully processed.
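A sketch of a compatible handler module (the module name is illustrative;
the callbacks mirror the `init/1` and `handle_messages/2` calls this worker
makes):

    defmodule MyApp.MessageHandler do
      def init(_args), do: {:ok, %{}}

      def handle_messages(messages, state) do
        Enum.each(messages, &IO.inspect/1)
        {:ack, state}
      end
    end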
"""
use GenServer, restart: :temporary
require Logger
import Elsa.Supervisor, only: [registry: 1]
import Record, only: [defrecord: 2, extract: 2]
defrecord :kafka_message_set, extract(:kafka_message_set, from_lib: "brod/include/brod.hrl")
@subscribe_delay 200
@subscribe_retries 20
@start_failure_delay 5_000
defmodule State do
@moduledoc """
The running state of the worker process.
"""
defstruct [
:connection,
:topic,
:partition,
:generation_id,
:offset,
:subscriber_pid,
:handler,
:handler_init_args,
:handler_state,
:config
]
end
@doc """
Triggers the worker to gracefully disengage itself
from the supervision tree, unsubscribe from the topic
and partition, and initiate its own stop sequence.
"""
@spec unsubscribe(pid()) :: {:stop, :normal, term(), struct()}
def unsubscribe(pid) do
GenServer.call(pid, :unsubscribe)
end
@doc """
Start the worker process and init the state with the given config.
"""
def start_link(init_args) do
GenServer.start_link(__MODULE__, init_args)
end
def init(init_args) do
state = %State{
connection: Keyword.fetch!(init_args, :connection),
topic: Keyword.fetch!(init_args, :topic),
partition: Keyword.fetch!(init_args, :partition),
generation_id: Keyword.get(init_args, :generation_id),
offset: Keyword.fetch!(init_args, :begin_offset),
handler: Keyword.fetch!(init_args, :handler),
handler_init_args: Keyword.get(init_args, :handler_init_args, []),
config: Keyword.get(init_args, :config, [])
}
Process.put(:elsa_connection, state.connection)
Process.put(:elsa_topic, state.topic)
Process.put(:elsa_partition, state.partition)
Process.put(:elsa_generation_id, state.generation_id)
Elsa.Registry.register_name({registry(state.connection), :"worker_#{state.topic}_#{state.partition}"}, self())
{:ok, handler_state} = state.handler.init(state.handler_init_args)
{:ok, %{state | handler_state: handler_state}, {:continue, :subscribe}}
end
def handle_continue(:subscribe, state) do
with {:ok, pid} <- subscribe(state) do
Process.monitor(pid)
{:noreply, %{state | subscriber_pid: pid}}
else
{:error, reason} ->
Logger.warn(
"Unable to subscribe to topic/partition/offset(#{state.topic}/#{state.partition}/#{state.offset}), reason #{
inspect(reason)
}"
)
Process.sleep(@start_failure_delay)
{:stop, reason, state}
end
end
def handle_info({_consumer_pid, kafka_message_set(topic: topic, partition: partition, messages: messages)}, state) do
transformed_messages = transform_messages(topic, partition, messages, state)
case send_messages_to_handler(transformed_messages, state) do
{ack, new_handler_state} when ack in [:ack, :acknowledge] ->
offset = transformed_messages |> List.last() |> Map.get(:offset)
ack_messages(topic, partition, offset, state)
{:noreply, %{state | offset: offset, handler_state: new_handler_state}}
{ack, offset, new_handler_state} when ack in [:ack, :acknowledge] ->
ack_messages(topic, partition, offset, state)
{:noreply, %{state | offset: offset, handler_state: new_handler_state}}
{no_ack, new_handler_state} when no_ack in [:no_ack, :noop] ->
{:noreply, %{state | handler_state: new_handler_state}}
{:continue, new_handler_state} ->
offset = transformed_messages |> List.last() |> Map.get(:offset)
:ok = Elsa.Consumer.ack(state.connection, topic, partition, offset)
{:noreply, %{state | handler_state: new_handler_state}}
end
end
def handle_info({:DOWN, _ref, :process, _pid, _message}, state) do
{:stop, :brod_consumer_stopped, state}
end
def handle_call(:unsubscribe, _from, state) do
result = :brod.unsubscribe(state.connection, state.topic, state.partition)
{:stop, :normal, result, state}
end
defp transform_messages(topic, partition, messages, state) do
Enum.map(messages, &Elsa.Message.new(&1, topic: topic, partition: partition, generation_id: state.generation_id))
end
defp send_messages_to_handler(messages, state) do
state.handler.handle_messages(messages, state.handler_state)
end
defp ack_messages(topic, partition, offset, %{generation_id: nil} = state) do
Elsa.Consumer.ack(state.connection, topic, partition, offset)
end
defp ack_messages(topic, partition, offset, state) do
Elsa.Group.Manager.ack(state.connection, topic, partition, state.generation_id, offset)
offset
end
defp subscribe(state, retries \\ @subscribe_retries)
defp subscribe(_state, 0) do
{:error, :failed_subscription}
end
defp subscribe(state, retries) do
opts = determine_subscriber_opts(state)
case Elsa.Consumer.subscribe(state.connection, state.topic, state.partition, opts) do
{:error, reason} ->
Logger.warn(
"Retrying to subscribe to topic #{state.topic} parition #{state.partition} offset #{state.offset} reason #{
inspect(reason)
}"
)
Process.sleep(@subscribe_delay)
subscribe(state, retries - 1)
{:ok, consumer_pid} ->
Logger.info("Subscribing to topic #{state.topic} partition #{state.partition} offset #{state.offset}")
{:ok, consumer_pid}
end
end
defp determine_subscriber_opts(state) do
begin_offset =
case state.offset do
:undefined ->
Keyword.get(state.config, :begin_offset, :latest)
offset ->
offset
end
Keyword.put(state.config, :begin_offset, begin_offset)
end
end
|
lib/elsa/consumer/worker.ex
| 0.72331 | 0.509764 |
worker.ex
|
starcoder
|
defmodule Exkismet.Api do
@moduledoc """
Provides a simple method for calling the Akismet API. To use, be sure to set
your hostname, and api key by adding the following line in config.exs:
`config :exkismet, key: "<your api key>", blog: "http://yourhostname.com"`
"""
@doc """
Validates your API key with akismet.com. If this fails, it's probably because
you haven't set your API key in config.exs, like so:
`config :exkismet, key: "<your api key>", blog: "http://yourhostname.com" `
"""
def verify do
case Exkismet.Service.verify do
%{body: "valid"} -> :valid
error -> {:invalid, error}
end
end
@doc """
Checks a comment with Akismet. Takes a map of meta data about the comment,
using the following keys:
```
is_test: <true> if you're testing; leave it out otherwise. Setting this keeps
Akismet from using this message for training,
```
These attributes are required.
```
blog: "http://<yourhostname>.com",
user_agent: the user agent of the *commenter*
user_ip: the ip of the commenter
```
The following are optional, but the more you have the better it works.
```
referrer: "http://google.com",
blog_charset: "UTF-8", // character set of the comment
comment_post_modified_gmt: "2015-12-30T10:40:10.047-0500", time that the blogpost
was updated in UTC, ISO8601 format.
comment_date_gmt: "2015-12-30T10:41:28.448-0500", time the comment was created
in UTC, ISO8601 format
comment_content: <the comment itself>,
comment_author_url: <the authors URL>,
comment_author_email: "<EMAIL>",
comment_author: "viagra-test-123",
comment_type: "comment", (other types include tracbacks, etc)
permalink: "http://127.0.0.1/my_blog_post",
```
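A minimal call might look like this (a sketch; the field values are
illustrative):
```
Exkismet.Api.comment_check(%{
  blog: "http://yourhostname.com",
  user_ip: "127.0.0.1",
  user_agent: "Mozilla/5.0",
  comment_content: "Buy cheap watches!"
})
# => :spam | :ham | :discard | {:error, error}
```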
"""
def comment_check(comment) when is_map(comment) do
case Exkismet.Service.comment_check(comment) do
%{headers: %{"X-akismet-pro-tip" => "discard"}} -> :discard
%{body: "true"} -> :spam
%{body: "false"} -> :ham
error -> {:error, error}
end
end
@doc """
Report a comment as being spam. Uses the same fields as described in
Exkismet.Api.comment_check
"""
def submit_spam(comment) when is_map(comment) do
case Exkismet.Service.submit_spam(comment) do
%{body: "Thanks" <> _} -> :ok
error -> {:error, error}
end
end
@doc """
Report a comment as being ham (not spam). Uses the same fields as described in
Exkismet.Api.comment_check
"""
def submit_ham(comment) when is_map(comment) do
case Exkismet.Service.submit_ham(comment) do
%{body: "Thanks" <> _} -> :ok
error -> {:error, error}
end
end
end
|
lib/exkismet/api.ex
| 0.660939 | 0.59564 |
api.ex
|
starcoder
|
defmodule AWS.CodeStarConnections do
@moduledoc """
AWS CodeStar Connections
The CodeStar Connections feature is in preview release and is subject to change.
This AWS CodeStar Connections API Reference provides descriptions and usage
examples of the operations and data types for the AWS CodeStar Connections API.
You can use the connections API to work with connections and installations.
*Connections* are configurations that you use to connect AWS resources to
external code repositories. Each connection is a resource that can be given to
services such as CodePipeline to connect to a third-party repository such as
Bitbucket. For example, you can add the connection in CodePipeline so that it
triggers your pipeline when a code change is made to your third-party code
repository. Each connection is named and associated with a unique ARN that is
used to reference the connection.
When you create a connection, the console initiates a third-party connection
handshake. *Installations* are the apps that are used to conduct this handshake.
For example, the installation for the Bitbucket provider type is the Bitbucket
Cloud app. When you create a connection, you can choose an existing installation
or create one.
When you want to create a connection to an installed provider type such as
GitHub Enterprise Server, you create a *host* for your connections.
You can work with connections by calling:
* `CreateConnection`, which creates a uniquely named connection that
can be referenced by services such as CodePipeline.
* `DeleteConnection`, which deletes the specified connection.
* `GetConnection`, which returns information about the connection,
including the connection status.
* `ListConnections`, which lists the connections associated with
your account.
You can work with hosts by calling:
* `CreateHost`, which creates a host that represents the
infrastructure where your provider is installed.
* `DeleteHost`, which deletes the specified host.
* `GetHost`, which returns information about the host, including the
setup status.
* `ListHosts`, which lists the hosts associated with your account.
You can work with tags in AWS CodeStar Connections by calling the following:
* `ListTagsForResource`, which gets information about AWS tags for a
specified Amazon Resource Name (ARN) in AWS CodeStar Connections.
* `TagResource`, which adds or updates tags for a resource in AWS
CodeStar Connections.
* `UntagResource`, which removes tags for a resource in AWS CodeStar
Connections.
For information about how to use AWS CodeStar Connections, see the [Developer Tools User
Guide](https://docs.aws.amazon.com/dtconsole/latest/userguide/welcome-connections.html).
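A typical call passes an `AWS.Client` struct and the operation input as a map.
A minimal sketch (assuming `AWS.Client.create/3` from the aws-elixir package;
the connection name is illustrative):

    client = AWS.Client.create("access_key_id", "secret_access_key", "us-east-1")

    AWS.CodeStarConnections.create_connection(client, %{
      "ConnectionName" => "my-connection",
      "ProviderType" => "Bitbucket"
    })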
"""
@doc """
Creates a connection that can then be given to other AWS services like
CodePipeline so that it can access third-party code repositories.
The connection is in pending status until the third-party connection handshake
is completed from the console.
"""
def create_connection(client, input, options \\ []) do
request(client, "CreateConnection", input, options)
end
@doc """
Creates a resource that represents the infrastructure where a third-party
provider is installed.
The host is used when you create connections to an installed third-party
provider type, such as GitHub Enterprise Server. You create one host for all
connections to that provider.
A host created through the CLI or the SDK is in `PENDING` status by default. You
can make its status `AVAILABLE` by setting up the host in the console.
"""
def create_host(client, input, options \\ []) do
request(client, "CreateHost", input, options)
end
@doc """
Deletes the specified connection.
"""
def delete_connection(client, input, options \\ []) do
request(client, "DeleteConnection", input, options)
end
@doc """
Deletes the specified host.
Before you delete a host, all connections associated with the host must be
deleted.
A host cannot be deleted if it is in the VPC_CONFIG_INITIALIZING or
VPC_CONFIG_DELETING state.
"""
def delete_host(client, input, options \\ []) do
request(client, "DeleteHost", input, options)
end
@doc """
Returns the connection ARN and details such as status, owner, and provider type.
"""
def get_connection(client, input, options \\ []) do
request(client, "GetConnection", input, options)
end
@doc """
Returns the host ARN and details such as status, provider type, endpoint, and,
if applicable, the VPC configuration.
"""
def get_host(client, input, options \\ []) do
request(client, "GetHost", input, options)
end
@doc """
Lists the connections associated with your account.
"""
def list_connections(client, input, options \\ []) do
request(client, "ListConnections", input, options)
end
@doc """
Lists the hosts associated with your account.
"""
def list_hosts(client, input, options \\ []) do
request(client, "ListHosts", input, options)
end
@doc """
Gets the set of key-value pairs (metadata) that are used to manage the resource.
"""
def list_tags_for_resource(client, input, options \\ []) do
request(client, "ListTagsForResource", input, options)
end
@doc """
Adds to or modifies the tags of the given resource.
Tags are metadata that can be used to manage a resource.
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
Removes tags from an AWS resource.
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "codestar-connections"}
host = build_host("codestar-connections", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.0"},
{"X-Amz-Target", "com.amazonaws.codestar.connections.CodeStar_connections_20191201.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/code_star_connections.ex
| 0.883387 | 0.51379 |
code_star_connections.ex
|
starcoder
|
defmodule Mogrify do
use Mogrify.Compat
alias Mogrify.Compat
alias Mogrify.Image
alias Mogrify.Option
@doc """
Opens image source.
"""
def open(path) do
path = Path.expand(path)
unless File.regular?(path), do: raise(File.Error)
%Image{path: path, ext: Path.extname(path)}
end
@doc """
Saves modified image.
## Options
* `:path` - The output path of the image. Defaults to a temporary file.
* `:in_place` - Overwrite the original image, ignoring `:path` option. Default `false`.
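A typical open/operate/save pipeline (a sketch; the paths are illustrative):

    import Mogrify

    open("input.jpg")
    |> resize("200x200")
    |> save(path: "output.jpg")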
"""
def save(image, opts \\ []) do
cmd_opts = [stderr_to_stdout: true]
if opts[:in_place] do
final_output_path = if image.dirty[:path], do: image.dirty[:path], else: image.path
args = arguments_for_saving_in_place(image)
{_, 0} = cmd_mogrify(args, cmd_opts)
image_after_command(image, final_output_path)
else
cmd_output_path = output_path_for(image, opts)
final_output_path = Keyword.get(opts, :path, cmd_output_path)
create_folder_if_doesnt_exist!(cmd_output_path)
create_folder_if_doesnt_exist!(final_output_path)
args = arguments_for_saving(image, cmd_output_path)
{_, 0} = cmd_convert(args, cmd_opts)
# final output path may differ if temporary path was used for image format
if cmd_output_path != final_output_path do
# copy then rm, because File.rename/2 may fail across filesystem boundary
File.copy!(cmd_output_path, final_output_path)
File.rm!(cmd_output_path)
end
image_after_command(image, final_output_path)
end
end
@doc """
Creates or saves image.
Uses the `convert` command, which accepts both existing images, or image
operators. If you have an existing image, prefer save/2.
## Options
* `:path` - The output path of the image. Defaults to a temporary file.
* `:in_place` - Overwrite the original image, ignoring `:path` option. Default `false`.
* `:buffer` - Pass `true` to write to Collectable in Image.buffer instead of file.
* `:into` - Used with `:buffer` to specify a Collectable. Defaults to `""`. See `System.cmd/3`.
"""
def create(image, opts \\ []) do
if opts[:buffer] do
cmd_opts = [stderr_to_stdout: false]
cmd_opts = if opts[:into], do: cmd_opts ++ [into: opts[:into]], else: cmd_opts
{image_collectable, 0} = cmd_convert(arguments(image), cmd_opts)
image_after_buffer_command(image, image_collectable)
else
cmd_opts = [stderr_to_stdout: true]
output_path = output_path_for(image, opts)
create_folder_if_doesnt_exist!(output_path)
{_, 0} = cmd_convert(arguments_for_creating(image, output_path), cmd_opts)
image_after_command(image, output_path)
end
end
@doc """
Returns the histogram of the image
Runs ImageMagick's `histogram:info:-` command.
Results are returned as a list of maps where each map includes keys red,
blue, green, hex and count.
## Examples
iex> open("test/fixtures/rbgw.png") |> histogram
[
%{"alpha" => 255, "blue" => 255, "count" => 400, "green" => 0, "hex" => "#0000ff", "red" => 0},
%{"alpha" => 255, "blue" => 0, "count" => 225, "green" => 255, "hex" => "#00ff00", "red" => 0},
%{"alpha" => 255, "blue" => 0, "count" => 525, "green" => 0, "hex" => "#ff0000", "red" => 255},
%{"alpha" => 255, "blue" => 255, "count" => 1350, "green" => 255, "hex" => "#ffffff", "red" => 255}
]
"""
def histogram(image) do
img = image |> custom("format", "%c")
args = arguments(img) ++ [image.path, "histogram:info:-"]
res = cmd_convert(args, stderr_to_stdout: false)
res
|> elem(0)
|> process_histogram_output
end
defp image_after_command(image, output_path) do
format = Map.get(image.dirty, :format, image.format)
%{
clear_operations(image)
| path: output_path,
ext: Path.extname(output_path),
format: format
}
end
defp image_after_buffer_command(image, image_collectable) do
%{
clear_operations(image)
| buffer: image_collectable
}
end
defp clear_operations(image) do
%{image | operations: [], dirty: %{}}
end
defp cleanse_histogram(hist) do
hist
|> Enum.into(%{}, &clean_histogram_entry/1)
end
defp clean_histogram_entry({"hex", v}), do: {"hex", v}
defp clean_histogram_entry({"alpha", ""}), do: {"alpha", 255}
defp clean_histogram_entry({k, ""}), do: {k, 0}
defp clean_histogram_entry({k, v}), do: {k, v |> Float.parse() |> elem(0) |> Float.round(0) |> trunc}
def extract_histogram_data(entry) do
~r/^\s+(?<count>\d+):\s+\((?<red>[\d(?:\.\d+)?)\s]+),(?<green>[\d(?:\.\d+)?)\s]+),(?<blue>[\d(?:\.\d+)?)\s]+)(,(?<alpha>[\d(?:\.\d+)?)\s]+))?\)\s+(?<hex>\#[abcdef\d]{6,8})\s+/i
|> Regex.named_captures(entry)
|> Enum.map(fn {k, v} -> {k, v |> Compat.string_trim()} end)
|> cleanse_histogram
end
defp process_histogram_output(histogram_output) do
histogram_output
|> String.split("\n")
|> Enum.reject(fn s -> s |> String.length() == 0 end)
|> Enum.map(&extract_histogram_data/1)
end
defp output_path_for(image, save_opts) do
cond do
save_opts[:in_place] -> image.path
image.dirty[:path] -> temporary_path_for(image) # temp file to ensure image format applied
save_opts[:path] -> save_opts[:path]
true -> temporary_path_for(image)
end
end
# used with `convert`
defp arguments_for_saving(image, path) do
[image.path] ++ arguments(image) ++ [path]
end
# used with `mogrify`
defp arguments_for_saving_in_place(image) do
arguments(image) ++ [image.path]
end
defp arguments_for_creating(image, path) do
basename = if image.path, do: Path.basename(image.path), else: Path.basename(path)
base_arguments = [Path.join(Path.dirname(path), basename)]
arguments(image) ++ base_arguments
end
defp arguments(image) do
Enum.flat_map(image.operations, &normalize_arguments/1)
end
defp normalize_arguments({:image_operator, params}), do: ~w(#{params})
defp normalize_arguments({"annotate", params}),
do: ["-annotate"] ++ String.split(params, " ", parts: 2)
defp normalize_arguments({"histogram:" <> option, nil}), do: ["histogram:#{option}"]
defp normalize_arguments({"pango", params}), do: ["pango:#{params}"]
defp normalize_arguments({"stdout", params}), do: ["#{params}"]
defp normalize_arguments({"plasma", params}), do: ["plasma:#{params}"]
defp normalize_arguments({"canvas", params}), do: ["canvas:#{params}"]
defp normalize_arguments({"+" <> option, nil}), do: ["+#{option}"]
defp normalize_arguments({"-" <> option, nil}), do: ["-#{option}"]
defp normalize_arguments({option, nil}), do: ["-#{option}"]
defp normalize_arguments({"+" <> option, params}), do: ["+#{option}", to_string(params)]
defp normalize_arguments({"-" <> option, params}), do: ["-#{option}", to_string(params)]
defp normalize_arguments({option, params}), do: ["-#{option}", to_string(params)]
@doc """
Makes a copy of original image.
"""
def copy(image) do
temp = temporary_path_for(image)
File.cp!(image.path, temp)
Map.put(image, :path, temp)
end
def temporary_path_for(%{dirty: %{path: dirty_path}} = _image) do
do_temporary_path_for(dirty_path)
end
def temporary_path_for(%{path: path} = _image) do
do_temporary_path_for(path)
end
defp do_temporary_path_for(path) do
name = if path, do: Path.basename(path), else: Compat.rand_uniform(999_999)
random = Compat.rand_uniform(999_999)
Path.join(System.tmp_dir(), "#{random}-#{name}")
end
@doc """
Provides detailed information about the image.
This corresponds to the `mogrify -verbose` output which is similar to `identify`.
It does NOT correspond to `identify -verbose` which prints out much more information.
"""
def verbose(image) do
Map.merge(image, identify(image.path))
end
@doc """
Provides "identify" information about an image.
"""
def identify(file_path) do
args = [file_path]
{output, 0} = cmd_identify(args, stderr_to_stdout: false)
output
|> image_information_string_to_map()
|> put_frame_count(output)
end
defp image_information_string_to_map(image_information_string) do
~r/\b(?<animated>\[0])? (?<format>\S+) (?<width>\d+)x(?<height>\d+)/
|> Regex.named_captures(image_information_string)
|> Enum.map(&normalize_verbose_term/1)
|> Enum.into(%{})
end
defp normalize_verbose_term({"animated", "[0]"}), do: {:animated, true}
defp normalize_verbose_term({"animated", ""}), do: {:animated, false}
defp normalize_verbose_term({key, value}) when key in ["width", "height"] do
{String.to_atom(key), String.to_integer(value)}
end
defp normalize_verbose_term({key, value}), do: {String.to_atom(key), String.downcase(value)}
defp put_frame_count(%{animated: false} = map, _),
do: Map.put(map, :frame_count, 1)
defp put_frame_count(map, text) do
# skip the [0] lines which may be duplicated
matches = Regex.scan(~r/\b\[[1-9][0-9]*] \S+ \d+x\d+/, text)
# add 1 for the skipped [0] frame
frame_count = length(matches) + 1
Map.put(map, :frame_count, frame_count)
end
@doc """
Converts the image to the image format you specify.
"""
def format(image, format) do
downcase_format = String.downcase(format)
ext = ".#{downcase_format}"
rootname = Path.rootname(image.path, image.ext)
%{
image
| operations: image.operations ++ [format: format],
dirty: %{path: "#{rootname}#{ext}", format: downcase_format, ext: ext}
|> Enum.into(image.dirty)
}
end
@doc """
Resizes the image with provided geometry.
"""
def resize(image, params) do
%{image | operations: image.operations ++ [resize: params]}
end
@doc """
Changes quality of the image to desired quality.
"""
def quality(image, params) do
%{image | operations: image.operations ++ [quality: params]}
end
@doc """
Extends the image to the specified dimensions.
"""
def extent(image, params) do
%{image | operations: image.operations ++ [extent: params]}
end
@doc """
Sets the gravity of the image.
"""
def gravity(image, params) do
%{image | operations: image.operations ++ [gravity: params]}
end
@doc """
Resize the image to fit within the specified dimensions while retaining
the original aspect ratio.
Will only resize the image if it is larger than the specified dimensions. The
resulting image may be shorter or narrower than specified in the smaller
dimension but will not be larger than the specified values.
"""
def resize_to_limit(image, params) do
resize(image, "#{params}>")
end
@doc """
Resize the image to fit within the specified dimensions while retaining
the aspect ratio of the original image.
If necessary, crop the image in the larger dimension.
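For example (a sketch with illustrative dimensions):

    open("input.jpg") |> resize_to_fill("450x300") |> save()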
"""
def resize_to_fill(image, params) do
[_, width, height] = Regex.run(~r/(\d+)x(\d+)/, params)
image = Mogrify.verbose(image)
{width, _} = Float.parse(width)
{height, _} = Float.parse(height)
cols = image.width
rows = image.height
if width != cols || height != rows do
# float scale factor for width
scale_x = width / cols
# float scale factor for height
scale_y = height / rows
larger_scale = max(scale_x, scale_y)
cols = (larger_scale * (cols + 0.5)) |> Float.round()
rows = (larger_scale * (rows + 0.5)) |> Float.round()
image = resize(image, if(scale_x >= scale_y, do: "#{cols}", else: "x#{rows}"))
if width != cols || height != rows do
extent(image, params)
else
image
end
else
image
end
end
def auto_orient(image) do
%{image | operations: image.operations ++ ["auto-orient": nil]}
end
def canvas(image, color) do
image_operator(image, "xc:#{color}")
end
def add_option(image, option) do
validate_option!(option)
custom(image, option.name, option.argument)
end
def custom(image, action, options \\ nil) do
%{image | operations: image.operations ++ [{action, options}]}
end
def image_operator(image, operator) do
%{image | operations: image.operations ++ [{:image_operator, operator}]}
end
defp valid_option?(%Option{require_arg: true, argument: nil}), do: false
defp valid_option?(_), do: true
defp validate_option!(%Option{name: name} = option) do
if valid_option?(option) do
option
else
[prefix, leading] = extract_prefix_and_leading(name)
option_name = name |> String.replace_leading(leading, "") |> String.replace("-", "_")
raise ArgumentError,
message:
"the option #{option_name} need arguments. Be sure to pass arguments to option_#{prefix}#{
option_name
}(arg)"
end
end
defp extract_prefix_and_leading(name) do
if String.contains?(name, "+") do
["plus_", "+"]
else
["", "-"]
end
end
defp cmd_magick(tool, args, opts) do
{command, additional_args} = command_options(tool)
System.cmd(command, additional_args ++ args, opts)
rescue
e in [ErlangError] ->
if e.original == :enoent do
raise "missing prerequisite: '#{tool}'"
else
reraise e, __STACKTRACE__
end
end
defp cmd_mogrify(args, opts), do: cmd_magick(:mogrify, args, opts)
defp cmd_identify(args, opts), do: cmd_magick(:identify, args, opts)
@doc false
def cmd_convert(args, opts), do: cmd_magick(:convert, args, opts)
defp create_folder_if_doesnt_exist!(path) do
path |> Path.dirname() |> File.mkdir_p!()
end
defp command_options(command) do
config = Application.get_env(:mogrify, :"#{command}_command", [])
path = Keyword.get(config, :path)
args = Keyword.get(config, :args, [])
if path do
{path, args}
else
default_command(command)
end
end
defp default_command(command) do
case :os.type() do
{:win32, _} -> {"magick", ["#{command}"]}
_ -> {"#{command}", []}
end
end
end
|
lib/mogrify.ex
| 0.806358 | 0.438966 |
mogrify.ex
|
starcoder
|
defmodule ConduitAMQP do
@moduledoc """
AMQP adapter for Conduit.
* `url` - Full connection url. Can be used instead of the individual connection options. Default is `amqp://guest:guest@localhost:5672/`.
* `host` - Hostname of the broker (defaults to \"localhost\");
* `port` - Port the broker is listening on (defaults to `5672`);
* `username` - Username to connect to the broker as (defaults to \"guest\");
* `password` - Password to connect to the broker with (defaults to \"guest\");
* `virtual_host` - Name of a virtual host in the broker (defaults to \"/\");
* `heartbeat` - Heartbeat interval in seconds (defaults to `0` - turned off);
* `connection_timeout` - Connection timeout in milliseconds (defaults to `infinity`);
* `conn_pool_size` - Number of active connections to the broker
* `pub_pool_size` - Number of publisher channels
* `options` - Extra RabbitMQ options
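For example (a sketch; the OTP app and broker module are hypothetical):

    config :my_app, MyApp.Broker,
      adapter: ConduitAMQP,
      url: "amqp://guest:guest@localhost:5672",
      conn_pool_size: 5,
      pub_pool_size: 5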
"""
use Conduit.Adapter
use Supervisor
use AMQP
require Logger
alias ConduitAMQP.Meta
alias ConduitAMQP.Util
@type broker :: module
@type chan :: AMQP.Channel.t()
@type conn :: AMQP.Connection.t()
@pool_size 5
def child_spec([broker, _, _, _] = args) do
%{
id: name(broker),
start: {__MODULE__, :start_link, args},
type: :supervisor
}
end
def start_link(broker, topology, subscribers, opts) do
Meta.create(broker)
Supervisor.start_link(__MODULE__, [broker, topology, subscribers, opts], name: name(broker))
end
def init([broker, topology, subscribers, opts]) do
Logger.info("AMQP Adapter started!")
children = [
{ConduitAMQP.ConnPool, [broker, opts]},
{ConduitAMQP.PubPool, [broker, opts]},
{ConduitAMQP.SubPool, [broker, subscribers, opts]},
{ConduitAMQP.Tasks, [broker]},
{ConduitAMQP.Setup, [broker, topology]}
]
Supervisor.init(children, strategy: :one_for_one)
end
def name(broker) do
Module.concat(broker, Adapter)
end
# TODO: Remove when conduit goes to 1.0
# Conduit will never call this if publish/4 is defined
def publish(message, _config, _opts) do
{:ok, message}
end
def publish(broker, message, _config, opts) do
exchange = Keyword.get(opts, :exchange)
props = ConduitAMQP.Props.get(message)
{publisher_confirms, opts} =
Keyword.pop(
opts,
:publisher_confirms,
:no_confirmation
)
{publisher_confirms_timeout, _opts} =
Keyword.pop(
opts,
:publisher_confirms_timeout,
:infinity
)
select = fn chan ->
if publisher_confirms in [:wait, :die] do
Confirm.select(chan)
else
:ok
end
end
wait_for_confirms = fn chan ->
case {publisher_confirms, publisher_confirms_timeout} do
{:no_confirmation, _} -> true
{:wait, :infinity} -> Confirm.wait_for_confirms(chan)
{:wait, timeout} -> Confirm.wait_for_confirms(chan, timeout)
{:die, :infinity} -> Confirm.wait_for_confirms_or_die(chan)
{:die, timeout} -> Confirm.wait_for_confirms_or_die(chan, timeout)
end
end
with_chan(broker, fn chan ->
with :ok <- select.(chan),
:ok <- Basic.publish(chan, exchange, message.destination, message.body, props),
true <- wait_for_confirms.(chan) do
{:ok, message}
end
end)
end
@spec with_conn(broker, (conn -> term)) :: {:error, term} | {:ok, term} | term
def with_conn(broker, fun) when is_function(fun, 1) do
with {:ok, conn} <- get_conn(broker, @pool_size) do
fun.(conn)
end
end
@spec with_chan(broker, (chan -> term)) :: {:error, term} | {:ok, term} | term
def with_chan(broker, fun) when is_function(fun, 1) do
with {:ok, chan} <- get_chan(broker, @pool_size) do
fun.(chan)
end
end
@doc false
defp get_conn(broker, retries) do
pool = ConduitAMQP.ConnPool.name(broker)
Util.retry([attempts: retries], fn ->
:poolboy.transaction(pool, &GenServer.call(&1, :conn))
end)
end
@doc false
defp get_chan(broker, retries) do
pool = ConduitAMQP.PubPool.name(broker)
Util.retry([attempts: retries], fn ->
:poolboy.transaction(pool, &GenServer.call(&1, :chan))
end)
end
end
|
lib/conduit_amqp.ex
| 0.699665 | 0.55941 |
conduit_amqp.ex
|
starcoder
|
defmodule Ash.Type.CiString do
@constraints [
max_length: [
type: :non_neg_integer,
doc: "Enforces a maximum length on the value"
],
min_length: [
type: :non_neg_integer,
doc: "Enforces a minimum length on the value"
],
match: [
type: {:custom, __MODULE__, :match, []},
doc: "Enforces that the string matches a passed in regex"
],
trim?: [
type: :boolean,
doc: "Trims the value.",
default: true
],
allow_empty?: [
type: :boolean,
doc: "Sets the value to `nil` if it's empty.",
default: false
]
]
@moduledoc """
Stores a case insensitive string in the database
See `Ash.CiString` for more information.
A builtin type that can be referenced via `:ci_string`
### Constraints
#{Ash.OptionsHelpers.docs(@constraints)}
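For example, on a resource attribute (a sketch; the attribute name is
illustrative):

    attribute :email, :ci_string, constraints: [max_length: 160, trim?: true]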
"""
use Ash.Type
@impl true
def storage_type, do: :string
@impl true
def constraints, do: @constraints
def apply_constraints(%Ash.CiString{} = value, constraints) do
constraints =
if constraints[:min_length] do
Keyword.put(constraints, :allow_empty?, true)
else
constraints
end
value
|> Ash.CiString.value()
|> apply_constraints(constraints)
|> case do
{:ok, nil} ->
{:ok, nil}
{:ok, value} ->
{:ok, %Ash.CiString{string: value, lowered?: true}}
other ->
other
end
end
def apply_constraints(value, constraints) do
{value, errors} =
return_value(constraints[:allow_empty?], constraints[:trim?], value, constraints)
case errors do
[] -> {:ok, value}
errors -> {:error, errors}
end
end
defp return_value(false, true, value, constraints) do
trimmed = String.trim(value)
if trimmed == "" do
{nil, []}
else
{trimmed, validate(trimmed, constraints)}
end
end
defp return_value(false, false, value, constraints) do
if String.trim(value) == "" do
{nil, []}
else
{value, validate(value, constraints)}
end
end
defp return_value(true, true, value, constraints) do
trimmed = String.trim(value)
{trimmed, validate(trimmed, constraints)}
end
defp return_value(true, false, value, constraints),
do: {value, validate(value, constraints)}
defp validate(value, constraints) do
Enum.reduce(constraints, [], fn
{:max_length, max_length}, errors ->
if String.length(value) > max_length do
[[message: "length must be less than or equal to %{max}", max: max_length] | errors]
else
errors
end
{:min_length, min_length}, errors ->
if String.length(value) < min_length do
[
[message: "length must be greater than or equal to %{min}", min: min_length]
| errors
]
else
errors
end
{:match, regex}, errors ->
if String.match?(value, regex) do
errors
else
[[message: "must match the pattern %{regex}", regex: inspect(regex)] | errors]
end
_, errors ->
errors
end)
end
@impl true
def cast_input(%Ash.CiString{} = value), do: {:ok, value}
def cast_input(value) do
case Ecto.Type.cast(:string, value) do
{:ok, value} -> {:ok, Ash.CiString.new(value)}
:error -> :error
end
end
@impl true
def cast_stored(value) do
case Ecto.Type.load(:string, value) do
{:ok, value} -> {:ok, Ash.CiString.new(value)}
:error -> :error
end
end
@impl true
def dump_to_native(%Ash.CiString{} = ci_string) do
case Ecto.Type.dump(:string, Ash.CiString.value(ci_string)) do
{:ok, value} -> {:ok, value}
:error -> :error
end
end
def dump_to_native(value) do
case Ecto.Type.dump(:string, value) do
{:ok, value} -> {:ok, String.downcase(value)}
:error -> :error
end
end
def match(%Regex{} = regex), do: {:ok, regex}
def match(_) do
{:error, "Must provide a regex to match, e.g ~r/foobar/"}
end
end
|
lib/ash/type/ci_string.ex
| 0.836354 | 0.493958 |
ci_string.ex
|
starcoder
|
defmodule Ecto.Adapters.Jamdb.Oracle do
@moduledoc """
Adapter module for Oracle. `Ecto.Adapters.SQL` callbacks implementation.
It uses `jamdb_oracle` for communicating to the database.
## Features
* Using prepared statement functionality, the SQL statement you want
to run is precompiled and stored in a database object, and you can run it
as many times as required without compiling it every time it is run. If the data in the
statement changes, you can use bind variables as placeholders for the data and then
provide literal values at run time.
* Using bind variables:
`{"select 1+:1, sysdate, rowid from dual where 1=:1"`, `[1]}`
* Calling stored procedure:
`{"begin proc(:1, :2, :3); end;"`, `[1.0, 2.0, 3.0]}`
* Calling stored function:
`{"begin :1 := func(:2); end;"`, `[{:out, :varchar}, "one hundred"]}`
* Using cursor variable:
`{"begin open :1 for select * from tabl where dat>:2; end;"`, `[:cursor, {2016, 8, 1}]}`
* Using returning clause:
`{"insert into tabl values (tablid.nextval, sysdate) return id into :1"`, `[{:out, :number}]}`
`YourApp.Repo.insert_all(Post,[[id: 100]], [returning: [:created_at], out: [:date]])`
* Update batching:
`{:batch, "insert into tabl values (:1, :2, :3)"`, `[[1, 2, 3],[4, 5, 6],[7, 8, 9]]}`
* Row prefetching:
`{:fetch, "select * from tabl where id>:1"`, `[1]}`
`{:fetch, cursor, row_format, last_row}`
## Options
Adapter options split in different categories described
below. All options can be given via the repository
configuration:
config :your_app, YourApp.Repo,
...
### Connection options
* `:hostname` - Server hostname (Name or IP address of the database server)
* `:port` - Server port (Number of the port where the server listens for requests)
* `:database` - Database (Database service name or SID with colon as prefix)
* `:username` - Username (Name for the connecting user)
* `:password` - User password (Password for the connecting user)
* `:parameters` - Keyword list of connection parameters
* `:socket_options` - Options to be given to the underlying socket
* `:timeout` - The default timeout to use on queries, defaults to `15000`
### Pool options
* `:pool` - The connection pool module, defaults to `DBConnection.ConnectionPool`
* `:pool_size` - The size of the pool, defaults to `1`
* `:idle_interval` - The ping interval to validate an idle connection, defaults to `1000`
### Connection parameters
* `:charset` - Client character set, defaults to UTF8
* `:autocommit` - Mode that issued an automatic COMMIT operation
* `:fetch` - Number of rows to fetch from the server
* `:sdu` - Size of session data unit
* `:read_timeout` - Read timeout while reading from the socket, defaults to `500`
* `:role` - Mode that is used in an internal logon
* `:prelim` - Mode that is permitted when the database is down
### Output parameters
Using syntax for keyword lists: `[{:out, :cursor}]`, `[out: :cursor]`
Oracle types | Literal syntax in params
:------------------------------- | :-----------------------
`NUMBER`,`FLOAT`,`BINARY_FLOAT` | `:number`, `:integer`, `:float`, `:decimal`
`CHAR`, `VARCHAR2` | `:varchar`, `:char`, `:string`
`NCHAR`, `NVARCHAR2` | `:nvarchar`, `:nchar`, `:binary`
`DATE` | `:date`
`TIMESTAMP` | `:timestamp`
`TIMESTAMP WITH TIME ZONE` | `:timestamptz`
`SYS_REFCURSOR` | `:cursor`
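For example, calling a stored function with a varchar output parameter
(a sketch; the function name mirrors the examples above):

    Ecto.Adapters.SQL.query(YourApp.Repo,
      "begin :1 := func(:2); end;", [{:out, :varchar}, "one hundred"])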
### Primitive types
The primitive types are:
Ecto types | Oracle types | Literal syntax in params
:---------------------- | :------------------------------- | :-----------------------
`:id`, `:integer` | `NUMBER (*,0)`, `INTEGER` | 1, 2, 3
`:float` | `NUMBER`,`FLOAT`,`BINARY_FLOAT` | 1.0, 2.0, 3.0
`:decimal` | `NUMBER`,`FLOAT`,`BINARY_FLOAT` | [`Decimal`](https://hexdocs.pm/decimal)
`:string`, `:binary` | `CHAR`, `VARCHAR2`, `CLOB` | "one hundred"
`:string`, `:binary`     | `NCHAR`, `NVARCHAR2`, `NCLOB`    | "百元", "万円"
`{:array, :integer}` | `RAW`, `BLOB` | 'E799BE'
`:boolean` | `CHAR`, `VARCHAR2`, `NUMBER` | true, false
`:map`                   | `CLOB`, `NCLOB`                  | %{"one" => 1, "hundred" => "百"}
`:naive_datetime` | `DATE`, `TIMESTAMP` | [`NaiveDateTime`](https://hexdocs.pm/elixir)
`:utc_datetime` | `TIMESTAMP WITH TIME ZONE` | [`DateTime`](https://hexdocs.pm/elixir)
### Character sets
`:us7ascii`, `:we8iso8859p1`, `:ee8iso8859p2`, `:nee8iso8859p4`, `:cl8iso8859p5`, `:ar8iso8859p6`,`:el8iso8859p7`,
`:iw8iso8859p8`, `:we8iso8859p9`, `:ne8iso8859p10`, `:th8tisascii`, `:vn8mswin1258`, `:we8iso8859p15`,
`:blt8iso8859p13`, `:ee8mswin1250`, `:cl8mswin1251`, `:el8mswin1253`, `:iw8mswin1255`, `:tr8mswin1254`,
`:we8mswin1252`, `:blt8mswin1257`, `:ar8mswin1256`, `:ja16euc`, `:ja16sjis`, `:ja16euctilde`,`:ja16sjistilde`,
`:ko16mswin949`, `:zhs16gbk`, `:zht32euc`, `:zht16big5`, `:zht16mswin950`, `:zht16hkscs`
#### Examples
iex> Ecto.Adapters.SQL.query(YourApp.Repo, "select 1+:1, sysdate, rowid from dual where 1=:1 ", [1])
{:ok, %{num_rows: 1, rows: [[2, ~N[2016-08-01 13:14:15], "AAAACOAABAAAAWJAAA"]]}}
Imagine you have this migration:
defmodule YourApp.Migration do
use Ecto.Migration
def up do
create table(:users, comment: "users table") do
add :name, :string, comment: "name column"
add :namae, :string, national: true
add :custom_id, :uuid
timestamps()
end
end
end
You can execute it manually with:
Ecto.Migrator.up(YourApp.Repo, 20160801131415, YourApp.Migration)
"""
use Ecto.Adapters.SQL, driver: Jamdb.Oracle, migration_lock: nil
@behaviour Ecto.Adapter.Storage
@behaviour Ecto.Adapter.Structure
@impl true
def loaders({:array, _}, type), do: [&array_decode/1, type]
def loaders({:embed, _}, type), do: [&json_decode/1, &Ecto.Adapters.SQL.load_embed(type, &1)]
def loaders({:map, _}, type), do: [&json_decode/1, &Ecto.Adapters.SQL.load_embed(type, &1)]
def loaders(:map, type), do: [&json_decode/1, type]
def loaders(:float, type), do: [&float_decode/1, type]
def loaders(:boolean, type), do: [&bool_decode/1, type]
def loaders(:binary_id, type), do: [Ecto.UUID, type]
def loaders(_, type), do: [type]
defp bool_decode("0"), do: {:ok, false}
defp bool_decode("1"), do: {:ok, true}
defp bool_decode(0), do: {:ok, false}
defp bool_decode(1), do: {:ok, true}
defp bool_decode(x), do: {:ok, x}
defp float_decode(%Decimal{} = decimal), do: {:ok, Decimal.to_float(decimal)}
defp float_decode(x), do: {:ok, x}
defp json_decode(x) when is_binary(x), do: {:ok, Jamdb.Oracle.json_library().decode!(x)}
defp json_decode(x), do: {:ok, x}
defp array_decode(x) when is_binary(x), do: {:ok, :binary.bin_to_list(x)}
defp array_decode(x), do: {:ok, x}
@impl true
def storage_up(_opts), do: err()
@impl true
def storage_down(_opts), do: err()
@impl true
def storage_status(_opts), do: err()
@impl true
def structure_dump(_default, _config), do: err()
@impl true
def structure_load(_default, _config), do: err()
@impl true
def supports_ddl_transaction? do
false
end
defp err, do: {:error, false}
end
defmodule Ecto.Adapters.Jamdb.Oracle.Connection do
@moduledoc false
@behaviour Ecto.Adapters.SQL.Connection
@impl true
def child_spec(opts) do
DBConnection.child_spec(Jamdb.Oracle, opts)
end
@impl true
def execute(conn, query, params, opts) do
DBConnection.execute(conn, query!(query, ""), params, opts)
end
@impl true
def prepare_execute(conn, name, query, params, opts) do
DBConnection.prepare_execute(conn, query!(query, name), params, opts)
end
@impl true
def stream(conn, query, params, opts) do
DBConnection.stream(conn, query!(query, ""), params, opts)
end
@impl true
def query(conn, query, params, opts) do
case DBConnection.prepare_execute(conn, query!(query, ""), params, opts) do
{:ok, _, result} -> {:ok, result}
{:error, err} -> {:error, err}
end
end
defp query!(sql, name) when is_binary(sql) or is_list(sql) do
%Jamdb.Oracle.Query{statement: IO.iodata_to_binary(sql), name: name}
end
defp query!(%{} = query, _name) do
query
end
defdelegate all(query), to: Jamdb.Oracle.Query
defdelegate update_all(query), to: Jamdb.Oracle.Query
defdelegate delete_all(query), to: Jamdb.Oracle.Query
defdelegate insert(prefix, table, header, rows, on_conflict, returning), to: Jamdb.Oracle.Query
defdelegate update(prefix, table, fields, filters, returning), to: Jamdb.Oracle.Query
defdelegate delete(prefix, table, filters, returning), to: Jamdb.Oracle.Query
defdelegate table_exists_query(table), to: Jamdb.Oracle.Query
defdelegate execute_ddl(command), to: Jamdb.Oracle.Query
@impl true
def to_constraints(_err), do: []
@impl true
def ddl_logs(_result), do: []
end
|
lib/jamdb_oracle_ecto.ex
| 0.749637 | 0.651854 |
jamdb_oracle_ecto.ex
|
starcoder
|
defmodule Phoenix.HTML do
@moduledoc """
Helpers for working with HTML strings and templates.
When used, it imports the given modules:
* `Phoenix.HTML` - functions to handle HTML safety;
* `Phoenix.HTML.Tag` - functions for generating HTML tags;
* `Phoenix.HTML.Form` - functions for working with forms;
* `Phoenix.HTML.Link` - functions for generating links and urls;
* `Phoenix.HTML.Format` - functions for formatting text;
## HTML Safe
One of the main responsibilities of this module is to
provide convenience functions for escaping and marking
HTML code as safe.
By default, data output in templates is not considered
safe:
<%= "<hello>" %>
will be shown as:
<hello>
User data or data coming from the database is almost never
considered safe. However, in some cases, you may want to tag
it as safe and show its "raw" contents:
<%= raw "<hello>" %>
Keep in mind most helpers will automatically escape your data
and return safe content:
<%= content_tag :p, "<hello>" %>
will properly output:
<p><hello></p>
"""
@doc false
defmacro __using__(_) do
quote do
import Phoenix.HTML
import Phoenix.HTML.Form
import Phoenix.HTML.Link
import Phoenix.HTML.Tag, except: [attributes_escape: 1]
import Phoenix.HTML.Format
end
end
@typedoc "Guaranteed to be safe"
@type safe :: {:safe, iodata}
@typedoc "May be safe or unsafe (i.e. it needs to be converted)"
@type unsafe :: Phoenix.HTML.Safe.t()
@doc false
@deprecated "use the ~H sigil instead"
defmacro sigil_e(expr, opts) do
handle_sigil(expr, opts, __CALLER__)
end
@doc false
@deprecated "use the ~H sigil instead"
defmacro sigil_E(expr, opts) do
handle_sigil(expr, opts, __CALLER__)
end
defp handle_sigil({:<<>>, meta, [expr]}, [], caller) do
options = [
engine: Phoenix.HTML.Engine,
file: caller.file,
line: caller.line + 1,
indentation: meta[:indentation] || 0
]
EEx.compile_string(expr, options)
end
defp handle_sigil(_, _, _) do
raise ArgumentError,
"interpolation not allowed in ~e sigil. " <>
"Remove the interpolation, use <%= %> to insert values, " <>
"or use ~E to show the interpolation literally"
end
@doc """
Marks the given content as raw.
This means any HTML code inside the given
string won't be escaped.
iex> raw("<hello>")
{:safe, "<hello>"}
iex> raw({:safe, "<hello>"})
{:safe, "<hello>"}
iex> raw(nil)
{:safe, ""}
"""
@spec raw(iodata | safe | nil) :: safe
def raw({:safe, value}), do: {:safe, value}
def raw(nil), do: {:safe, ""}
def raw(value) when is_binary(value) or is_list(value), do: {:safe, value}
@doc """
Escapes the HTML entities in the given term, returning safe iodata.
iex> html_escape("<hello>")
{:safe, [[[] | "<"], "hello" | ">"]}
iex> html_escape('<hello>')
{:safe, ["<", 104, 101, 108, 108, 111, ">"]}
iex> html_escape(1)
{:safe, "1"}
iex> html_escape({:safe, "<hello>"})
{:safe, "<hello>"}
"""
@spec html_escape(unsafe) :: safe
def html_escape({:safe, _} = safe), do: safe
def html_escape(other), do: {:safe, Phoenix.HTML.Engine.encode_to_iodata!(other)}
@doc """
Converts a safe result into a string.
Fails if the result is not safe. In such cases, you can
invoke `html_escape/1` or `raw/1` accordingly before.
You can combine `html_escape/1` and `safe_to_string/1`
to convert a data structure to a escaped string:
data |> html_escape() |> safe_to_string()
"""
@spec safe_to_string(safe) :: String.t()
def safe_to_string({:safe, iodata}) do
IO.iodata_to_binary(iodata)
end
@doc ~S"""
Escapes an enumerable of attributes, returning iodata.
The attributes are rendered in the given order. Note if
a map is given, the key ordering is not guaranteed.
The keys and values can be of any shape, as long as they
implement the `Phoenix.HTML.Safe` protocol. In addition,
if the key is an atom, it will be "dasherized". In other
words, `:phx_value_id` will be converted to `phx-value-id`.
Furthermore, the following attributes provide behaviour:
* `:data` and `:aria` - they accept a keyword list as value.
`data: [confirm: "are you sure?"]` is converted to
`data-confirm="are you sure?"`.
* `:class` - it accepts a list of classes as argument. Each
element in the list is separated by space. Nil or false
elements are discarded. `class: ["foo", nil, "bar"]`
then becomes `class="foo bar"`.
* `:id` - it is validated, raising if a number is given as an ID,
which is not allowed by the HTML spec and leads to unpredictable
behaviour.
## Examples
iex> safe_to_string attributes_escape(title: "the title", id: "the id", selected: true)
" title=\"the title\" id=\"the id\" selected"
iex> safe_to_string attributes_escape(%{data: [confirm: "Are you sure?"], class: "foo"})
" class=\"foo\" data-confirm=\"Are you sure?\""
iex> safe_to_string attributes_escape(%{phx: [value: [foo: "bar"]], class: "foo"})
" class=\"foo\" phx-value-foo=\"bar\""
"""
def attributes_escape(attrs) when is_list(attrs) do
{:safe, build_attrs(attrs)}
end
def attributes_escape(attrs) do
{:safe, attrs |> Enum.to_list() |> build_attrs()}
end
defp build_attrs([{k, true} | t]),
do: [?\s, key_escape(k) | build_attrs(t)]
defp build_attrs([{_, false} | t]),
do: build_attrs(t)
defp build_attrs([{_, nil} | t]),
do: build_attrs(t)
defp build_attrs([{:id, v} | t]),
do: [" id=\"", id_value(v), ?" | build_attrs(t)]
defp build_attrs([{:class, v} | t]),
do: [" class=\"", class_value(v), ?" | build_attrs(t)]
defp build_attrs([{:aria, v} | t]) when is_list(v),
do: nested_attrs(v, " aria", t)
defp build_attrs([{:data, v} | t]) when is_list(v),
do: nested_attrs(v, " data", t)
defp build_attrs([{:phx, v} | t]) when is_list(v),
do: nested_attrs(v, " phx", t)
defp build_attrs([{"id", v} | t]),
do: [" id=\"", id_value(v), ?" | build_attrs(t)]
defp build_attrs([{"class", v} | t]),
do: [" class=\"", class_value(v), ?" | build_attrs(t)]
defp build_attrs([{"aria", v} | t]) when is_list(v),
do: nested_attrs(v, " aria", t)
defp build_attrs([{"data", v} | t]) when is_list(v),
do: nested_attrs(v, " data", t)
defp build_attrs([{"phx", v} | t]) when is_list(v),
do: nested_attrs(v, " phx", t)
defp build_attrs([{k, v} | t]),
do: [?\s, key_escape(k), ?=, ?", attr_escape(v), ?" | build_attrs(t)]
defp build_attrs([]), do: []
defp nested_attrs([{k, v} | kv], attr, t) when is_list(v),
do: [nested_attrs(v, "#{attr}-#{key_escape(k)}", []) | nested_attrs(kv, attr, t)]
defp nested_attrs([{k, v} | kv], attr, t),
do: [attr, ?-, key_escape(k), ?=, ?", attr_escape(v), ?" | nested_attrs(kv, attr, t)]
defp nested_attrs([], _attr, t),
do: build_attrs(t)
defp id_value(value) when is_number(value) do
raise ArgumentError,
"attempting to set id attribute to #{value}, but the DOM ID cannot be set to a number"
end
defp id_value(value) do
attr_escape(value)
end
defp class_value(value) when is_list(value) do
value
|> Enum.filter(& &1)
|> Enum.join(" ")
|> attr_escape()
end
defp class_value(value) do
attr_escape(value)
end
defp key_escape(value) when is_atom(value), do: String.replace(Atom.to_string(value), "_", "-")
defp key_escape(value), do: attr_escape(value)
defp attr_escape({:safe, data}), do: data
defp attr_escape(nil), do: []
defp attr_escape(other) when is_binary(other), do: Phoenix.HTML.Engine.encode_to_iodata!(other)
defp attr_escape(other), do: Phoenix.HTML.Safe.to_iodata(other)
@doc """
Escapes HTML content to be inserted a JavaScript string.
This function is useful in JavaScript responses when there is a need
to escape HTML rendered from other templates, like in the following:
$("#container").append("<%= javascript_escape(render("post.html", post: @post)) %>");
It escapes quotes (double and single), double backslashes and others.
"""
@spec javascript_escape(binary) :: binary
@spec javascript_escape(safe) :: safe
def javascript_escape({:safe, data}),
do: {:safe, data |> IO.iodata_to_binary() |> javascript_escape("")}
def javascript_escape(data) when is_binary(data),
do: javascript_escape(data, "")
defp javascript_escape(<<0x2028::utf8, t::binary>>, acc),
do: javascript_escape(t, <<acc::binary, "\\u2028">>)
defp javascript_escape(<<0x2029::utf8, t::binary>>, acc),
do: javascript_escape(t, <<acc::binary, "\\u2029">>)
defp javascript_escape(<<0::utf8, t::binary>>, acc),
do: javascript_escape(t, <<acc::binary, "\\u0000">>)
defp javascript_escape(<<"</", t::binary>>, acc),
do: javascript_escape(t, <<acc::binary, ?<, ?\\, ?/>>)
defp javascript_escape(<<"\r\n", t::binary>>, acc),
do: javascript_escape(t, <<acc::binary, ?\\, ?n>>)
defp javascript_escape(<<h, t::binary>>, acc) when h in [?", ?', ?\\, ?`],
do: javascript_escape(t, <<acc::binary, ?\\, h>>)
defp javascript_escape(<<h, t::binary>>, acc) when h in [?\r, ?\n],
do: javascript_escape(t, <<acc::binary, ?\\, ?n>>)
defp javascript_escape(<<h, t::binary>>, acc),
do: javascript_escape(t, <<acc::binary, h>>)
defp javascript_escape(<<>>, acc), do: acc
end
|
lib/phoenix_html.ex
| 0.85315 | 0.539711 |
phoenix_html.ex
|
starcoder
|
defmodule ThousandIsland do
@moduledoc """
Thousand Island is a modern, pure Elixir socket server, inspired heavily by
[ranch](https://github.com/ninenines/ranch). It aims to be easy to understand
& reason about, while also being at least as stable and performant as alternatives.
Thousand Island is implemented as a supervision tree which is intended to be hosted
inside a host application, often as a dependency embedded within a higher-level
protocol library such as [Bandit](https://github.com/mtrudel/bandit). Aside from
supervising the Thousand Island process tree, applications interact with Thousand
Island primarily via the `ThousandIsland.Handler` behaviour.
## Handlers
The `ThousandIsland.Handler` behaviour defines the interface that Thousand Island
uses to pass `ThousandIsland.Socket`s up to the application level; together they
form the primary interface that most applications will have with Thousand Island.
Thousand Island comes with a few simple protocol handlers to serve as examples;
these can be found in the [examples](https://github.com/mtrudel/thousand_island/tree/main/examples)
folder of this project. A simple implementation would look like this:
```elixir
defmodule Echo do
use ThousandIsland.Handler
@impl ThousandIsland.Handler
def handle_data(data, socket, state) do
ThousandIsland.Socket.send(socket, data)
{:continue, state}
end
end
{:ok, pid} = ThousandIsland.start_link(port: 1234, handler_module: Echo)
```
For more information, please consult the `ThousandIsland.Handler` documentation.
## Starting a Thousand Island Server
A typical use of `ThousandIsland` might look like the following:
```elixir
defmodule MyApp.Supervisor do
# ... other Supervisor boilerplate
def init(config) do
children = [
# ... other children as dictated by your app
{ThousandIsland, port: 1234, handler_module: MyApp.ConnectionHandler}
]
Supervisor.init(children, strategy: :one_for_one)
end
end
```
You can also start servers directly via the `start_link/1` function:
```elixir
{:ok, pid} = ThousandIsland.start_link(port: 1234, handler_module: MyApp.ConnectionHandler)
```
## Configuration
A number of options are defined when starting a server. The complete list is
defined by the `t:ThousandIsland.options/0` type.
## Connection Draining & Shutdown
`ThousandIsland` instances are just a process tree consisting of standard
Supervisor, GenServer and Task modules, and so the usual rules regarding
shutdown and shutdown timeouts apply. Immediately upon beginning the shutdown
sequence the ThousandIsland.ShutdownListener process will cause the listening socket
to shut down, which in turn will cause all of the `Acceptor` processes to shut
down as well. At this point all that is left in the supervision tree are several
layers of Supervisors and whatever `Handler` processes were in progress when
shutdown was initiated. At this point, standard Supervisor shutdown timeout
semantics give existing connections a chance to finish things up. `Handler`
processes trap exit, so they continue running beyond shutdown until they either
complete or are `:brutal_kill`ed after their shutdown timeout expires.
## Logging & Telemetry
As a low-level library, Thousand Island purposely does not do any inline
logging of any kind. The `ThousandIsland.Logging` module defines a number of
functions to aid in tracing connections at various log levels, and such logging
can be dynamically enabled and disabled against an already running server. This
logging is backed by `:telemetry` events internally, and if desired these events
can also be hooked by your application for logging or metric purposes. The following is a complete list of events emitted by Thousand Island:
* `[:listener, :start]`: Emitted when the server successfully listens on the configured port.
* `[:listener, :error]`: Emitted when the server encounters an error listening on the configured port.
* `[:listener, :shutdown]`: Emitted when the server shuts down.
* `[:acceptor, :start]`: Emitted when an acceptor process starts up.
* `[:acceptor, :accept]`: Emitted when an acceptor process accepts a new client connection.
* `[:acceptor, :shutdown]`: Emitted when an acceptor process shuts down.
* `[:handler, :start]`: Emitted whenever a `ThousandIsland.Handler` process is made ready
* `[:handler, :async_recv]`: Emitted whenever a `ThousandIsland.Handler` process receives data asynchronously
* `[:handler, :shutdown]`: Emitted whenever a `ThousandIsland.Handler` process terminates
* `[:handler, :error]`: Emitted whenever a `ThousandIsland.Handler` process shuts down due to error
* `[:socket, :handshake]`: Emitted whenever a `ThousandIsland.Socket.handshake/1` call completes.
* `[:socket, :handshake_error]`: Emitted whenever a `ThousandIsland.Socket.handshake/1` call errors.
* `[:socket, :recv]`: Emitted whenever a `ThousandIsland.Socket.recv/3` call completes.
* `[:socket, :send]`: Emitted whenever a `ThousandIsland.Socket.send/2` call completes.
* `[:socket, :sendfile]`: Emitted whenever a `ThousandIsland.Socket.sendfile/4` call completes.
* `[:socket, :shutdown]`: Emitted whenever a `ThousandIsland.Socket.shutdown/2` call completes.
* `[:socket, :close]`: Emitted whenever a `ThousandIsland.Socket.close/1` call completes.
Where measurements indicate a time duration they are expressed in `System`
`:native` units for performance reasons. They can be converted to any desired
time unit via `System.convert_time_unit/3`.
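These events can be consumed with the standard `:telemetry` API. For example
(a sketch; the handler id and log label are illustrative):

```elixir
:telemetry.attach(
  "log-accepts",
  [:acceptor, :accept],
  fn _event, measurements, metadata, _config ->
    IO.inspect({measurements, metadata}, label: "connection accepted")
  end,
  nil
)
```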
"""
@typedoc """
Possible options to configure a server. Valid option values are as follows:
* `handler_module`: The name of the module used to handle connections to this server.
The module is expected to implement the `ThousandIsland.Handler` behaviour. Required.
* `handler_options`: A term which is passed as the initial state value to
`c:ThousandIsland.Handler.handle_connection/2` calls. Optional, defaulting to nil.
* `genserver_options`: A term which is passed as the value to the handler module's
underlying `GenServer.start_link/3` call. Optional, defaulting to [].
* `port`: The TCP port number to listen on. If not specified this defaults to 4000.
If a port number of `0` is given, the server will dynamically assign a port number
which can then be obtained via `local_info/1`.
* `transport_module`: The name of the module which provides basic socket functions.
Thousand Island provides `ThousandIsland.Transports.TCP` and `ThousandIsland.Transports.SSL`,
which provide clear and TLS encrypted TCP sockets respectively. If not specified this
defaults to `ThousandIsland.Transports.TCP`.
* `transport_options`: A keyword list of options to be passed to the transport module's
`c:ThousandIsland.Transport.listen/2` function. Valid values depend on the transport
module specified in `transport_module` and can be found in the documentation for the
`ThousandIsland.Transports.TCP` and `ThousandIsland.Transports.SSL` modules. Any options
in terms of interfaces to listen to / certificates and keys to use for SSL connections
will be passed in via this option.
* `num_acceptors`: The number of acceptor processes to run. Defaults to 10.
"""
@type options :: [
handler_module: module(),
handler_options: term(),
genserver_options: GenServer.options(),
port: :inet.port_number(),
transport_module: module(),
transport_options: transport_options(),
num_acceptors: pos_integer(),
read_timeout: timeout()
]
@type transport_options() ::
ThousandIsland.Transports.TCP.options() | ThousandIsland.Transports.SSL.options()
alias ThousandIsland.{Listener, Server, ServerConfig, Transport}
@doc false
@spec child_spec(options()) :: Supervisor.child_spec()
def child_spec(opts) do
%{
id: __MODULE__,
start: {__MODULE__, :start_link, [opts]},
type: :supervisor,
restart: :permanent,
shutdown: 5000
}
end
@doc """
Starts a `ThousandIsland` instance with the given options. Returns a pid
that can be used to further manipulate the server via other functions defined on
this module in the case of success, or an error tuple describing the reason the
server was unable to start in the case of failure.
"""
@spec start_link(options()) :: Supervisor.on_start()
def start_link(opts \\ []) do
opts
|> ServerConfig.new()
|> Server.start_link()
end
@doc """
Returns information about the address and port that the server is listening on
"""
@spec listener_info(pid()) :: {:ok, Transport.socket_info()}
def listener_info(pid) do
pid |> Server.listener_pid() |> Listener.listener_info()
end
@doc """
Synchronously stops the given server, waiting up to the given number of milliseconds
for existing connections to finish up. Immediately upon calling this function,
the server stops listening for new connections, and then proceeds to wait until
either all existing connections have completed or the specified timeout has
elapsed.
"""
@spec stop(pid(), timeout()) :: :ok
def stop(pid, connection_wait \\ 15_000) do
Supervisor.stop(pid, :normal, connection_wait)
end
end
|
lib/thousand_island.ex
| 0.917428 | 0.871803 |
thousand_island.ex
|
starcoder
|
defmodule AWS.CostExplorer do
@moduledoc """
The Cost Explorer API enables you to programmatically query your cost and usage
data.
You can query for aggregated data such as total monthly costs or total daily
usage. You can also query for granular data, such as the number of daily write
operations for Amazon DynamoDB database tables in your production environment.
Service Endpoint
The Cost Explorer API provides the following endpoint:
* `https://ce.us-east-1.amazonaws.com`
For information about costs associated with the Cost Explorer API, see [AWS Cost Management Pricing](http://aws.amazon.com/aws-cost-management/pricing/).
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "AWS Cost Explorer",
api_version: "2017-10-25",
content_type: "application/x-amz-json-1.1",
credential_scope: "us-east-1",
endpoint_prefix: "ce",
global?: true,
protocol: "json",
service_id: "Cost Explorer",
signature_version: "v4",
signing_name: "ce",
target_prefix: "AWSInsightsIndexService"
}
end
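# Illustrative only: a client is typically built with `AWS.Client.create/3`;
# the credentials below are placeholders.
#
#     client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")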
@doc """
Creates a new cost anomaly detection monitor with the requested type and monitor
specification.
"""
def create_anomaly_monitor(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateAnomalyMonitor", input, options)
end
@doc """
Adds a subscription to a cost anomaly detection monitor.
You can use each subscription to define subscribers with email or SNS
notifications. Email subscribers can set a dollar threshold and a time frequency
for receiving notifications.
"""
def create_anomaly_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateAnomalySubscription", input, options)
end
@doc """
Creates a new Cost Category with the requested name and rules.
"""
def create_cost_category_definition(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateCostCategoryDefinition", input, options)
end
@doc """
Deletes a cost anomaly monitor.
"""
def delete_anomaly_monitor(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteAnomalyMonitor", input, options)
end
@doc """
Deletes a cost anomaly subscription.
"""
def delete_anomaly_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteAnomalySubscription", input, options)
end
@doc """
Deletes a Cost Category.
Expenses from this month going forward will no longer be categorized with this
Cost Category.
"""
def delete_cost_category_definition(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteCostCategoryDefinition", input, options)
end
@doc """
Returns the name, ARN, rules, definition, and effective dates of a Cost Category
that's defined in the account.
You have the option to use `EffectiveOn` to return a Cost Category that is
active on a specific date. If there is no `EffectiveOn` specified, you'll see a
Cost Category that is effective on the current date. If Cost Category is still
effective, `EffectiveEnd` is omitted in the response.
"""
def describe_cost_category_definition(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeCostCategoryDefinition", input, options)
end
@doc """
Retrieves all of the cost anomalies detected on your account, during the time
period specified by the `DateInterval` object.
"""
def get_anomalies(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetAnomalies", input, options)
end
@doc """
Retrieves the cost anomaly monitor definitions for your account.
You can filter using a list of cost anomaly monitor Amazon Resource Names
(ARNs).
"""
def get_anomaly_monitors(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetAnomalyMonitors", input, options)
end
@doc """
Retrieves the cost anomaly subscription objects for your account.
You can filter using a list of cost anomaly monitor Amazon Resource Names
(ARNs).
"""
def get_anomaly_subscriptions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetAnomalySubscriptions", input, options)
end
@doc """
Retrieves cost and usage metrics for your account.
You can specify which cost and usage-related metric, such as `BlendedCosts` or
`UsageQuantity`, that you want the request to return. You can also filter and
group your data by various dimensions, such as `SERVICE` or `AZ`, in a specific
time range. For a complete list of valid dimensions, see the
[GetDimensionValues](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_GetDimensionValues.html) operation. The management account in an organization in AWS Organizations has
access to all member accounts.
For information about filter limitations, see [Quotas and
restrictions](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/billing-limits.html)
in the *Billing and Cost Management User Guide*.
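## Example

A hedged sketch of an input map (field shapes follow the Cost Explorer API
reference; `client` is assumed to be built with `AWS.Client.create/3`):

    input = %{
      "TimePeriod" => %{"Start" => "2021-01-01", "End" => "2021-02-01"},
      "Granularity" => "MONTHLY",
      "Metrics" => ["BlendedCost"]
    }

    AWS.CostExplorer.get_cost_and_usage(client, input)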
"""
def get_cost_and_usage(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetCostAndUsage", input, options)
end
@doc """
Retrieves cost and usage metrics with resources for your account.
You can specify which cost and usage-related metric, such as `BlendedCosts` or
`UsageQuantity`, that you want the request to return. You can also filter and
group your data by various dimensions, such as `SERVICE` or `AZ`, in a specific
time range. For a complete list of valid dimensions, see the
[GetDimensionValues](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_GetDimensionValues.html) operation. The management account in an organization in AWS Organizations has
access to all member accounts. This API is currently available for the Amazon
Elastic Compute Cloud - Compute service only.
This is an opt-in only feature. You can enable this feature from the Cost
Explorer Settings page. For information on how to access the Settings page, see
[Controlling Access for Cost
Explorer](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/ce-access.html)
in the *AWS Billing and Cost Management User Guide*.
"""
def get_cost_and_usage_with_resources(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetCostAndUsageWithResources", input, options)
end
@doc """
Retrieves an array of Cost Category names and values, along with the cost incurred for each.
If some Cost Category names and values are not associated with any cost, they
will not be returned by this API.
"""
def get_cost_categories(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetCostCategories", input, options)
end
@doc """
Retrieves a forecast for how much Amazon Web Services predicts that you will
spend over the forecast time period that you select, based on your past costs.
"""
def get_cost_forecast(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetCostForecast", input, options)
end
@doc """
Retrieves all available filter values for a specified filter over a period of
time.
You can search the dimension values for an arbitrary string.
"""
def get_dimension_values(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetDimensionValues", input, options)
end
@doc """
Retrieves the reservation coverage for your account.
This enables you to see how much of your Amazon Elastic Compute Cloud, Amazon
ElastiCache, Amazon Relational Database Service, or Amazon Redshift usage is
covered by a reservation. An organization's management account can see the
coverage of the associated member accounts. This supports dimensions, Cost
Categories, and nested expressions. For any time period, you can filter data
about reservation usage by the following dimensions:
* AZ
* CACHE_ENGINE
* DATABASE_ENGINE
* DEPLOYMENT_OPTION
* INSTANCE_TYPE
* LINKED_ACCOUNT
* OPERATING_SYSTEM
* PLATFORM
* REGION
* SERVICE
* TAG
* TENANCY
To determine valid values for a dimension, use the `GetDimensionValues`
operation.
"""
def get_reservation_coverage(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetReservationCoverage", input, options)
end
@doc """
Gets recommendations for which reservations to purchase.
These recommendations could help you reduce your costs. Reservations provide a
discounted hourly rate (up to 75%) compared to On-Demand pricing.
AWS generates your recommendations by identifying your On-Demand usage during a
specific time period and collecting your usage into categories that are eligible
for a reservation. After AWS has these categories, it simulates every
combination of reservations in each category of usage to identify the best
number of each type of RI to purchase to maximize your estimated savings.
For example, AWS automatically aggregates your Amazon EC2 Linux, shared tenancy,
and c4 family usage in the US West (Oregon) Region and recommends that you buy
size-flexible regional reservations to apply to the c4 family usage. AWS
recommends the smallest size instance in an instance family. This makes it
easier to purchase a size-flexible RI. AWS also shows the equal number of
normalized units so that you can purchase any instance size that you want. For
this example, your RI recommendation would be for `c4.large` because that is the
smallest size instance in the c4 instance family.
"""
def get_reservation_purchase_recommendation(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"GetReservationPurchaseRecommendation",
input,
options
)
end
@doc """
Retrieves the reservation utilization for your account.
The management account in an organization has access to member accounts. You can
filter data by dimensions in a time period. You can use `GetDimensionValues` to
determine the possible dimension values. Currently, you can group only by
`SUBSCRIPTION_ID`.
"""
def get_reservation_utilization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetReservationUtilization", input, options)
end
@doc """
Creates recommendations that help you save cost by identifying idle and
underutilized Amazon EC2 instances.
Recommendations are generated to either downsize or terminate instances, along
with providing savings detail and metrics. For details on calculation and
function, see [Optimizing Your Cost with Rightsizing Recommendations](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/ce-rightsizing.html)
in the *AWS Billing and Cost Management User Guide*.
"""
def get_rightsizing_recommendation(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetRightsizingRecommendation", input, options)
end
@doc """
Retrieves the Savings Plans covered for your account.
This enables you to see how much of your cost is covered by a Savings Plan. An
organization's management account can see the coverage of the associated member
accounts. This supports dimensions, Cost Categories, and nested expressions. For
any time period, you can filter data for Savings Plans usage with the following
dimensions:
* `LINKED_ACCOUNT`
* `REGION`
* `SERVICE`
* `INSTANCE_FAMILY`
To determine valid values for a dimension, use the `GetDimensionValues`
operation.
"""
def get_savings_plans_coverage(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetSavingsPlansCoverage", input, options)
end
@doc """
Retrieves your request parameters, Savings Plan Recommendations Summary and
Details.
"""
def get_savings_plans_purchase_recommendation(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"GetSavingsPlansPurchaseRecommendation",
input,
options
)
end
@doc """
Retrieves the Savings Plans utilization for your account across date ranges with
daily or monthly granularity.
The management account in an organization has access to member accounts. You can
use `GetDimensionValues` in `SAVINGS_PLANS` to determine the possible dimension
values.
You cannot group by any dimension values for `GetSavingsPlansUtilization`.
"""
def get_savings_plans_utilization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetSavingsPlansUtilization", input, options)
end
@doc """
Retrieves attribute data along with aggregate utilization and savings data for a
given time period.
This doesn't support granular or grouped data (daily/monthly) in response. You
can't retrieve data by dates in a single response similar to
`GetSavingsPlanUtilization`, but you have the option to make multiple calls to
`GetSavingsPlanUtilizationDetails` by providing individual dates. You can use
`GetDimensionValues` in `SAVINGS_PLANS` to determine the possible dimension
values.
`GetSavingsPlanUtilizationDetails` internally groups data by `SavingsPlansArn`.
"""
def get_savings_plans_utilization_details(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetSavingsPlansUtilizationDetails", input, options)
end
@doc """
Queries for available tag keys and tag values for a specified period.
You can search the tag values for an arbitrary string.
"""
def get_tags(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetTags", input, options)
end
@doc """
Retrieves a forecast for how much Amazon Web Services predicts that you will use
over the forecast time period that you select, based on your past usage.
"""
def get_usage_forecast(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetUsageForecast", input, options)
end
@doc """
Returns the name, ARN, `NumberOfRules` and effective dates of all Cost
Categories defined in the account.
You have the option to use `EffectiveOn` to return a list of Cost Categories
that were active on a specific date. If there is no `EffectiveOn` specified,
you'll see Cost Categories that are effective on the current date. If Cost
Category is still effective, `EffectiveEnd` is omitted in the response.
`ListCostCategoryDefinitions` supports pagination. The request can have a
`MaxResults` range up to 100.
"""
def list_cost_category_definitions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListCostCategoryDefinitions", input, options)
end
@doc """
Modifies the feedback property of a given cost anomaly.
"""
def provide_anomaly_feedback(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ProvideAnomalyFeedback", input, options)
end
@doc """
Updates an existing cost anomaly monitor.
The changes made are applied going forward, and do not change anomalies
detected in the past.
"""
def update_anomaly_monitor(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateAnomalyMonitor", input, options)
end
@doc """
Updates an existing cost anomaly monitor subscription.
"""
def update_anomaly_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateAnomalySubscription", input, options)
end
@doc """
Updates an existing Cost Category.
Changes made to the Cost Category rules will be used to categorize the current
month's expenses and future expenses. This won't change categorization for the
previous months.
"""
def update_cost_category_definition(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateCostCategoryDefinition", input, options)
end
end
|
lib/aws/generated/cost_explorer.ex
| 0.919904 | 0.532304 |
cost_explorer.ex
|
starcoder
|
defmodule Stream.Reducers do
# Collection of reducers shared by Enum and Stream.
@moduledoc false
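# Note: `acc/3`, `cont/3` and `cont_with_acc/5` used below are helper macros
# assumed to be defined privately by the modules that use these reducers
# (Enum/Stream); they wrap the reducer accumulator as `acc(head, state, tail)`
# and thread the continuation `f`.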
defmacro chunk(n, step, limit, f \\ nil) do
quote do
fn entry, acc(h, { buffer, count }, t) ->
buffer = [entry|buffer]
count = count + 1
new =
if count >= unquote(limit) do
left = count - unquote(step)
{ Enum.take(buffer, left), left }
else
{ buffer, count }
end
if count == unquote(n) do
cont_with_acc(unquote(f), :lists.reverse(buffer), h, new, t)
else
{ :cont, acc(h, new, t) }
end
end
end
end
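# For reference, chunk(2, 1, 2) above, reduced over 1..4, emits the overlapping
# windows [1, 2], [2, 3], [3, 4]; `limit` caps how much of the buffer survives
# between emissions when `step < n`.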
defmacro chunk_by(callback, f \\ nil) do
quote do
fn
entry, acc(h, { buffer, value }, t) ->
new_value = unquote(callback).(entry)
if new_value == value do
{ :cont, acc(h, { [entry|buffer], value }, t) }
else
cont_with_acc(unquote(f), :lists.reverse(buffer), h, { [entry], new_value }, t)
end
entry, acc(h, nil, t) ->
{ :cont, acc(h, { [entry], unquote(callback).(entry) }, t) }
end
end
end
defmacro drop(f \\ nil) do
quote do
fn
_entry, acc(h, n, t) when n > 0 ->
{ :cont, acc(h, n-1, t) }
entry, acc(h, n, t) ->
cont_with_acc(unquote(f), entry, h, n, t)
end
end
end
defmacro drop_while(callback, f \\ nil) do
quote do
fn entry, acc(h, bool, t) = orig ->
if bool and unquote(callback).(entry) do
{ :cont, orig }
else
cont_with_acc(unquote(f), entry, h, false, t)
end
end
end
end
defmacro filter(callback, f \\ nil) do
quote do
fn(entry, acc) ->
if unquote(callback).(entry) do
cont(unquote(f), entry, acc)
else
{ :cont, acc }
end
end
end
end
defmacro filter_map(filter, mapper, f \\ nil) do
quote do
fn(entry, acc) ->
if unquote(filter).(entry) do
cont(unquote(f), unquote(mapper).(entry), acc)
else
{ :cont, acc }
end
end
end
end
defmacro map(callback, f \\ nil) do
quote do
fn(entry, acc) ->
cont(unquote(f), unquote(callback).(entry), acc)
end
end
end
defmacro reject(callback, f \\ nil) do
quote do
fn(entry, acc) ->
if unquote(callback).(entry) do
  { :cont, acc }
else
  cont(unquote(f), entry, acc)
end
end
end
end
defmacro scan_2(callback, f \\ nil) do
quote do
fn
entry, acc(h, :first, t) ->
cont_with_acc(unquote(f), entry, h, { :ok, entry }, t)
entry, acc(h, { :ok, acc }, t) ->
value = unquote(callback).(entry, acc)
cont_with_acc(unquote(f), value, h, { :ok, value }, t)
end
end
end
defmacro scan_3(callback, f \\ nil) do
quote do
fn(entry, acc(h, acc, t)) ->
value = unquote(callback).(entry, acc)
cont_with_acc(unquote(f), value, h, value, t)
end
end
end
defmacro take(f \\ nil) do
quote do
fn(entry, acc(h, n, t) = orig) ->
if n >= 1 do
cont_with_acc(unquote(f), entry, h, n-1, t)
else
{ :halt, orig }
end
end
end
end
defmacro take_every(nth, f \\ nil) do
quote do
fn
entry, acc(h, n, t) when n === :first
when n === unquote(nth) ->
cont_with_acc(unquote(f), entry, h, 1, t)
_entry, acc(h, n, t) ->
{ :cont, acc(h, n+1, t) }
end
end
end
defmacro take_while(callback, f \\ nil) do
quote do
fn(entry, acc) ->
if unquote(callback).(entry) do
cont(unquote(f), entry, acc)
else
{ :halt, acc }
end
end
end
end
defmacro uniq(callback, f \\ nil) do
quote do
fn(entry, acc(h, prev, t) = acc) ->
value = unquote(callback).(entry)
if :lists.member(value, prev) do
{ :cont, acc }
else
cont_with_acc(unquote(f), entry, h, [value|prev], t)
end
end
end
end
defmacro with_index(f \\ nil) do
quote do
fn(entry, acc(h, counter, t)) ->
cont_with_acc(unquote(f), { entry, counter }, h, counter + 1, t)
end
end
end
end
|
lib/elixir/lib/stream/reducers.ex
| 0.510741 | 0.463626 |
reducers.ex
|
starcoder
|
defmodule Clickhousex.Helpers do
@moduledoc false
defmodule BindQueryParamsError do
@moduledoc false
defexception [:message, :query, :params]
end
@doc false
def bind_query_params(query, []), do: query
def bind_query_params(query, params) do
query_parts = String.split(query, "?")
case length(query_parts) do
1 ->
case length(params) do
0 ->
query
_ ->
raise BindQueryParamsError,
message: "Extra params: the query doesn't contain '?'",
query: query,
params: params
end
len ->
if len - 1 != length(params) do
raise BindQueryParamsError,
message:
"The number of parameters does not correspond to the number of question marks",
query: query,
params: params
end
param_for_query(query_parts, params)
end
end
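# Illustrative usage (value rendering per param_as_string/1 below):
#
#     bind_query_params("SELECT * FROM t WHERE id = ? AND name = ?", [1, "foo"])
#     #=> "SELECT * FROM t WHERE id = 1 AND name = 'foo'"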
defp param_for_query(query_parts, params) when length(params) == 0 do
Enum.join(query_parts, "")
end
defp param_for_query([query_head | query_tail], [params_head | params_tail]) do
query_head <> param_as_string(params_head) <> param_for_query(query_tail, params_tail)
end
defp param_as_string(param) when is_list(param) do
param
|> Enum.map(fn p -> param_as_string(p) end)
|> Enum.join(",")
end
defp param_as_string(param) when is_integer(param) do
Integer.to_string(param)
end
defp param_as_string(param) when is_boolean(param) do
to_string(param)
end
defp param_as_string(param) when is_float(param) do
to_string(param)
end
defp param_as_string({date_tuple = {_year, _month, _day}, {hour, minute, second, msecond}}) do
case NaiveDateTime.from_erl({date_tuple, {hour, minute, second}}, {msecond, 3}) do
{:ok, ndt} ->
"'#{NaiveDateTime.to_iso8601(ndt)}'"
{:error, _reason} ->
  # Returning a tuple here would crash the `<>` concatenation in
  # param_for_query/2, so raise a descriptive error instead.
  raise ArgumentError, "invalid ClickHouse datetime parameter"
end
end
defp param_as_string(date = {_year, _month, _day}) do
case Date.from_erl(date) do
{:ok, date} ->
"'#{Date.to_string(date)}'"
{:error, _reason} ->
  # Raise rather than return a tuple (see the datetime clause above).
  raise ArgumentError, "invalid ClickHouse date parameter"
end
end
defp param_as_string(param) do
"'" <> param <> "'"
end
end
|
lib/clickhousex/helpers.ex
| 0.712432 | 0.427158 |
helpers.ex
|
starcoder
|
defmodule Stripe.Error do
@moduledoc """
A struct which represents an error which occurred during a Stripe API call.
This struct is designed to provide all the information needed to effectively log and maybe respond
to an error.
It contains the following fields:
- `:source` - this is one of
  * `:internal` - the error occurred within the library. This is usually caused by an unexpected
    or missing parameter.
  * `:network` - the error occurred while making the network request (i.e. `:hackney.request/5`
    returned an error.) In this case, `:code` will always be `:network_error`. The
    `:hackney_reason` field in the `:extra` map contains the actual error reason received from
    hackney.
  * `:stripe` - an error response was received from Stripe.
- `:code` - an atom indicating the particular error. See "Error Codes" for more detail.
- `:request_id` - if `:source` is `:stripe`, this will contain the
  [request ID](https://stripe.com/docs/api#request_ids) for logging and troubleshooting.
  Otherwise, this field is `nil`.
- `:message` - a loggable message describing the error. This should not be shown to your users
  but is intended for logging and troubleshooting.
- `:user_message` - if Stripe has provided a user-facing message (e.g. when a card is declined),
  this field will contain it. Otherwise it is `nil`.
- `:extra` - a map which may contain some additional information about the error. See "Extra
  Fields" for details.
## Extra Fields
The `:extra` field contains a map of miscellaneous information about the error which may be
useful. The fields are not present if not relevant. The possible fields are:
- `:card_code` - when `:code` is `:card_error`, contains one of Stripe's
  [decline reasons](https://stripe.com/docs/api#errors).
- `:decline_code` - an optional short string provided by the bank when a card is declined.
- `:param` - for errors where a particular parameter was the cause, indicates which parameter
  was invalid.
- `:charge_id` - when a Charge was declined, indicates the ID of the failed Charge which was
  created.
- `:http_status` - for `:stripe` errors, the HTTP status returned with the error.
- `:raw_error` - the raw error map received from Stripe.
- `:hackney_reason` - for `:network` errors, contains the error reason received from hackney.
## Error Codes
The `:code` field may be one of the following:
- `:api_connection_error`, `:api_error`, `:authentication_error`, `:card_error`,
  `:invalid_request_error`, `:rate_limit_error`, `:validation_error` - as per the
  [Stripe docs](https://stripe.com/docs/api#errors)
- `:bad_request`, `:unauthorized`, `:request_failed`, `:not_found`, `:conflict`,
  `:too_many_requests`, `:server_error`, `:unknown_error` - these only occur if Stripe did not
  send an explicit `type` for the error. They have the meaning as defined in [Stripe's HTTP status
  code summary](https://stripe.com/docs/api#errors)
- `:network_error` - used only when `:source` is `:network`. Indicates an error occurred while
  making the request.
- `:valid_keys_failed`, `:required_keys_failed`, `:endpoint_fun_invalid_result`,
  `:invalid_endpoint` - used when `:source` is `:internal`. See `Stripe.Request` for details.
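## Example

A minimal sketch of matching on this struct; `result` is assumed to come from
any library call returning `{:ok, _} | {:error, %Stripe.Error{}}`:

    case result do
      {:ok, charge} ->
        charge

      {:error, %Stripe.Error{code: :card_error, user_message: user_message}} ->
        # safe to show to the end user
        {:declined, user_message}

      {:error, %Stripe.Error{message: message}} ->
        # intended for logging, not for end users
        {:error, message}
    end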
"""
@type error_source :: :internal | :network | :stripe
@type error_status ::
:bad_request
| :unauthorized
| :request_failed
| :not_found
| :conflict
| :too_many_requests
| :server_error
| :unknown_error
@type stripe_error_type ::
:api_connection_error
| :api_error
| :authentication_error
| :card_error
| :invalid_request_error
| :rate_limit_error
| :validation_error
@type card_error_code ::
:invalid_number
| :invalid_expiry_month
| :invalid_expiry_year
| :invalid_cvc
| :invalid_swipe_data
| :incorrect_number
| :expired_card
| :incorrect_cvc
| :incorrect_zip
| :card_declined
| :missing
| :processing_error
@type t :: %__MODULE__{
source: error_source,
code: error_status | stripe_error_type | Stripe.Request.error_code() | :network_error,
request_id: String.t() | nil,
message: String.t(),
user_message: String.t() | nil,
extra: %{
optional(:card_code) => card_error_code,
optional(:decline_code) => String.t(),
optional(:param) => atom,
optional(:charge_id) => Stripe.id(),
optional(:http_status) => 400..599,
optional(:raw_error) => map,
optional(:hackney_reason) => any
}
}
@enforce_keys [:source, :code, :message]
defstruct [:source, :code, :request_id, :extra, :message, :user_message]
@doc false
@spec new(Keyword.t()) :: t
def new(fields) do
struct!(__MODULE__, fields)
end
@doc false
@spec from_hackney_error(any) :: t
def from_hackney_error(reason) do
%__MODULE__{
source: :network,
code: :network_error,
message:
"An error occurred while making the network request. The HTTP client returned the following reason: #{
inspect(reason)
}",
extra: %{
hackney_reason: reason
}
}
end
@doc false
@spec from_stripe_error(400..599, nil, String.t() | nil) :: t
def from_stripe_error(status, nil, request_id) do
%__MODULE__{
source: :stripe,
code: code_from_status(status),
request_id: request_id,
extra: %{http_status: status},
message: status |> message_from_status()
}
end
@spec from_stripe_error(400..599, map, String.t()) :: t
def from_stripe_error(status, error_data, request_id) do
case error_data |> Map.get("type") |> maybe_to_atom() do
nil ->
from_stripe_error(status, nil, request_id)
type ->
stripe_message = error_data |> Map.get("message")
user_message =
case type do
:card_error -> stripe_message
_ -> nil
end
message = stripe_message || message_from_type(type)
extra =
%{raw_error: error_data, http_status: status}
|> maybe_put(:card_code, error_data |> Map.get("code") |> maybe_to_atom())
|> maybe_put(:decline_code, error_data |> Map.get("decline_code"))
|> maybe_put(:param, error_data |> Map.get("param") |> maybe_to_atom())
|> maybe_put(:charge_id, error_data |> Map.get("charge"))
%__MODULE__{
source: :stripe,
code: type,
request_id: request_id,
message: message,
user_message: user_message,
extra: extra
}
end
end
defp code_from_status(400), do: :bad_request
defp code_from_status(401), do: :unauthorized
defp code_from_status(402), do: :request_failed
defp code_from_status(404), do: :not_found
defp code_from_status(409), do: :conflict
defp code_from_status(429), do: :too_many_requests
defp code_from_status(s) when s in [500, 502, 503, 504], do: :server_error
defp code_from_status(_), do: :unknown_error
defp message_from_status(400),
do: "The request was unacceptable, often due to missing a required parameter."
defp message_from_status(401), do: "No valid API key provided."
defp message_from_status(402), do: "The parameters were valid but the request failed."
defp message_from_status(404), do: "The requested resource doesn't exist."
defp message_from_status(409),
do:
"The request conflicts with another request (perhaps due to using the same idempotent key)."
defp message_from_status(429),
do:
"Too many requests hit the API too quickly. We recommend an exponential backoff of your requests."
defp message_from_status(s) when s in [500, 502, 503, 504],
do: "Something went wrong on Stripe's end."
defp message_from_status(s), do: "An unknown HTTP code of #{s} was received."
defp message_from_type(:api_connection_error), do: "The connection to Stripe's API failed."
defp message_from_type(:api_error),
do: "An internal Stripe error occurred. This is usually temporary."
defp message_from_type(:authentication_error),
do: "You failed to properly authenticate yourself in the request."
defp message_from_type(:card_error), do: "The card could not be charged for some reason."
defp message_from_type(:invalid_request_error), do: "Your request had invalid parameters."
defp message_from_type(:rate_limit_error),
do:
"Too many requests hit the API too quickly. We recommend an exponential backoff of your requests."
defp message_from_type(:validation_error),
do: "A client-side library failed to validate a field."
defp maybe_put(map, _key, nil), do: map
defp maybe_put(map, key, value), do: map |> Map.put(key, value)
defp maybe_to_atom(nil), do: nil
defp maybe_to_atom(string) when is_binary(string), do: string |> String.to_atom()
end
|
lib/stripe/error.ex
| 0.917329 | 0.757357 |
error.ex
|
starcoder
|
defmodule Freddy.RPC.Client do
@moduledoc ~S"""
This module allows to build RPC client for any Freddy-compliant microservice.
## Example
defmodule PaymentsService do
use Freddy.RPC.Client
@config [timeout: 3500]
def start_link(conn, initial, opts \\ []) do
Freddy.RPC.Client.start_link(__MODULE__, conn, @config, initial, opts)
end
end
{:ok, conn} = Freddy.Connection.start_link()
{:ok, client} = PaymentsService.start_link(conn, nil)
PaymentsService.request(client, "Payments", %{type: "get_history", site_id: "xxx"})
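Matching on the reply (result shapes follow `response_to_tuple/1` and the
default `on_timeout/2` below; the `:retry_later` branch is illustrative):

    case PaymentsService.request(client, "Payments", %{type: "get_history", site_id: "xxx"}) do
      {:ok, output} -> output
      {:error, :timeout} -> :retry_later
      {:error, :invalid_request, error} -> {:error, error}
    end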
"""
@type payload :: term
@type request :: Freddy.RPC.Request.t()
@type response :: term
@type routing_key :: String.t()
@type opts :: Keyword.t()
@type meta :: map
@type state :: term
@doc """
Called when the RPC client process is first started. `start_link/5` will block
until it returns.
It receives as argument the fourth argument given to `start_link/5`.
Returning `{:ok, state}` will cause `start_link/5` to return `{:ok, pid}`
and attempt to open a channel on the given connection, declare the exchange,
declare a server-named queue, and consume it.
After that it will enter the main loop with `state` as its internal state.
Returning `:ignore` will cause `start_link/5` to return `:ignore` and the
process will exit normally without entering the loop, opening a channel or calling
`c:terminate/2`.
Returning `{:stop, reason}` will cause `start_link/5` to return `{:error, reason}` and
the process will exit with reason `reason` without entering the loop, opening a channel,
or calling `c:terminate/2`.
"""
@callback init(initial :: term) ::
{:ok, state}
| :ignore
| {:stop, reason :: term}
@doc """
Called when the RPC client process has opened AMQP channel before registering
itself as a consumer.
First argument is a map, containing `:channel`, `:exchange` and `:queue` structures.
Returning `{:noreply, state}` will cause the process to enter the main loop
with the given state.
Returning `{:error, state}` will cause the process to reconnect (i.e. open
new channel, declare exchange and queue, etc).
Returning `{:stop, reason, state}` will terminate the main loop and call
`c:terminate/2` before the process exits with reason `reason`.
"""
@callback handle_connected(Freddy.Consumer.connection_info(), state) ::
{:noreply, state}
| {:noreply, state, timeout | :hibernate}
| {:error, state}
| {:stop, reason :: term, state}
@doc """
Called when the AMQP server has registered the process as a consumer of the
server-named queue and it will start to receive messages.
Returning `{:noreply, state}` will cause the process to enter the main loop
with the given state.
Returning `{:stop, reason, state}` will not send the message, terminate
the main loop and call `c:terminate/2` before the process exits with
reason `reason`.
"""
@callback handle_ready(meta, state) ::
{:noreply, state}
| {:noreply, state, timeout | :hibernate}
| {:stop, reason :: term, state}
@doc """
Called when the AMQP server has been disconnected from the AMQP broker.
Returning `{:noreply, state}` will cause the process to enter the main loop
with the given state. The server will not consume any new messages until
connection to AMQP broker is restored.
Returning `{:stop, reason, state}` will terminate the main loop and call
`c:terminate/2` before the process exits with reason `reason`.
"""
@callback handle_disconnected(reason :: term, state) ::
{:noreply, state}
| {:stop, reason :: term, state}
@doc """
Called before a request will be performed to the exchange.
It receives as argument the RPC request structure which contains the message
payload, the routing key and the options for that publication, and the
internal client state.
Returning `{:ok, state}` will cause the request to be performed with no
modification, block the client until the response is received, and enter
the main loop with the given state.
Returning `{:ok, request, state}` will cause the payload, routing key and
options from the given `request` to be used instead of the original ones,
block the client until the response is received, and enter the main loop
with the given state.
Returning `{:reply, response, state}` will respond to the client immediately
without performing the request with the given response, and enter the main
loop again with the given state.
Returning `{:stop, reason, response, state}` will not send the message,
respond to the caller with `response`, terminate the main loop
and call `c:terminate/2` before the process exits with reason `reason`.
Returning `{:stop, reason, state}` will not send the message, terminate
the main loop and call `c:terminate/2` before the process exits with
reason `reason`.
"""
@callback before_request(request, state) ::
{:ok, state}
| {:ok, request, state}
| {:reply, response, state}
| {:stop, reason :: term, response, state}
| {:stop, reason :: term, state}
@doc """
Called before a message will be published to the exchange.
It receives as argument the RPC request structure and the internal state.
Returning `{:ok, request, state}` will cause the returned `request` to be
published to the exchange, and the process to enter the main loop with the
given state.
Returning `{:reply, response, state}` will respond to the client immediately
without performing the request with the given response, and enter the main
loop again with the given state.
Returning `{:stop, reason, response, state}` will not send the message,
respond to the caller with `response`, and terminate the main loop
and call `c:terminate/2` before the process exits with reason `reason`.
Returning `{:stop, reason, state}` will not send the message, terminate
the main loop and call `c:terminate/2` before the process exits with
reason `reason`.
"""
@callback encode_request(request, state) ::
{:ok, request, state}
| {:reply, response, state}
| {:reply, response, state, timeout | :hibernate}
| {:stop, reason :: term, response, state}
| {:stop, reason :: term, state}
@doc """
Called when a response message is delivered from the queue, before passing it into the
`c:on_response/3` callback.
The arguments are the message's raw payload, response metadata, the original RPC request
for which the response has arrived, and the internal state.
The metadata is a map containing all metadata given by the AMQP client when receiving
the message plus the `:exchange` and `:queue` values.
Returning `{:ok, payload, state}` or `{:ok, payload, meta, state}` will pass the decoded
payload and meta into the `handle_message/3` function.
Returning `{:noreply, state}` will do nothing, and therefore the message should
be acknowledged by using `Freddy.Consumer.ack/2`, `Freddy.Consumer.nack/2` or
`Freddy.Consumer.reject/2`.
Returning `{:stop, reason, state}` will terminate the main loop and call
`c:terminate/2` before the process exits with reason `reason`.
"""
@callback decode_response(payload :: String.t(), meta, request, state) ::
{:ok, payload, state}
| {:ok, payload, meta, state}
| {:reply, reply :: term, state}
| {:reply, reply :: term, state, timeout | :hibernate}
| {:noreply, state}
| {:stop, reason :: term, state}
@doc """
Called when a response has been received, before it is delivered to the caller.
It receives as arguments the decoded and parsed response, the original RPC request
for which the response has arrived, and the internal state.
Returning `{:reply, reply, state}` will cause the given reply to be
delivered to the caller instead of the original response, and enter
the main loop with the given state.
Returning `{:noreply, state}` will enter the main loop with the given state
without responding to the caller (that will eventually timeout or keep blocked
forever if the timeout was set to `:infinity`).
Returning `{:stop, reason, reply, state}` will deliver the given reply to
the caller instead of the original response and call `c:terminate/2`
before the process exits with reason `reason`.
Returning `{:stop, reason, state}` will not reply to the caller and will call
`c:terminate/2` before the process exits with reason `reason`.
"""
@callback on_response(response, request, state) ::
{:reply, response, state}
| {:reply, response, state, timeout | :hibernate}
| {:noreply, state}
| {:noreply, state, timeout | :hibernate}
| {:stop, reason :: term, response, state}
| {:stop, reason :: term, state}
@doc """
Called when a request has timed out.
Returning `{:reply, reply, state}` will cause the given reply to be
delivered to the caller, and enter the main loop with the given state.
Returning `{:noreply, state}` will enter the main loop with the given state
without responding to the caller (that will eventually timeout or keep blocked
forever if the timeout was set to `:infinity`).
Returning `{:stop, reason, reply, state}` will deliver the given reply to
the caller, and call `c:terminate/2` before the process exits
with reason `reason`.
Returning `{:stop, reason, state}` will not reply to the caller and call
`c:terminate/2` before the process exits with reason `reason`.
"""
@callback on_timeout(request, state) ::
{:reply, response, state}
| {:reply, response, state, timeout | :hibernate}
| {:noreply, state}
| {:noreply, state, timeout | :hibernate}
| {:stop, reason :: term, response, state}
| {:stop, reason :: term, state}
@doc """
Called when a request has been returned by the AMQP broker.
Returning `{:reply, reply, state}` will cause the given reply to be
delivered to the caller, and enter the main loop with the given state.
Returning `{:noreply, state}` will enter the main loop with the given state
without responding to the caller (that will eventually timeout or keep blocked
forever if the timeout was set to `:infinity`).
Returning `{:stop, reason, reply, state}` will deliver the given reply to
the caller, and call `c:terminate/2` before the process exits
with reason `reason`.
Returning `{:stop, reason, state}` will not reply to the caller and call
`c:terminate/2` before the process exits with reason `reason`.
"""
@callback on_return(request, state) ::
{:reply, response, state}
| {:reply, response, state, timeout | :hibernate}
| {:noreply, state}
| {:noreply, state, timeout | :hibernate}
| {:stop, reason :: term, response, state}
| {:stop, reason :: term, state}
@doc """
Called when the process receives a call message sent by `call/3`. This
callback has the same arguments as the `GenServer` equivalent and the
`:reply`, `:noreply` and `:stop` return tuples behave the same.
"""
@callback handle_call(request :: term, GenServer.from(), state) ::
{:reply, reply :: term, state}
| {:reply, reply :: term, state, timeout | :hibernate}
| {:noreply, state}
| {:noreply, state, timeout | :hibernate}
| {:stop, reason :: term, state}
| {:stop, reason :: term, reply :: term, state}
@doc """
Called when the process receives a cast message sent by `cast/2`. This
callback has the same arguments as the `GenServer` equivalent and the
`:noreply` and `:stop` return tuples behave the same.
"""
@callback handle_cast(request :: term, state) ::
{:noreply, state}
| {:noreply, state, timeout | :hibernate}
| {:stop, reason :: term, state}
@doc """
Called when the process receives a message. This callback has the same
arguments as the `GenServer` equivalent and the `:noreply` and `:stop`
return tuples behave the same.
"""
@callback handle_info(message :: term, state) ::
{:noreply, state}
| {:noreply, state, timeout | :hibernate}
| {:stop, reason :: term, state}
@doc """
This callback is the same as the `GenServer` equivalent and is called when the
process terminates. The first argument is the reason the process is about
to exit with.
"""
@callback terminate(reason :: term, state) :: any
defmacro __using__(_opts \\ []) do
quote location: :keep do
@behaviour Freddy.RPC.Client
@impl true
def init(initial) do
{:ok, initial}
end
@impl true
def handle_connected(_meta, state) do
{:noreply, state}
end
@impl true
def handle_ready(_meta, state) do
{:noreply, state}
end
@impl true
def handle_disconnected(_reason, state) do
{:noreply, state}
end
@impl true
def before_request(_request, state) do
{:ok, state}
end
@impl true
def encode_request(request, state) do
case Jason.encode(request.payload) do
{:ok, new_payload} ->
new_request =
request
|> Freddy.RPC.Request.set_payload(new_payload)
|> Freddy.RPC.Request.put_option(:content_type, "application/json")
{:ok, new_request, state}
{:error, reason} ->
{:reply, {:error, {:bad_request, reason}}, state}
end
end
@impl true
def decode_response(payload, _meta, _request, state) do
case Jason.decode(payload) do
{:ok, decoded} -> {:ok, decoded, state}
{:error, reason} -> {:reply, {:error, {:bad_response, reason}}, state}
end
end
@impl true
def on_response(response, _request, state) do
{:reply, response, state}
end
@impl true
def on_timeout(_request, state) do
{:reply, {:error, :timeout}, state}
end
@impl true
def on_return(_request, state) do
{:reply, {:error, :no_route}, state}
end
@impl true
def handle_call(message, _from, state) do
{:stop, {:bad_call, message}, state}
end
@impl true
def handle_cast(message, state) do
{:stop, {:bad_cast, message}, state}
end
@impl true
def handle_info(_message, state) do
{:noreply, state}
end
@impl true
def terminate(_reason, _state) do
:ok
end
defoverridable Freddy.RPC.Client
end
end
use Freddy.Consumer
require Record
alias Freddy.RPC.Request
alias Freddy.Core.Channel
alias Freddy.Core.Exchange
@type config :: [timeout: timeout, exchange: Keyword.t()]
@default_timeout 3000
@default_gen_server_timeout 5000
@config [
queue: [opts: [auto_delete: true, exclusive: true]],
consumer: [no_ack: true]
]
Record.defrecordp(
:state,
mod: nil,
given: nil,
timeout: @default_timeout,
channel: nil,
exchange: nil,
queue: nil,
waiting: %{}
)
@doc """
Starts a `Freddy.RPC.Client` process linked to the current process.
This function is used to start a `Freddy.RPC.Client` process in a supervision
tree. The process will be started by calling `c:init/1` with the given initial
value.
## Arguments
* `mod` - the module that defines the server callbacks (like in `GenServer`)
* `connection` - the pid of a `Freddy.Connection` process
* `config` - the configuration of the RPC Client (describing the exchange and timeout value)
* `initial` - the value that will be given to `c:init/1`
* `options` - the `GenServer` options
## Configuration
* `:exchange` - a keyword list or `%Freddy.Core.Exchange{}` structure, describing an
exchange that will be used to publish RPC requests to. If not present, the default
RabbitMQ exchange will be used. See `Freddy.Core.Exchange` for available options
* `:timeout` - specified default request timeout in milliseconds
"""
@spec start_link(module, GenServer.server(), config, initial :: term, GenServer.options()) ::
GenServer.on_start()
def start_link(mod, connection, config, initial, options \\ []) do
Freddy.Consumer.start_link(
__MODULE__,
connection,
prepare_config(config),
prepare_init_args(mod, config, initial),
options
)
end
@doc """
Starts a `Freddy.RPC.Client` process without linking to the current process,
see `start_link/5` for more information.
"""
@spec start(module, GenServer.server(), config, initial :: term, GenServer.options()) ::
GenServer.on_start()
def start(mod, connection, config, initial, options \\ []) do
Freddy.Consumer.start(
__MODULE__,
connection,
prepare_config(config),
prepare_init_args(mod, config, initial),
options
)
end
defdelegate call(client, message, timeout \\ 5000), to: Connection
defdelegate cast(client, message), to: Connection
defdelegate stop(client, reason \\ :normal), to: GenServer
@doc """
Performs a RPC request and blocks until the response arrives.
The `routing_key` parameter specifies the routing key for the message. The routing
key is used by the RabbitMQ server to route a message from an exchange to worker
queues or another exchanges.
The `payload` parameter specifies the message content as an Erlang term. The payload
is converted to binary string by the `c:encode_request/2` callback before sending it
to server.
## Options
* `:timeout` - if present, the client is allowed to wait given number of
milliseconds for the response message from the server
* `:headers` - message headers
* `:persistent` - if set, uses persistent delivery mode
* `:priority` - message priority, ranging from 0 to 9
* `:message_id` - message identifier
* `:timestamp` - timestamp associated with this message (epoch time)
* `:user_id` - creating user ID. RabbitMQ will validate this against the active connection user
* `:app_id` - publishing application ID
"""
@spec request(GenServer.server(), routing_key, payload, Keyword.t()) ::
{:ok, response}
| {:error, reason :: term}
| {:error, reason :: term, hint :: term}
def request(client, routing_key, payload, options \\ []) do
Freddy.Consumer.call(
client,
{:"$request", payload, routing_key, options},
gen_server_timeout(options)
)
end
defp prepare_config(config) do
exchange = Keyword.get(config, :exchange, [])
Keyword.put(@config, :exchange, exchange)
end
defp prepare_init_args(mod, config, initial) do
timeout = Keyword.get(config, :timeout, @default_timeout)
{mod, timeout, initial}
end
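# The GenServer call deadline is padded by 100ms over the AMQP request timeout,
# so that `on_timeout/2` gets a chance to reply with `{:error, :timeout}` before
# the caller itself times out and exits.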
defp gen_server_timeout(opts) do
case Keyword.get(opts, :timeout, :undefined) do
:undefined -> @default_gen_server_timeout
:infinity -> :infinity
timeout when is_integer(timeout) -> timeout + 100
end
end
@impl true
def init({mod, timeout, initial}) do
case mod.init(initial) do
{:ok, given} ->
{:ok, state(mod: mod, given: given, timeout: timeout)}
:ignore ->
:ignore
{:stop, reason} ->
{:stop, reason}
end
end
@impl true
def handle_connected(meta, state(mod: mod, given: given) = state) do
case mod.handle_connected(meta, given) do
{:noreply, new_given} ->
{:noreply, state(state, given: new_given)}
{:noreply, new_given, timeout} ->
{:noreply, state(state, given: new_given), timeout}
{:error, new_given} ->
{:noreply, state(state, given: new_given)}
{:stop, reason, new_given} ->
{:stop, reason, state(state, given: new_given)}
end
end
@impl true
def handle_ready(
%{channel: channel, queue: queue, exchange: exchange} = meta,
state(mod: mod, given: given) = state
) do
:ok = Channel.register_return_handler(channel, self())
new_state = state(state, channel: channel, exchange: exchange, queue: queue)
case mod.handle_ready(meta, given) do
{:noreply, new_given} ->
{:noreply, state(new_state, given: new_given)}
{:noreply, new_given, timeout} ->
{:noreply, state(new_state, given: new_given), timeout}
{:stop, reason, new_given} ->
{:stop, reason, state(state, given: new_given)}
end
end
@impl true
def handle_disconnected(reason, state(mod: mod, given: given) = state) do
disconnected = state(state, channel: nil, exchange: nil, queue: nil)
case mod.handle_disconnected(reason, given) do
{:noreply, new_given} -> {:noreply, state(disconnected, given: new_given)}
{:stop, reason, new_given} -> {:stop, reason, state(disconnected, given: new_given)}
end
end
@impl true
def handle_call({:"$request", payload, routing_key, opts}, from, state) do
handle_request(payload, routing_key, opts, from, state)
end
def handle_call(message, from, state(mod: mod, given: given) = state) do
message
|> mod.handle_call(from, given)
|> handle_mod_callback(state)
end
@impl true
def decode_message(
payload,
%{correlation_id: request_id} = meta,
state(mod: mod, given: given) = state
) do
pop_waiting(request_id, state, fn request, state ->
case mod.decode_response(payload, meta, request, given) do
{:ok, new_payload, new_given} ->
{:ok, response_to_tuple(new_payload), Map.put(meta, :request, request),
state(state, given: new_given)}
{:ok, new_payload, new_meta, new_given} ->
{:ok, response_to_tuple(new_payload), Map.put(new_meta, :request, request),
state(state, given: new_given)}
{:reply, response, new_given} ->
{:reply, response, state(state, given: new_given)}
{:reply, response, new_given, timeout} ->
{:reply, response, state(state, given: new_given), timeout}
{:noreply, new_given} ->
{:noreply, state(state, given: new_given)}
{:stop, reason, new_given} ->
{:stop, reason, state(state, given: new_given)}
end
end)
end
# TODO: this should be moved out of here
defp response_to_tuple(%{"success" => true, "output" => result}) do
{:ok, result}
end
defp response_to_tuple(%{"success" => true} = payload) do
{:ok, Map.delete(payload, "success")}
end
defp response_to_tuple(%{"success" => false, "error" => error}) do
{:error, :invalid_request, error}
end
defp response_to_tuple(%{"success" => false} = payload) do
{:error, :invalid_request, Map.delete(payload, "success")}
end
defp response_to_tuple(payload) do
{:ok, payload}
end
@impl true
def handle_message(response, %{request: request} = _meta, state(mod: mod, given: given) = state) do
response
|> mod.on_response(request, given)
|> handle_mod_callback(state)
|> handle_mod_callback_reply(request)
end
@impl true
def handle_cast(message, state) do
handle_async(message, :handle_cast, state)
end
@impl true
def handle_info(
{:return, _payload, %{correlation_id: request_id} = _meta},
state(mod: mod, given: given) = state
) do
pop_waiting(request_id, state, fn request, state ->
request
|> mod.on_return(given)
|> handle_mod_callback(state)
|> handle_mod_callback_reply(request)
end)
end
def handle_info({:request_timeout, request_id}, state(mod: mod, given: given) = state) do
pop_waiting(request_id, state, fn request, state ->
request
|> mod.on_timeout(given)
|> handle_mod_callback(state)
|> handle_mod_callback_reply(request)
end)
end
def handle_info(message, state) do
handle_async(message, :handle_info, state)
end
@impl true
def terminate(reason, state(mod: mod, given: given)) do
mod.terminate(reason, given)
end
# Private functions
defp handle_request(_payload, _routing_key, _opts, _from, state(channel: nil) = state) do
{:reply, {:error, :not_connected}, state}
end
defp handle_request(payload, routing_key, opts, from, state(mod: mod, given: given) = state) do
request = Request.start(from, payload, routing_key, opts)
case mod.before_request(request, given) do
{:ok, new_given} ->
send_request(request, state(state, given: new_given))
{:ok, new_request, new_given} ->
send_request(new_request, state(state, given: new_given))
{:reply, response, new_given} ->
{:reply, response, state(state, given: new_given)}
{:stop, reason, response, new_given} ->
{:stop, reason, response, state(state, given: new_given)}
{:stop, reason, new_given} ->
{:stop, reason, state(state, given: new_given)}
end
end
defp send_request(request, state(mod: mod, given: given) = state) do
case mod.encode_request(request, given) do
{:ok, new_request, new_given} ->
publish(new_request, state(state, given: new_given))
{:reply, response, new_given} ->
{:reply, response, state(state, given: new_given)}
{:reply, response, new_given, timeout} ->
{:reply, response, state(state, given: new_given), timeout}
{:stop, reason, new_given} ->
{:stop, reason, state(state, given: new_given)}
{:stop, reason, response, new_given} ->
{:stop, reason, response, state(state, given: new_given)}
end
end
defp publish(
request,
state(exchange: exchange, queue: queue, channel: channel, timeout: timeout) = state
) do
ttl = Request.get_option(request, :timeout, timeout)
request =
request
|> Request.put_option(:mandatory, true)
|> Request.put_option(:type, "request")
|> Request.put_option(:correlation_id, request.id)
|> Request.put_option(:reply_to, queue.name)
|> Request.set_timeout(ttl)
exchange
|> Exchange.publish(channel, request.payload, request.routing_key, request.options)
|> after_publish(request, state)
end
defp after_publish(:ok, request, state(waiting: waiting) = state) do
request =
case Request.get_timeout(request) do
:infinity ->
request
timeout ->
timer = Process.send_after(self(), {:request_timeout, request.id}, timeout)
%{request | timer: timer}
end
{:noreply, state(state, waiting: Map.put(waiting, request.id, request))}
end
defp after_publish(error, _request, state) do
{:reply, error, state}
end
defp handle_mod_callback(response, state) do
case response do
{:reply, response, new_given} ->
{:reply, response, state(state, given: new_given)}
{:reply, response, new_given, timeout} ->
{:reply, response, state(state, given: new_given), timeout}
{:noreply, new_given} ->
{:noreply, state(state, given: new_given)}
{:noreply, new_given, timeout} ->
{:noreply, state(state, given: new_given), timeout}
{:stop, reason, response, new_given} ->
{:stop, reason, response, state(state, given: new_given)}
{:stop, reason, new_given} ->
{:stop, reason, state(state, given: new_given)}
end
end
defp handle_mod_callback_reply(result, request) do
case result do
{:reply, response, new_state} ->
GenServer.reply(request.from, response)
{:noreply, new_state}
{:reply, response, new_state, timeout} ->
GenServer.reply(request.from, response)
{:noreply, new_state, timeout}
other ->
other
end
end
defp handle_async(message, fun, state(mod: mod, given: given) = state) do
case apply(mod, fun, [message, given]) do
{:noreply, new_given} ->
{:noreply, state(state, given: new_given)}
{:noreply, new_given, timeout} ->
{:noreply, state(state, given: new_given), timeout}
{:stop, reason, new_given} ->
{:stop, reason, state(state, given: new_given)}
end
end
defp pop_waiting(request_id, state(waiting: waiting) = state, func) do
{request, new_waiting} = Map.pop(waiting, request_id)
if request do
if request.timer do
Process.cancel_timer(request.timer)
end
request
|> Request.finish()
|> func.(state(state, waiting: new_waiting))
else
{:noreply, state}
end
end
end
|
lib/freddy/rpc/client.ex
| 0.886623 | 0.439807 |
client.ex
|
starcoder
|