code
stringlengths 114
1.05M
| path
stringlengths 3
312
| quality_prob
float64 0.5
0.99
| learning_prob
float64 0.2
1
| filename
stringlengths 3
168
| kind
stringclasses 1
value |
---|---|---|---|---|---|
defmodule EtsDeque do
  @moduledoc """
  EtsDeque is an Elixir implementation of a double-ended queue (deque), using
  Erlang's ETS library as a backing store.

  Using ETS ensures that all functions in the `EtsDeque` module execute in
  amortized O(1) time with a minimum of memory allocations, offering bounded
  or unbounded operation with high performance and favorable RAM usage.

  Using ETS also means that `EtsDeque` is not a purely functional data
  structure, and is not suitable for direct concurrent usage in multiple
  processes. Use the `EtsDeque.Server` GenServer if you would like safe
  access to an `EtsDeque` from multiple processes.

  You can push items onto, pop items from, or peek at items from the head
  or tail of the queue. Additionally, any item can be accessed or replaced
  by its index using `at/2` and `replace_at/3`.

  `EtsDeque` implements Elixir's
  [Access](https://hexdocs.pm/elixir/Access.html) behaviour and
  [Enumerable](https://hexdocs.pm/elixir/Enumerable.html) and
  [Collectable](https://hexdocs.pm/elixir/Collectable.html) protocols,
  so code like `deque[0]` and `Enum.count(deque)` and
  `Enum.into([1, 2, 3], EtsDeque.new())` works as it should.

  ## Example

      iex> deque = EtsDeque.new(3)
      iex> {:ok, deque} = EtsDeque.push_head(deque, :moe)
      iex> {:ok, deque} = EtsDeque.push_tail(deque, :larry)
      iex> {:ok, deque} = EtsDeque.push_tail(deque, :curly)
      iex> :error = EtsDeque.push_head(deque, :shemp) ## deque is full
      iex> {:ok, :curly, deque} = EtsDeque.pop_tail(deque)
      iex> {:ok, deque} = EtsDeque.push_tail(deque, :shemp)
      iex> Enum.to_list(deque)
      [:moe, :larry, :shemp]
  """

  # Struct fields:
  #   table  - ETS table id backing the deque (keys are integer slot indexes)
  #   size   - maximum capacity, or :infinity for unbounded operation
  #   length - current number of stored items
  #   head   - slot index of the head item; starts at -1 so the first push
  #            lands on slot 0 (bounded) or index 0 (unbounded)
  defstruct [:table, :size, :length, :head]

  @type t :: %__MODULE__{}

  @doc ~S"""
  Creates a deque, optionally limited to a given size.
  """
  @spec new(non_neg_integer | :infinity) :: t()
  def new(size \\ :infinity) do
    # :public so EtsDeque.Server (a separate process) can operate on the table.
    table = :ets.new(nil, [:set, :public])
    %__MODULE__{table: table, size: size, length: 0, head: -1}
  end

  @doc ~S"""
  Returns the number of items in the given deque. Equivalent to `deque.length`.
  """
  @spec length(t) :: non_neg_integer
  def length(deque), do: deque.length

  @doc ~S"""
  Returns the maximum capacity of the given deque. Equivalent to `deque.size`.
  """
  @spec size(t) :: non_neg_integer | :infinity
  def size(deque), do: deque.size

  @doc ~S"""
  Adds an item onto the head of the queue. Returns the updated deque,
  or `:error` if the queue is full.
  """
  @spec push_head(t, any) :: {:ok, t} | :error
  def push_head(deque, item) do
    # Note: `n + 1 > :infinity` is false under Erlang term ordering, so an
    # unbounded deque never reports full here.
    if deque.length + 1 > deque.size do
      :error
    else
      new_head = new_head(deque, 1)
      true = :ets.insert(deque.table, {new_head, item})
      {:ok, %{deque | head: new_head, length: deque.length + 1}}
    end
  end

  @doc ~S"""
  Adds an item onto the head of the queue. Returns the updated deque,
  or raises `ArgumentError` if the queue is full.
  """
  @spec push_head!(t, any) :: t
  def push_head!(deque, item) do
    case push_head(deque, item) do
      {:ok, deque} -> deque
      :error -> raise ArgumentError, "deque is full"
    end
  end

  @doc ~S"""
  Adds an item onto the tail of the queue. Returns the updated deque,
  or `:error` if the queue is full.
  """
  @spec push_tail(t, any) :: {:ok, t} | :error
  def push_tail(deque, item) do
    if deque.length + 1 > deque.size do
      :error
    else
      tail = tail(deque, 1)
      # Pushing onto an empty deque makes the new item both head and tail.
      head = if deque.length == 0, do: tail, else: deque.head
      true = :ets.insert(deque.table, {tail, item})
      {:ok, %{deque | length: deque.length + 1, head: head}}
    end
  end

  @doc ~S"""
  Adds an item onto the tail of the queue. Returns the updated deque,
  or raises `ArgumentError` if the queue is full.
  """
  @spec push_tail!(t, any) :: t
  def push_tail!(deque, item) do
    case push_tail(deque, item) do
      {:ok, deque} -> deque
      :error -> raise ArgumentError, "deque is full"
    end
  end

  @doc ~S"""
  Removes the item at the head of the queue, returning it along with the
  updated deque.

  Returns `:error` if queue is empty.
  """
  @spec pop_head(t) :: {:ok, any, t} | :error
  def pop_head(deque) do
    if deque.length == 0 do
      :error
    else
      # :ets.take both reads and deletes the slot.
      [{_, item}] = :ets.take(deque.table, deque.head)
      new_head = new_head(deque, -1)
      new_deque = %{deque | length: deque.length - 1, head: new_head}
      {:ok, item, new_deque}
    end
  end

  @doc ~S"""
  Removes the item at the head of the queue, returning it along with the
  updated deque.

  Raises `ArgumentError` if queue is empty.
  """
  @spec pop_head!(t) :: {any, t}
  def pop_head!(deque) do
    case pop_head(deque) do
      {:ok, item, deque} -> {item, deque}
      :error -> raise ArgumentError, "deque is empty"
    end
  end

  @doc ~S"""
  Removes the item at the tail of the queue, returning it along with the
  updated deque.

  Returns `:error` if queue is empty.
  """
  @spec pop_tail(t) :: {:ok, any, t} | :error
  def pop_tail(deque) do
    if deque.length == 0 do
      :error
    else
      tail = tail(deque)
      [{_, item}] = :ets.take(deque.table, tail)
      # head is untouched: removing the tail only shrinks the length.
      new_deque = %{deque | length: deque.length - 1}
      {:ok, item, new_deque}
    end
  end

  @doc ~S"""
  Removes the item at the tail of the queue, returning it along with the
  updated deque.

  Raises `ArgumentError` if queue is empty.
  """
  @spec pop_tail!(t) :: {any, t}
  def pop_tail!(deque) do
    case pop_tail(deque) do
      {:ok, item, deque} -> {item, deque}
      :error -> raise ArgumentError, "deque is empty"
    end
  end

  @doc ~S"""
  Returns the item at the head of the queue, or `:error` if the queue
  is empty.
  """
  @spec peek_head(t) :: {:ok, any} | :error
  def peek_head(deque) do
    if deque.length == 0 do
      :error
    else
      [{_, item}] = :ets.lookup(deque.table, deque.head)
      {:ok, item}
    end
  end

  @doc ~S"""
  Returns the item at the head of the queue, or raises `ArgumentError`
  if the queue is empty.
  """
  @spec peek_head!(t) :: any
  def peek_head!(deque) do
    case peek_head(deque) do
      {:ok, item} -> item
      :error -> raise ArgumentError, "deque is empty"
    end
  end

  @doc ~S"""
  Returns the item at the tail of the queue, or `:error` if the queue
  is empty.
  """
  @spec peek_tail(t) :: {:ok, any} | :error
  def peek_tail(deque) do
    if deque.length == 0 do
      :error
    else
      tail = tail(deque)
      [{_, item}] = :ets.lookup(deque.table, tail)
      {:ok, item}
    end
  end

  @doc ~S"""
  Returns the item at the tail of the queue, or raises `ArgumentError`
  if the queue is empty.
  """
  @spec peek_tail!(t) :: any
  def peek_tail!(deque) do
    case peek_tail(deque) do
      {:ok, item} -> item
      :error -> raise ArgumentError, "deque is empty"
    end
  end

  @doc ~S"""
  Returns the item at the given index, where index `0` is the head.

  Returns `:error` if index is out of bounds.
  """
  @spec at(t, non_neg_integer) :: {:ok, any} | :error
  def at(deque, index) do
    if deque.length > index do
      [{_, item}] = :ets.lookup(deque.table, real_index(deque, index))
      {:ok, item}
    else
      :error
    end
  end

  @doc ~S"""
  Returns the item at the given index, where index `0` is the head.

  Raises `ArgumentError` if index is out of bounds.
  """
  @spec at!(t, non_neg_integer) :: any
  def at!(deque, index) do
    case at(deque, index) do
      {:ok, item} -> item
      :error -> raise ArgumentError, "index #{index} out of bounds"
    end
  end

  @doc ~S"""
  Replaces the item at the given index, returning the updated deque.

  Returns `:error` if index is out of bounds.
  """
  @spec replace_at(t, non_neg_integer, any) :: {:ok, t} | :error
  def replace_at(deque, index, item) do
    if deque.length > index do
      # :set table semantics: inserting at an existing key overwrites it.
      true = :ets.insert(deque.table, {real_index(deque, index), item})
      {:ok, deque}
    else
      :error
    end
  end

  @doc ~S"""
  Replaces the item at the given index, returning the updated deque.

  Raises `ArgumentError` if index is out of bounds.
  """
  @spec replace_at!(t, non_neg_integer, any) :: t
  def replace_at!(deque, index, item) do
    case replace_at(deque, index, item) do
      {:ok, deque} -> deque
      :error -> raise ArgumentError, "index #{index} out of bounds"
    end
  end

  # Computes the head slot after pushing (increment = 1) or popping
  # (increment = -1) at the head. Public (but undocumented) because the
  # Enumerable implementation uses it to walk the deque.
  @doc false
  @spec new_head(t, integer) :: integer
  def new_head(%{size: :infinity} = deque, increment) do
    # Unbounded: head is a plain growing/shrinking integer index.
    deque.head + increment
  end

  def new_head(deque, increment) do
    # Bounded: ring buffer — wrap modulo size; adding `size` first keeps the
    # operand of rem/2 non-negative.
    rem(deque.size + deque.head + increment, deque.size)
  end

  # Slot index of the current tail (decrement = 0), or of the slot one past
  # the tail for a push (decrement = 1). The tail sits `length - 1` slots
  # "below" the head.
  defp tail(deque, decrement \\ 0)

  defp tail(%{size: :infinity} = deque, decrement) do
    deque.head - deque.length + 1 - decrement
  end

  defp tail(deque, decrement) do
    rem(deque.size + deque.head - deque.length + 1 - decrement, deque.size)
  end

  # Translates a logical index (0 = head, increasing toward the tail) into
  # the ETS key of that slot.
  defp real_index(deque, index) do
    if deque.size == :infinity do
      deque.head - index
    else
      rem(deque.size + deque.head - index, deque.size)
    end
  end

  @behaviour Access

  @impl Access
  @doc false
  def fetch(deque, index), do: at(deque, index)

  @impl Access
  @doc false
  def get_and_update(deque, index, fun) do
    case at(deque, index) do
      {:ok, current} ->
        case fun.(current) do
          {get, update} ->
            {:ok, deque} = replace_at(deque, index, update)
            {get, deque}

          :pop ->
            # NOTE(review): this pops the *head* regardless of `index`, so a
            # `:pop` return for any index other than 0 removes the wrong
            # element — confirm whether Access semantics were intended here.
            {:ok, item, deque} = deque |> pop_head
            {item, deque}
        end

      _error ->
        raise ArgumentError, "index out of bounds"
    end
  end

  @impl Access
  @doc false
  # Only the head (index 0) and tail (last index) can be removed in O(1);
  # interior removal is deliberately unsupported.
  def pop(deque, index) do
    cond do
      index == 0 ->
        {:ok, item, deque} = deque |> pop_head
        {item, deque}

      index == deque.length - 1 ->
        {:ok, item, deque} = deque |> pop_tail
        {item, deque}

      :else ->
        raise ArgumentError, "removing items not at head or tail is unsupported"
    end
  end
end
defimpl Collectable, for: EtsDeque do
  # Collects elements by appending each one to the tail of the deque.
  # A full deque causes a MatchError via the `{:ok, _}` match, just as
  # collecting into a bounded deque should fail loudly.
  def into(deque) do
    collector = fn
      acc, {:cont, item} ->
        {:ok, updated} = EtsDeque.push_tail(acc, item)
        updated

      acc, :done ->
        acc

      _acc, :halt ->
        :ok
    end

    {deque, collector}
  end
end
defimpl Enumerable, for: EtsDeque do
  # O(1) count: the deque tracks its length in the struct.
  def count(deque), do: {:ok, deque.length}

  # No fast membership test; defer to Enumerable's default linear scan.
  def member?(_deque, _item), do: {:error, __MODULE__}

  def reduce(_deque, {:halt, acc}, _fun), do: {:halted, acc}
  def reduce(deque, {:suspend, acc}, fun), do: {:suspended, acc, &reduce(deque, &1, fun)}
  def reduce(%{length: 0}, {:cont, acc}, _fun), do: {:done, acc}

  # Walks head -> tail without mutating the ETS table: peek at the current
  # head, then shrink only the *struct's* view of the deque (move head back
  # one slot via new_head/2, decrement length) and recurse. The original
  # deque passed by the caller is unaffected.
  def reduce(deque, {:cont, acc}, fun) do
    {:ok, head} = deque |> EtsDeque.peek_head()
    new_head = EtsDeque.new_head(deque, -1)
    deque = %{deque | head: new_head, length: deque.length - 1}
    reduce(deque, fun.(head, acc), fun)
  end

  # Random-access slicing via at/2. The slice is built from its last index
  # down to `start`, prepending each item so every step is O(1).
  def slice(deque) do
    {:ok, deque.length,
     fn
       _start, 0 ->
         []

       start, len ->
         Enum.reduce((start + len - 1)..start, [], fn index, acc ->
           {:ok, item} = EtsDeque.at(deque, index)
           [item | acc]
         end)
     end}
  end
end
|
lib/ets_deque.ex
| 0.913392 | 0.678513 |
ets_deque.ex
|
starcoder
|
defmodule Exbackoff do
  @moduledoc """
  ExBackoff is an Elixir library to deal with exponential backoffs and timers
  to be used within OTP processes when dealing with cyclical events, such as
  reconnections, or generally retrying things.
  """
  # `import Bitwise` (for `<<<`) instead of the deprecated `use Bitwise`.
  import Bitwise

  # Public (not @typep): this type is referenced from the specs of public
  # functions below, and Dialyzer warns about private types appearing in a
  # module's public contracts.
  @type max :: pos_integer | :infinity

  defstruct start: nil, max: nil, current: nil, type: :normal, value: nil, dest: nil

  @typedoc """
  Struct containing the state of the backoff: the start, current and max
  delay values, the backoff type (`:normal` or `:jitter`), and the value and
  destination pid used when firing a timeout message via `fire/1`.
  """
  @type backoff :: %__MODULE__{
          start: pos_integer,
          max: max,
          current: pos_integer,
          type: :normal | :jitter,
          value: any,
          dest: pid
        }

  @doc """
  Doubles the given delay — use this to do the increments by hand.
  """
  @spec increment(pos_integer) :: pos_integer
  def increment(n) when is_integer(n), do: n <<< 1

  @doc """
  Doubles the given delay, capped so the result won't exceed `max`.
  """
  @spec increment(pos_integer, pos_integer) :: pos_integer
  def increment(n, max), do: min(increment(n), max)

  @doc """
  Does the random increment by hand.

  Chooses the new delay uniformly from `[0.5 * Time, 1.5 * Time]` (with
  Time = 2N) as recommended in: Sally Floyd and Van Jacobson, "The
  Synchronization of Periodic Routing Messages", April 1994 IEEE/ACM
  Transactions on Networking. http://ee.lbl.gov/papers/sync_94.pdf
  """
  @spec rand_increment(pos_integer) :: pos_integer
  def rand_increment(n) do
    # New delay chosen from [N, 3N], i.e. [0.5 * 2N, 1.5 * 2N]
    width = n <<< 1
    n + :rand.uniform(width + 1) - 1
  end

  @doc """
  Does the random increment, capped so the result won't exceed `max`.
  """
  @spec rand_increment(pos_integer, pos_integer) :: pos_integer
  def rand_increment(n, max) do
    # The largest interval for [0.5 * Time, 1.5 * Time] with maximum Max is
    # [Max div 3, Max].
    max_min_delay = div(max, 3)

    cond do
      # max < 3: the interval collapses, so just pick uniformly from [1, max].
      max_min_delay === 0 ->
        :rand.uniform(max)

      # The delay is already large enough that incrementing it could
      # overshoot max; increment from the capped base instead.
      n > max_min_delay ->
        rand_increment(max_min_delay)

      true ->
        rand_increment(n)
    end
  end

  # Increments + Timer support

  @doc """
  Init function when the user doesn't feel like using a timer
  provided by this library.
  """
  @spec init(pos_integer, max) :: backoff
  def init(start, max), do: init(start, max, nil, nil)

  @doc """
  Init function when the user feels like using a timer provided by this
  library: `fire/1` will send `value` to `dest` when the timer expires.
  """
  @spec init(pos_integer, max, pid | nil, any | nil) :: backoff
  def init(start, max, dest, value) do
    %Exbackoff{start: start, current: start, max: max, value: value, dest: dest}
  end

  @doc """
  Starts a timer from the `backoff()` argument, using `:erlang.start_timer/3`.
  No reference tracking is done, and this is left to the user. This function
  is purely a convenience function.

  Note: the backoff must carry a destination (built via `init/4`); a `nil`
  dest makes `:erlang.start_timer/3` raise a badarg.
  """
  @spec fire(backoff) :: reference
  def fire(%Exbackoff{current: delay, value: value, dest: dest}) do
    :erlang.start_timer(delay, dest, value)
  end

  @doc """
  Reads the current backoff value.
  """
  @spec get(backoff) :: pos_integer
  def get(%Exbackoff{current: delay}), do: delay

  @doc """
  Swaps between the `:normal` and `:jitter` states of the backoff.
  """
  @spec type(backoff, :normal | :jitter) :: backoff
  def type(b = %Exbackoff{}, :jitter), do: %{b | type: :jitter}
  def type(b = %Exbackoff{}, :normal), do: %{b | type: :normal}

  @doc """
  Increments the current delay and returns `{new_delay, updated_backoff}`.
  One clause per (max, type) combination.
  """
  @spec fail(backoff) :: {pos_integer, backoff}
  def fail(b = %Exbackoff{current: delay, max: :infinity, type: :normal}) do
    new_delay = increment(delay)
    {new_delay, %{b | current: new_delay}}
  end

  def fail(b = %Exbackoff{current: delay, max: max, type: :normal}) do
    new_delay = increment(delay, max)
    {new_delay, %{b | current: new_delay}}
  end

  def fail(b = %Exbackoff{current: delay, max: :infinity, type: :jitter}) do
    new_delay = rand_increment(delay)
    {new_delay, %{b | current: new_delay}}
  end

  def fail(b = %Exbackoff{current: delay, max: max, type: :jitter}) do
    new_delay = rand_increment(delay, max)
    {new_delay, %{b | current: new_delay}}
  end

  @doc """
  Resets the current delay back to the start value, returning
  `{start, updated_backoff}`.
  """
  @spec succeed(backoff) :: {pos_integer, backoff}
  def succeed(b = %Exbackoff{start: start}) do
    {start, %{b | current: start}}
  end
end
|
lib/exbackoff.ex
| 0.931205 | 0.518485 |
exbackoff.ex
|
starcoder
|
defmodule Livebook.JSInterop do
  @moduledoc false

  alias Livebook.Delta

  @doc """
  Applies `delta` to `string` and returns the resulting string.

  Delta operation lengths (retain, delete) are interpreted the way
  JavaScript interprets string positions, i.e. in UTF-16 code units.
  JavaScript stores strings as UTF-16, where every character occupies one
  or two 16-bit code units, and position-based string APIs (like slicing)
  count code units rather than characters. To mirror that, the UTF-8
  string is first expanded into a list of UTF-16 code units, the delta is
  applied to that list, and the result is collapsed back into UTF-8.
  """
  @spec apply_delta_to_string(Delta.t(), String.t()) :: String.t()
  def apply_delta_to_string(delta, string) do
    units = string_to_utf16_code_units(string)

    delta.ops
    |> apply_ops(units)
    |> utf16_code_units_to_string()
  end

  # Applies delta operations to a list of UTF-16 code units.
  defp apply_ops([], units), do: units

  defp apply_ops([{:retain, count} | rest], units) do
    {kept, remaining} = Enum.split(units, count)
    kept ++ apply_ops(rest, remaining)
  end

  defp apply_ops([{:insert, text} | rest], units) do
    string_to_utf16_code_units(text) ++ apply_ops(rest, units)
  end

  defp apply_ops([{:delete, count} | rest], units) do
    apply_ops(rest, Enum.slice(units, count..-1))
  end

  @doc """
  Converts a JavaScript (UTF-16 based) column number on `line` into the
  corresponding Elixir (grapheme based) column number.
  """
  @spec js_column_to_elixir(pos_integer(), String.t()) :: pos_integer()
  def js_column_to_elixir(column, line) do
    prefix =
      line
      |> string_to_utf16_code_units()
      |> Enum.take(column - 1)
      |> utf16_code_units_to_string()

    String.length(prefix) + 1
  end

  @doc """
  Converts an Elixir (grapheme based) column number on `line` into the
  corresponding JavaScript (UTF-16 based) column number.
  """
  @spec elixir_column_to_js(pos_integer(), String.t()) :: pos_integer()
  def elixir_column_to_js(column, line) do
    prefix_units =
      line
      |> string_take(column - 1)
      |> string_to_utf16_code_units()

    length(prefix_units) + 1
  end

  # First `count` graphemes of `string`.
  defp string_take(_string, 0), do: ""
  defp string_take(string, count) when count > 0, do: String.slice(string, 0, count)

  # UTF-16 helpers

  # Expands a UTF-8 string into a flat list of 16-bit UTF-16 code units
  # (big-endian, the :unicode default).
  defp string_to_utf16_code_units(string) do
    utf16 = :unicode.characters_to_binary(string, :utf8, :utf16)
    for <<unit::16 <- utf16>>, do: unit
  end

  # Collapses a list of 16-bit code units back into a UTF-8 string.
  defp utf16_code_units_to_string(units) do
    units
    |> Enum.into(<<>>, fn unit -> <<unit::16>> end)
    |> :unicode.characters_to_binary(:utf16, :utf8)
  end
end
|
lib/livebook/js_interop.ex
| 0.839915 | 0.746647 |
js_interop.ex
|
starcoder
|
defmodule Ecall.Framing do
  @behaviour Circuits.UART.Framing
  @moduledoc """
  Based on Circuits.UART.Framing.Line, the difference is there is
  one separator for sending data and another for receiving.
  """

  defmodule State do
    @moduledoc false
    # max_length    - longest allowed line (in bytes) before a partial is emitted
    # in_separator  - terminator expected on received data ("\r\n")
    # out_separator - terminator appended to transmitted data ("\r")
    # processed     - bytes of the current line scanned so far (no separator yet)
    # in_process    - received bytes not yet scanned
    defstruct max_length: nil,
              in_separator: nil,
              out_separator: nil,
              processed: <<>>,
              in_process: <<>>
  end

  # Framing behaviour callback: build initial state; :max_length is the only
  # configurable option, the separators are fixed.
  def init(args) do
    max_length = Keyword.get(args, :max_length, 4096)

    state = %State{max_length: max_length,
                   in_separator: "\r\n",
                   out_separator: "\r"}

    {:ok, state}
  end

  # Outgoing data just gets the transmit separator appended.
  def add_framing(data, state) do
    {:ok, data <> state.out_separator, state}
  end

  # Incoming data is appended to the unscanned buffer, then split into
  # complete lines; leftover bytes are carried in the state for the next call.
  def remove_framing(data, state) do
    {new_processed, new_in_process, lines} =
      process_data(
        state.in_separator,
        byte_size(state.in_separator),
        state.max_length,
        state.processed,
        state.in_process <> data,
        []
      )

    new_state = %{state | processed: new_processed, in_process: new_in_process}
    # :in_frame signals a partially received line is still buffered.
    rc = if buffer_empty?(new_state), do: :ok, else: :in_frame
    {rc, lines, new_state}
  end

  # On timeout, flush whatever is buffered as a {:partial, bytes} frame.
  def frame_timeout(state) do
    partial_line = {:partial, state.processed <> state.in_process}
    new_state = %{state | processed: <<>>, in_process: <<>>}
    {:ok, [partial_line], new_state}
  end

  # Flushing the receive direction drops any buffered partial line;
  # nothing is buffered on the transmit side.
  def flush(direction, state) when direction == :receive or direction == :both do
    %{state | processed: <<>>, in_process: <<>>}
  end

  def flush(:transmit, state) do
    state
  end

  def buffer_empty?(%State{processed: <<>>, in_process: <<>>}), do: true
  def buffer_empty?(_state), do: false

  # Handle not enough data case: fewer unscanned bytes than a separator —
  # stop and keep them for the next chunk.
  defp process_data(_separator, sep_length, _max_length, processed, to_process, lines)
       when byte_size(to_process) < sep_length do
    {processed, to_process, lines}
  end

  # Process data until separator or next char. Clause order inside the case
  # matters: a separator is consumed before the max-length check applies.
  defp process_data(separator, sep_length, max_length, processed, to_process, lines) do
    case to_process do
      # Handle separater: the accumulated bytes form a complete line.
      <<^separator::binary-size(sep_length), rest::binary>> ->
        new_lines = lines ++ [processed]
        process_data(separator, sep_length, max_length, <<>>, rest, new_lines)

      # Handle line too long case: emit what we have as a partial frame.
      to_process
      when byte_size(processed) == max_length and to_process != <<>> ->
        new_lines = lines ++ [{:partial, processed}]
        process_data(separator, sep_length, max_length, <<>>, to_process, new_lines)

      # Handle next char: move one byte from the unscanned buffer to the line.
      <<next_char::binary-size(1), rest::binary>> ->
        process_data(separator, sep_length, max_length, processed <> next_char, rest, lines)
    end
  end
end
|
lib/port/ecall_framing.ex
| 0.598077 | 0.453322 |
ecall_framing.ex
|
starcoder
|
defmodule Movement.Migrator do
  @moduledoc """
  Route migration to the module which will execute it or return
  a value without a function call.

  Using a simple DSL with an `up` and `down` function, it creates functions in the same
  fashion as the `Plug` library.

  Modules used to execute operations should implement the `Migration` behaviour.

  ## Example

      # Given an `up` statement:
      up :correct_conflict, Migration.Conflict, :correct

      # And a function call on Migrator
      Migrator.up(:correct_conflict, operation)

  This will call `Accent.Migrator.Migration.Conflict.call(:correct, operation)` where
  operation is the same operation object passed to `Migrator.up/2`.
  """
  # `up` and `down` used below are macros from this module; each call defines
  # a matching `up(tag, operation)` / `down(tag, operation)` function clause
  # at compile time (either routing to module/function or returning a value).
  import Movement.Migrator.Macros

  alias Movement.Migration.{Conflict, Translation, Rollback}

  # Batched form: migrate each operation in the list individually.
  def up(operations) when is_list(operations), do: Enum.map(operations, &up/1)
  def down(operations) when is_list(operations), do: Enum.map(operations, &down/1)

  # Noop
  up(:noop, {:ok, :noop})
  down(:noop, {:ok, :noop})
  # Autocorrect
  up(:autocorrect, {:ok, :autocorrect})
  down(:autocorrect, {:ok, :autocorrect})
  # Conflicts
  up(:correct_conflict, Conflict, :correct)
  up(:uncorrect_conflict, Conflict, :uncorrect)
  up(:conflict_on_proposed, Conflict, :on_proposed)
  up(:merge_on_proposed, Conflict, :on_proposed)
  up(:merge_on_proposed_force, Conflict, :on_proposed)
  up(:conflict_on_slave, Conflict, :on_slave)
  up(:conflict_on_corrected, Conflict, :on_corrected)
  up(:merge_on_corrected, Conflict, :on_corrected)
  # NOTE(review): routes to :on_proposed while the other *_corrected tags
  # route to :on_corrected — confirm this asymmetry is intentional.
  up(:merge_on_corrected_force, Conflict, :on_proposed)
  # Translations
  up(:remove, Translation, :remove)
  up(:update, Translation, :update)
  up(:update_proposed, Translation, :update_proposed)
  up(:version_new, Translation, :version_new)
  up(:new, Translation, :new)
  up(:renew, Translation, :renew)
  # Rollback
  up(:rollback, Rollback, :restore)
  down(:new, Rollback, :new)
  down(:renew, Rollback, :new)
  down(:remove, Rollback, :remove)
  down(:update, Rollback, :restore)
  down(:update_proposed, Rollback, :restore)
  down(:conflict_on_slave, Rollback, :restore)
  down(:conflict_on_proposed, Rollback, :restore)
  down(:conflict_on_corrected, Rollback, :restore)
  down(:merge_on_proposed_force, Rollback, :restore)
  down(:merge_on_proposed, Rollback, :restore)
  down(:merge_on_corrected, Rollback, :restore)
  down(:correct_conflict, Rollback, :restore)
  down(:uncorrect_conflict, Rollback, :restore)
  down(:rollback, Rollback, :rollback)
end
|
lib/movement/migrator.ex
| 0.831964 | 0.504028 |
migrator.ex
|
starcoder
|
defmodule Commanded.Aggregate.Multi do
  @moduledoc """
  Use `Commanded.Aggregate.Multi` to generate multiple events from a single
  command.

  This can be useful when you want to emit multiple events that depend upon the
  aggregate state being updated.

  ## Example

  In the example below, money is withdrawn from the bank account and the
  updated balance is used to check whether the account is overdrawn.

      defmodule BankAccount do
        defstruct [
          account_number: nil,
          balance: 0,
          state: nil,
        ]

        alias Commanded.Aggregate.Multi

        def withdraw(
              %BankAccount{state: :active} = account,
              %WithdrawMoney{amount: amount})
            when is_number(amount) and amount > 0
        do
          account
          |> Multi.new()
          |> Multi.execute(&withdraw_money(&1, amount))
          |> Multi.execute(&check_balance/1)
        end

        defp withdraw_money(%BankAccount{account_number: account_number, balance: balance}, amount) do
          %MoneyWithdrawn{
            account_number: account_number,
            amount: amount,
            balance: balance - amount
          }
        end

        defp check_balance(%BankAccount{account_number: account_number, balance: balance})
             when balance < 0
        do
          %AccountOverdrawn{account_number: account_number, balance: balance}
        end

        defp check_balance(%BankAccount{}), do: []
      end
  """

  alias Commanded.Aggregate.Multi

  @type t :: %__MODULE__{
          aggregate: struct(),
          executions: list(function())
        }

  # Executions are stored most-recent-first and reversed by `run/1`.
  defstruct [:aggregate, executions: []]

  @doc """
  Create a new `Commanded.Aggregate.Multi` struct.
  """
  @spec new(aggregate :: struct()) :: Multi.t()
  def new(aggregate), do: %Multi{aggregate: aggregate}

  @doc """
  Adds a command execute function to the multi.
  """
  @spec execute(Multi.t(), function()) :: Multi.t()
  def execute(%Multi{} = multi, execute_fun) when is_function(execute_fun, 1) do
    %Multi{executions: executions} = multi

    %Multi{multi | executions: [execute_fun | executions]}
  end

  @doc """
  Reduce an enumerable by executing the function for each item.

  The aggregate `apply/2` function will be called after each event returned by
  the execute function. This allows you to calculate values from the aggregate
  state based upon events produced by previous items in the enumerable, such as
  running totals.

  ## Example

      alias Commanded.Aggregate.Multi

      aggregate
      |> Multi.new()
      |> Multi.reduce([1, 2, 3], fn aggregate, item ->
        %AnEvent{item: item, total: aggregate.total + item}
      end)
  """
  @spec reduce(Multi.t(), Enum.t(), function()) :: Multi.t()
  def reduce(%Multi{} = multi, enumerable, execute_fun) when is_function(execute_fun, 2) do
    Enum.reduce(enumerable, multi, fn item, %Multi{} = multi ->
      execute(multi, &execute_fun.(&1, item))
    end)
  end

  @doc """
  Run the execute functions contained within the multi, returning the updated
  aggregate state and all created events, or `{:error, reason}` as soon as any
  execute function fails.
  """
  @spec run(Multi.t()) ::
          {aggregate :: struct(), list(event :: struct())} | {:error, reason :: any()}
  def run(%Multi{aggregate: aggregate, executions: executions}) do
    try do
      executions
      # Stored most-recent-first; reverse to run in the order they were added.
      |> Enum.reverse()
      |> Enum.reduce({aggregate, []}, fn execute_fun, {aggregate, events} ->
        case execute_fun.(aggregate) do
          {:error, _reason} = error ->
            throw(error)

          %Multi{} = multi ->
            # Nested multi: run it and *merge* its events with those already
            # accumulated. (Previously the accumulated events were discarded,
            # and a nested `{:error, _}` leaked through as the accumulator,
            # crashing the next reduce step instead of being returned.)
            case Multi.run(multi) do
              {:error, _reason} = error ->
                throw(error)

              {aggregate, new_events} ->
                {aggregate, events ++ new_events}
            end

          none when none in [:ok, nil, []] ->
            # Step produced no events; aggregate state is unchanged.
            {aggregate, events}

          {:ok, pending_events} ->
            pending_events = List.wrap(pending_events)

            {apply_events(aggregate, pending_events), events ++ pending_events}

          pending_events ->
            # A bare event or list of events.
            pending_events = List.wrap(pending_events)

            {apply_events(aggregate, pending_events), events ++ pending_events}
        end
      end)
    catch
      {:error, _error} = error -> error
    end
  end

  # Applies each pending event to the aggregate via its module's `apply/2`.
  defp apply_events(aggregate, events) do
    Enum.reduce(events, aggregate, &aggregate.__struct__.apply(&2, &1))
  end
end
|
lib/commanded/aggregates/multi.ex
| 0.88284 | 0.632574 |
multi.ex
|
starcoder
|
defmodule Parselix.Basic do
use Parselix
@moduledoc """
Provide basic parsers.
"""
defmacro __using__(_opts) do
quote do
import unquote(__MODULE__)
end
end
@doc "Replaces error messages."
def error_message(parser, message) do
fn target, position ->
case parser.(target, position) do
{:error, _, _} -> {:error, message, position}
x -> x
end
end
end
@doc "Attaches a meta data to the result of the given parser."
def meta(parser), do: meta(parser, nil)
parser :meta, [parser, label] do
fn target, position ->
mapper = fn result ->
%Meta{label: label, value: result, position: position}
end
map(parser, mapper).(target, position)
end
end
@doc "Parses a string which matches against the given regex."
parser :regex, [regex] do
fn target, position ->
case (Regex.run regex, target, return: :index) |> Enum.find(fn {x, _} -> x == 0 end) do
{0, len} -> {:ok, String.slice(target, 0, len), len}
_ -> {:error, "The regex does not match."}
end
|> format_result("regex", target, position)
end
end
@doc "Parses a specified string."
parser :string, [option] do
fn target, position ->
if String.starts_with?(target, option) do
{:ok, option, String.length option}
else
{:error, "There is not the string."}
end
|> format_result("string", target, position)
end
end
@doc "Parses a specified character."
parser :char, [option] do
fn target, position ->
case any.(target, position) do
{:ok, char, remainder, position} ->
if String.contains? option, char do
{:ok, char, remainder, position}
else
{:error, "There is not an expected character."}
end
x -> x
end
|> format_result("char", target, position)
end
end
@doc "Parses a not specified character."
parser :not_char, [option] do
fn target, position ->
case char(option).(target, position) do
{:ok, _, _, _} -> {:error, "\"#{String.first target}\" appeared.", position}
_ -> any().(target, position)
end
|> format_result("not_char", target, position)
end
end
@doc "Parses any character."
parser :any do
fn target, position ->
case target do
"" -> {:error, "EOF appeared.", position}
x -> {:ok, String.first(x), 1}
end
|> format_result("any", target, position)
end
end
@doc "Returns a result of the given parser which succeeds first."
def choice([]) do
fn _, position ->
{:error, "No parser succeeded", position}
end
end
def choice([parser | tail]) do
fn target, position ->
case parser.(target, position) do
{:ok, _, _, _} = result -> result
{:error, _, pos1} = error1 ->
case choice(tail).(target, position) do
{:ok, _, _, _} = result -> result
{:error, _, pos2} = error2 -> if pos1.index < pos2.index do
error2
else
error1
end
end
end
|> format_result("choice", target, position)
end
end
@doc "Parses 0 times or once."
parser :option, [option] do
fn target, position ->
case option.(target, position) do
{:ok, _, _, _} = x -> x
_ -> {:ok, :empty, target, position}
end
end
end
@doc "Returns a default value when parser failed."
parser :default, [parser, default] do
parser |> option |> map(fn x -> if x == :empty, do: default, else: x end)
end
@doc "Replaces the result of the given parser."
parser :replace, [parser, replacement] do
parser |> map(fn _ -> replacement end)
end
@doc "Parses in sequence."
def sequence([]), do: fn target, position -> {:ok, [], target, position} end
def sequence([parser | tail]) do
fn target, position ->
case parser.(target, position) do
{:ok, result, remainder, position} ->
sequence(tail)
|> map(fn tail_result ->
[result | tail_result]
end)
|> parse(remainder, position)
x -> x
end
end
end
@doc "Parses 0 or more times."
def many(parser, min..max), do: many(parser, min, max)
def many(parser, min \\ 0, max \\ -1) do
fn target, position ->
if max == 0 do
{:ok, [], target, position}
else
case parser.(target, position) do
{:ok, result, remainder, position} ->
many(parser, min - 1, max - 1)
|> map(fn tail_result ->
[result | tail_result]
end)
|> parse(remainder, position)
{:error, _, _} ->
if min <= 0 do
{:ok, [], target, position}
else
{:error, "The count is out of the range.", position}
end
end
|> case do
{:error, message, _} -> {:error, message, position}
x -> x
end
end
end
end
@doc "Parses X times."
def times(_parser, time) when time <= 0, do: fn target, position -> {:ok, [], target, position} end
def times(parser, time) do
fn target, position ->
case parser.(target, position) do
{:ok, result, remainder, position} ->
times(parser, time - 1)
|> map(fn tail_result ->
[result | tail_result]
end)
|> parse(remainder, position)
x -> x
end
end
end
@doc "Maps the result of the given parser."
def map(parser, func) do
fn target, position ->
case parser.(target, position) do
{:ok, result, remainder, position} -> {:ok, func.(result), remainder, position}
x -> x
end
end
end
@doc "Removes :empty from the result of the given parser."
parser :clean, [parser] do
parser |> map(fn x -> Enum.filter x, fn x -> x != :empty end end)
end
@doc "Flattens the result of the given parser."
parser :flat, [parser] do
func = fn x, func ->
case x do
list when is_list(list) -> flatten(list, &func.(&1, func))
x -> x
end
end
parser |> map(&flatten(&1, fn x -> func.(x, func) end))
end
@doc "Flattens the result of the given parser once."
parser :flat_once, [parser] do
parser |> map(&flatten/1)
end
defp flatten(_list, _mapper \\ fn x -> x end)
defp flatten([head | tail], mapper) do
case head do
head when is_list(head) -> mapper.(head) ++ flatten(tail, mapper)
head -> [mapper.(head) | flatten(tail, mapper)]
end
end
defp flatten([], _mapper), do: []
defp flatten(x, _mapper), do: [x]
@doc "Concatenates the result of the given parser to a string."
parser :concat, [parser] do
parser |> flat |> map(fn x -> (Enum.filter x, fn x -> x !== :empty end) |> Enum.join end)
end
@doc "Puts the result of the given parser into an empty array."
parser :wrap, [parser] do
parser |> map(&([&1]))
end
@doc "Puts the value out of the result of the given parser."
parser :unwrap, [parser] do
parser |> map(fn [x] -> x end)
end
@doc "Recursively puts the value out of the result of the given parser."
parser :unwrap_r, [parser] do
unwrap = fn
[x], unwrap -> unwrap.(x, unwrap)
x, _ -> x
end
parser |> map(&unwrap.(&1, unwrap))
end
@doc "Picks one value from the result of the given parser."
parser :pick, [parser, index] do
parser |> map(&Enum.at(&1, index))
end
@doc "Slices the result of the given parser."
parser :slice, [parser, range] do
parser |> map(&Enum.slice(&1, range))
end
parser :slice, [parser, start, count] do
parser |> map(&Enum.slice(&1, start, count))
end
@doc "Parses 1 or more times."
# Delegates to many/2 (defined elsewhere in this module) with a minimum
# repetition count of 1.
parser :many_1, [option] do
  fn target, position ->
    many(option, 1)
    |> parse(target, position)
  end
end
@doc "Dumps the result of the given parser."
# Replaces the produced value with the :empty placeholder; input
# consumption (remainder/position) is preserved by `map`.
parser :dump, [parser] do
  parser |> map(fn _ -> :empty end)
end
@doc "Ignores the result of the given parser."
# On success, the result is replaced with :empty and the ORIGINAL target and
# position are returned (i.e. the inner parser's consumption is discarded,
# making this behave as a lookahead). Errors pass through unchanged.
def ignore(parser) do
  fn target, position ->
    case parse(parser, target, position) do
      {:ok, _result, _remainder, _new_position} -> {:ok, :empty, target, position}
      error -> error
    end
  end
end
@doc "Validates the result of the given parser."
# `func` must return exactly `true` (strict ===) for the result to pass.
# NOTE(review): the failure branch builds a 2-element {:error, msg} tuple
# while other parsers include the position; format_result/4 (defined
# elsewhere) presumably normalizes this — confirm.
parser :check, [parser, func] do
  fn target, position ->
    case parser.(target, position) do
      {:ok, result, remainder, position} ->
        if func.(result) === true, do: {:ok, result, remainder, position}, else: {:error, "#{inspect result} is a bad result."}
      x -> x
    end
    |> format_result("check", target, position)
  end
end
@doc "Parses the end of text."
# Succeeds with :eof only when the remaining input is empty; consumes nothing.
# FIX: error message read "There is not EOF." — corrected grammar.
parser :eof do
  fn
    "", position -> {:ok, :eof, "", position}
    _, position -> {:error, "There is no EOF.", position}
  end
end
end
|
lib/parselix/basic.ex
| 0.835215 | 0.512205 |
basic.ex
|
starcoder
|
defmodule K8s.Client.Runner.Watch do
  @moduledoc """
  `K8s.Client` runner that will watch a resource or resources and stream results back to a process.
  """

  # JSON path into a payload where the current resourceVersion lives.
  @resource_version_json_path ~w(metadata resourceVersion)

  alias K8s.Client.Runner.Base
  alias K8s.Operation

  @doc """
  Watch a resource or list of resources. Provide the `stream_to` option or results will be stream to `self()`.

  Note: Current resource version will be looked up automatically.

  ## Examples

  ```elixir
  operation = K8s.Client.list("v1", "Namespace")
  {:ok, reference} = Watch.run(operation, :test, stream_to: self())
  ```

  ```elixir
  operation = K8s.Client.get("v1", "Namespace", [name: "test"])
  {:ok, reference} = Watch.run(operation, :test, stream_to: self())
  ```
  """
  # FIX: the spec previously declared `:: no_return`, which is wrong — this
  # function returns the result of `Base.run/3` ({:ok, reference} per the
  # examples above) or an {:error, _} tuple.
  @spec run(Operation.t(), binary, keyword) :: {:ok, reference} | {:error, binary}
  def run(operation = %Operation{method: :get}, cluster_name, opts) do
    case get_resource_version(operation, cluster_name) do
      {:ok, rv} -> run(operation, cluster_name, rv, opts)
      error -> error
    end
  end

  def run(op, _, _),
    do: {:error, "Only HTTP GET operations (list, get) are supported. #{inspect(op)}"}

  @doc """
  Watch a resource or list of resources from a specific resource version. Provide the `stream_to` option or results will be stream to `self()`.

  ## Examples

  ```elixir
  operation = K8s.Client.list("v1", "Namespace")
  resource_version = 3003
  {:ok, reference} = Watch.run(operation, :test, resource_version, stream_to: self())
  ```

  ```elixir
  operation = K8s.Client.get("v1", "Namespace", [name: "test"])
  resource_version = 3003
  {:ok, reference} = Watch.run(operation, :test, resource_version, stream_to: self())
  ```
  """
  @spec run(Operation.t(), binary, binary | integer, keyword) ::
          {:ok, reference} | {:error, binary}
  def run(operation = %Operation{method: :get, verb: verb}, cluster_name, rv, opts)
      when verb in [:list, :list_all_namespaces] do
    opts_w_watch_params = add_watch_params_to_opts(opts, rv)
    Base.run(operation, cluster_name, opts_w_watch_params)
  end

  # A single-resource GET is watched by converting it into a LIST filtered on
  # the resource's name, since the Kubernetes watch API operates on lists.
  def run(operation = %Operation{method: :get, verb: :get}, cluster_name, rv, opts) do
    {list_op, field_selector_param} = get_to_list(operation)

    params = Map.merge(opts[:params] || %{}, field_selector_param)
    opts = Keyword.put(opts, :params, params)
    run(list_op, cluster_name, rv, opts)
  end

  def run(op, _, _, _),
    do: {:error, "Only HTTP GET operations (list, get) are supported. #{inspect(op)}"}

  # Runs the operation once (without watch) to read the resourceVersion to
  # start watching from.
  @spec get_resource_version(Operation.t(), binary) :: {:ok, binary} | {:error, binary}
  defp get_resource_version(operation = %Operation{}, cluster_name) do
    case Base.run(operation, cluster_name) do
      {:ok, payload} ->
        rv = parse_resource_version(payload)
        {:ok, rv}

      {:error, error} ->
        {:error, error}
    end
  end

  # Merges `watch=true` and the resourceVersion into the request params,
  # preserving any params the caller supplied.
  @spec add_watch_params_to_opts(keyword, binary | integer) :: keyword
  defp add_watch_params_to_opts(opts, rv) do
    params = Map.merge(opts[:params] || %{}, %{"resourceVersion" => rv, "watch" => true})
    Keyword.put(opts, :params, params)
  end

  # Falls back to "0" when the payload carries no resourceVersion.
  @spec parse_resource_version(any) :: binary
  defp parse_resource_version(payload = %{}),
    do: get_in(payload, @resource_version_json_path) || "0"

  defp parse_resource_version(_), do: "0"

  # Converts a GET operation into an equivalent LIST operation plus a
  # fieldSelector param restricting it to the named resource.
  # NOTE(review): "%3D" is a pre-URL-encoded "="; confirm the HTTP layer does
  # not encode query params again, which would double-encode this value.
  @spec get_to_list(Operation.t()) :: {Operation.t(), map}
  defp get_to_list(get_op) do
    list_op = %{get_op | verb: :list, path_params: []}
    name = get_op.path_params[:name]
    params = %{"fieldSelector" => "metadata.name%3D#{name}"}
    {list_op, params}
  end
end
|
lib/k8s/client/runner/watch.ex
| 0.888967 | 0.657404 |
watch.ex
|
starcoder
|
defmodule Kino.Ecto do
  @moduledoc """
  A widget for interactively viewing `Ecto` query results.

  The data must be an enumerable of records, where each
  record is either map, struct, keyword list or tuple.

  ## Examples

  The widget primarily allows for viewing a database table
  given a schema:

      Kino.Ecto.new(Weather, Repo)

  However, the first argument can be any queryable, so
  you can pipe arbitrary queries directly to the widget:

      from(w in Weather, where: w.city == "New York")
      |> Kino.Ecto.new(Repo)
  """

  # @doc false presumably hides the `child_spec/1` injected by
  # `use GenServer` from the generated docs — confirm.
  @doc false
  use GenServer, restart: :temporary

  alias Kino.Utils.Table

  defstruct [:pid]

  @type t :: %__MODULE__{pid: pid()}

  @typedoc false
  @type state :: %{
          parent_monitor_ref: reference(),
          repo: Ecto.Repo.t(),
          queryable: Ecto.Queryable.t()
        }

  @doc """
  Starts a widget process with the given queryable as
  the data source.
  """
  @spec new(Ecto.Queryable.t(), Ecto.Repo.t()) :: t()
  def new(queryable, repo) when is_atom(repo) do
    unless queryable?(queryable) do
      raise ArgumentError,
            "expected a term implementing the Ecto.Queryable protocol, got: #{inspect(queryable)}"
    end

    parent = self()
    opts = [repo: repo, queryable: queryable, parent: parent]

    {:ok, pid} = DynamicSupervisor.start_child(Kino.WidgetSupervisor, {__MODULE__, opts})

    %__MODULE__{pid: pid}
  end

  # True when `term` has an Ecto.Queryable protocol implementation.
  defp queryable?(term) do
    Ecto.Queryable.impl_for(term) != nil
  end

  @doc false
  def start_link(opts) do
    GenServer.start_link(__MODULE__, opts)
  end

  @impl true
  def init(opts) do
    repo = Keyword.fetch!(opts, :repo)
    queryable = Keyword.fetch!(opts, :queryable)
    parent = Keyword.fetch!(opts, :parent)

    # Monitor the parent so the widget terminates with it (:DOWN below).
    parent_monitor_ref = Process.monitor(parent)

    {:ok, %{parent_monitor_ref: parent_monitor_ref, repo: repo, queryable: queryable}}
  end

  # Client connected: reply with table name, columns and supported features.
  @impl true
  def handle_info({:connect, pid}, state) do
    name = state.queryable |> query_source() |> to_string()
    columns = state.queryable |> keys_from_queryable() |> Table.keys_to_columns()

    features =
      Kino.Utils.truthy_keys(
        refetch: true,
        pagination: true,
        # If the user specifies custom select, the record keys
        # are not valid "order by" fields, so we disable sorting
        sorting: default_select_query?(state.queryable)
      )

    send(
      pid,
      {:connect_reply, %{name: name, columns: columns, features: features}}
    )

    {:noreply, state}
  end

  # Client requested a page of rows described by `rows_spec`
  # (limit/offset and optional ordering).
  def handle_info({:get_rows, pid, rows_spec}, state) do
    {total_rows, records} = get_records(state.repo, state.queryable, rows_spec)

    {columns, keys} =
      case keys_from_queryable(state.queryable) do
        [] ->
          # No schema-derived keys: infer columns from the fetched records.
          columns = Table.columns_for_records(records)
          keys = Enum.map(columns, & &1.key)
          {columns, keys}

        keys ->
          # :initial is sent as `columns`, presumably telling the client to
          # keep the columns from :connect_reply — confirm with the protocol.
          {:initial, keys}
      end

    rows = Enum.map(records, &Table.record_to_row(&1, keys))

    send(pid, {:rows, %{rows: rows, total_rows: total_rows, columns: columns}})

    {:noreply, state}
  end

  # Parent died: shut the widget down.
  def handle_info({:DOWN, ref, :process, _object, _reason}, %{parent_monitor_ref: ref} = state) do
    {:stop, :shutdown, state}
  end

  # Runs a COUNT aggregate plus the paginated/sorted query itself.
  defp get_records(repo, queryable, rows_spec) do
    count = repo.aggregate(queryable, :count)

    query = prepare_query(queryable, rows_spec)
    records = repo.all(query)

    {count, records}
  end

  # Source (table) name of the queryable, used as the widget title.
  defp query_source(queryable) do
    %{from: %{source: {source, _schema}}} = Ecto.Queryable.to_query(queryable)
    source
  end

  # True when the query has no custom :select clause.
  defp default_select_query?(queryable) do
    query = Ecto.Queryable.to_query(queryable)
    query.select == nil
  end

  # Schema fields when a schema is available and no custom select is set;
  # [] otherwise (columns are then inferred from records).
  defp keys_from_queryable(queryable) do
    schema = Table.ecto_schema(queryable)

    if schema != nil and default_select_query?(queryable) do
      schema.__schema__(:fields)
    else
      []
    end
  end

  if Code.ensure_loaded?(Ecto.Query) do
    # Applies pagination and (optionally) ordering from `rows_spec`.
    defp prepare_query(queryable, rows_spec) do
      import Ecto.Query, only: [from: 2]

      query = from(q in queryable, limit: ^rows_spec.limit, offset: ^rows_spec.offset)

      if rows_spec[:order_by] do
        # Drop any pre-existing order_by before applying the requested one.
        query = Ecto.Query.exclude(query, :order_by)
        order_by = [{rows_spec.order, rows_spec.order_by}]
        from(q in query, order_by: ^order_by)
      else
        query
      end
    end
  else
    defp prepare_query(_queryable, _rows_spec), do: raise("Ecto is missing")
  end
end
|
lib/kino/ecto.ex
| 0.903334 | 0.620808 |
ecto.ex
|
starcoder
|
defmodule Data.Exit do
  @moduledoc """
  Exit Schema
  """

  use Data.Schema

  alias Data.Room
  alias Data.Zone

  # Closed set of valid exit directions, also used for validation below.
  @directions [
    "north",
    "east",
    "south",
    "west",
    "up",
    "down",
    "in",
    "out",
    "north west",
    "north east",
    "south west",
    "south east"
  ]

  schema "exits" do
    field(:direction, :string)
    field(:has_door, :boolean, default: false)
    field(:door_id, Ecto.UUID)

    # Virtual unified ids resolved from room/overworld ids in setup_exit/1.
    field(:start_id, :string, virtual: true)
    field(:finish_id, :string, virtual: true)

    field(:start_overworld_id, :string)
    field(:finish_overworld_id, :string)

    belongs_to(:start_room, Room)
    belongs_to(:start_zone, Zone)
    belongs_to(:finish_room, Room)
    belongs_to(:finish_zone, Zone)

    timestamps()
  end

  @doc """
  Get a list of directions
  """
  @spec directions() :: [String.t()]
  def directions(), do: @directions

  @doc """
  Casts and validates exit attributes.

  Exactly one of start_room_id/start_overworld_id and one of
  finish_room_id/finish_overworld_id must be present.
  """
  def changeset(struct, params) do
    struct
    |> cast(params, [
      :direction,
      :has_door,
      :door_id,
      :start_room_id,
      :finish_room_id,
      :start_overworld_id,
      :finish_overworld_id
    ])
    |> cast(params, [:start_zone_id, :finish_zone_id])
    |> validate_required([:direction, :has_door])
    |> validate_inclusion(:direction, @directions)
    |> validate_one_of([:start_room_id, :start_overworld_id])
    |> validate_one_of([:finish_room_id, :finish_overworld_id])
    |> foreign_key_constraint(:start_room_id)
    |> foreign_key_constraint(:finish_room_id)
    |> unique_constraint(:start_room_id, name: :exits_direction_start_room_id_index)
    |> unique_constraint(:start_overworld_id, name: :exits_direction_start_overworld_id_index)
    |> unique_constraint(:finish_room_id, name: :exits_direction_finish_room_id_index)
    |> unique_constraint(:finish_overworld_id, name: :exits_direction_finish_overworld_id_index)
  end

  # Ensures exactly one of `keys` has a value; otherwise errors every key.
  # NOTE(review): when *none* of the keys is set, the "cannot be combined"
  # message is misleading — confirm whether a "one of ... is required"
  # message is wanted (changing it may break existing assertions).
  defp validate_one_of(changeset, keys) do
    keys =
      Enum.map(keys, fn key ->
        {key, get_field(changeset, key)}
      end)

    keys_with_values = Enum.filter(keys, fn {_key, value} -> !is_nil(value) end)

    case length(keys_with_values) == 1 do
      true ->
        changeset

      false ->
        Enum.reduce(keys, changeset, fn {key, _value}, changeset ->
          add_error(changeset, key, "cannot be combined with other values")
        end)
    end
  end

  @doc """
  Load all exits for a room

  Adds them to the room as `exits`. Pass `preload: true` to preload the
  room/zone associations on each exit.
  """
  # FIX: the spec now covers the optional `opts` argument (the function has
  # arity 2 with a default).
  @spec load_exits(Room.t(), keyword) :: Room.t()
  def load_exits(room, opts \\ []) do
    query = where(__MODULE__, [e], e.start_room_id == ^room.id)

    query =
      case Keyword.get(opts, :preload) do
        true ->
          query |> preload([:start_room, :finish_room, :start_zone, :finish_zone])

        _ ->
          query
      end

    exits =
      query
      |> Repo.all()
      |> Enum.map(&setup_exit/1)

    %{room | exits: exits}
  end

  @doc """
  Load all exits for a zone

  Adds them to the zone as `exits`
  """
  @spec load_zone_exits(Zone.t()) :: Zone.t()
  def load_zone_exits(zone) do
    exits =
      __MODULE__
      |> where([e], e.start_zone_id == ^zone.id)
      |> Repo.all()
      |> Enum.map(&setup_exit/1)

    %{zone | exits: exits}
  end

  @doc """
  Sets up exits for the overworld

  iex> room_exit = Data.Exit.setup_exit(%{start_room_id: 1, finish_room_id: 1})
  iex> %{start_id: 1, finish_id: 1} == Map.take(room_exit, [:start_id, :finish_id])
  true

  iex> room_exit = Data.Exit.setup_exit(%{start_overworld_id: "overworld", finish_room_id: 1})
  iex> %{start_id: "overworld", finish_id: 1} == Map.take(room_exit, [:start_id, :finish_id])
  true

  iex> room_exit = Data.Exit.setup_exit(%{start_room_id: 1, finish_overworld_id: "overworld"})
  iex> %{start_id: 1, finish_id: "overworld"} == Map.take(room_exit, [:start_id, :finish_id])
  true

  iex> room_exit = Data.Exit.setup_exit(%{start_overworld_id: "overworld", finish_overworld_id: "overworld"})
  iex> %{start_id: "overworld", finish_id: "overworld"} == Map.take(room_exit, [:start_id, :finish_id])
  true
  """
  def setup_exit(room_exit) do
    room_exit
    |> fallthrough(:start_id, :start_room_id, :start_overworld_id)
    |> fallthrough(:finish_id, :finish_room_id, :finish_overworld_id)
  end

  # Copies `base_field` into `field`, falling back to `fallthrough_field`
  # when the base field is nil.
  defp fallthrough(struct, field, base_field, fallthrough_field) do
    case Map.get(struct, base_field) do
      nil ->
        Map.put(struct, field, Map.get(struct, fallthrough_field))

      value ->
        Map.put(struct, field, value)
    end
  end

  @doc """
  Check if a string is a valid exit

  iex> Data.Exit.exit?("north")
  true

  iex> Data.Exit.exit?("outside")
  false
  """
  @spec exit?(String.t()) :: boolean()
  def exit?(direction), do: direction in @directions

  @doc """
  From a direction find the opposite direction's id

  iex> Data.Exit.opposite("north")
  "south"
  iex> Data.Exit.opposite("east")
  "west"
  iex> Data.Exit.opposite("south")
  "north"
  iex> Data.Exit.opposite("west")
  "east"
  iex> Data.Exit.opposite("up")
  "down"
  iex> Data.Exit.opposite("down")
  "up"
  iex> Data.Exit.opposite("in")
  "out"
  iex> Data.Exit.opposite("out")
  "in"
  iex> Data.Exit.opposite("north west")
  "south east"
  iex> Data.Exit.opposite("north east")
  "south west"
  iex> Data.Exit.opposite("south west")
  "north east"
  iex> Data.Exit.opposite("south east")
  "north west"
  """
  # FIX: was `opposite(String.t() | atom) :: atom` — every clause both
  # accepts and returns a string (see the doctests above).
  @spec opposite(String.t()) :: String.t()
  def opposite("north"), do: "south"
  def opposite("east"), do: "west"
  def opposite("south"), do: "north"
  def opposite("west"), do: "east"
  def opposite("up"), do: "down"
  def opposite("down"), do: "up"
  def opposite("in"), do: "out"
  def opposite("out"), do: "in"
  def opposite("north west"), do: "south east"
  def opposite("north east"), do: "south west"
  def opposite("south west"), do: "north east"
  def opposite("south east"), do: "north west"

  @doc """
  Get an exit in a direction
  """
  # FIX: the return type referenced `Exit.t()`, but no `Exit` module/alias is
  # in scope here — spell the struct type out directly.
  @spec exit_to(Room.t(), String.t() | atom) :: %__MODULE__{} | nil
  def exit_to(room, direction) do
    Enum.find(room.exits, &(&1.direction == direction))
  end
end
|
lib/data/exit.ex
| 0.756627 | 0.524456 |
exit.ex
|
starcoder
|
defmodule Penelope.ML.Registry do
  @moduledoc """
  The ML pipeline registry decouples the names of pipeline components from
  their module names, so that modules can be refactored without breaking
  stored models. The built-in Penelope components are registered automatically,
  but custom components can be added via the `register` function.

  Inverse lookups are also supported for exporting compiled models. The
  registry falls back on module atoms for unregistered components.
  """

  @agent __MODULE__

  # Built-in component name -> module mapping, installed at agent startup.
  @defaults %{
    feature_stack: Penelope.ML.Feature.StackVectorizer,
    feature_merge: Penelope.ML.Feature.MergeFeaturizer,
    context_featurizer: Penelope.ML.Feature.ContextFeaturizer,
    lowercase_preprocessor: Penelope.ML.Text.LowercasePreprocessor,
    token_filter: Penelope.ML.Text.TokenFilter,
    ptb_tokenizer: Penelope.ML.Text.PTBTokenizer,
    ptb_digit_tokenizer: Penelope.ML.Text.PTBDigitTokenizer,
    count_vectorizer: Penelope.ML.Text.CountVectorizer,
    regex_vectorizer: Penelope.ML.Text.RegexVectorizer,
    pos_featurizer: Penelope.ML.Text.POSFeaturizer,
    token_featurizer: Penelope.ML.Text.TokenFeaturizer,
    word2vec_mean_vectorizer: Penelope.ML.Word2vec.MeanVectorizer,
    linear_classifier: Penelope.ML.Linear.Classifier,
    svm_classifier: Penelope.ML.SVM.Classifier,
    crf_tagger: Penelope.ML.CRF.Tagger
  }

  @doc """
  starts the registry process
  """
  @spec start_link() :: {:ok, pid} | {:error, any}
  def start_link do
    Agent.start_link(fn -> @defaults end, name: @agent)
  end

  @doc """
  adds a new alias for a pipeline component to the registry
  """
  @spec register(name :: String.t() | atom, module :: atom) :: :ok
  def register(name, module) when is_binary(name) do
    name
    |> String.to_atom()
    |> register(module)
  end

  def register(name, module) do
    Agent.update(@agent, &Map.put(&1, name, module))
  end

  @doc """
  locates a pipeline component module from its name, falling back on the
  module itself
  """
  @spec lookup(name :: String.t() | atom) :: atom
  def lookup(name) when is_binary(name) do
    name
    |> String.to_atom()
    |> lookup()
  end

  def lookup(name) do
    resolved = Agent.get(@agent, &Map.get(&1, name, name))

    case Code.ensure_loaded(resolved) do
      {:module, module} -> module
      _ -> raise ArgumentError, message: "invalid name #{name}"
    end
  end

  @doc """
  performs a module->name reverse lookup
  """
  @spec invert(module :: atom) :: String.t()
  def invert(module) do
    @agent
    |> Agent.get(&Map.new(&1, fn {name, mod} -> {mod, name} end))
    |> Map.get(module, module)
    |> to_string()
  end
end
|
lib/penelope/ml/registry.ex
| 0.882466 | 0.560102 |
registry.ex
|
starcoder
|
defmodule Bunt.ANSI.Sequence do
  @moduledoc false

  # Defines `alias_name/0` delegating to `original_name/0`, plus a private
  # `format_sequence/1` clause so the alias atom works in Bunt.ANSI.format/2.
  defmacro defalias(alias_name, original_name) do
    quote bind_quoted: [alias_name: alias_name, original_name: original_name] do
      def unquote(alias_name)() do
        unquote(original_name)()
      end

      defp format_sequence(unquote(alias_name)) do
        unquote(original_name)()
      end
    end
  end

  # Defines `name/0` returning the raw escape string
  # "\e[<prefix><code><terminator>", plus a private `format_sequence/1`
  # clause mapping the atom to that string.
  defmacro defsequence(name, code, prefix \\ "", terminator \\ "m") do
    quote bind_quoted: [name: name, code: code, prefix: prefix, terminator: terminator] do
      def unquote(name)() do
        "\e[#{unquote(prefix)}#{unquote(code)}#{unquote(terminator)}"
      end

      defp format_sequence(unquote(name)) do
        unquote(name)()
      end
    end
  end
end
defmodule Bunt.ANSI do
  @moduledoc """
  Functionality to render ANSI escape sequences.

  [ANSI escape sequences](https://en.wikipedia.org/wiki/ANSI_escape_code)
  are characters embedded in text used to control formatting, color, and
  other output options on video text terminals.
  """

  import Bunt.ANSI.Sequence

  # {friendly_name | nil, :colorN atom, xterm-256 code, {r, g, b}} for the
  # 256-color palette entries 16..255. Named entries additionally get
  # name/0 and name_background/0 functions generated below.
  # NOTE(review): entries 232..255 all carry {255, 255, 255}, but xterm-256
  # defines 232..255 as a grayscale ramp — confirm whether the RGB column is
  # intentionally collapsed here (it only affects consumers of color_tuples/0).
  @color_tuples [
    {nil, :color16, 16, {0, 0, 0}},
    {nil, :color17, 17, {0, 0, 95}},
    {"darkblue", :color18, 18, {0, 0, 135}},
    {nil, :color19, 19, {0, 0, 175}},
    {"mediumblue", :color20, 20, {0, 0, 215}},
    {nil, :color21, 21, {0, 0, 255}},
    {"darkgreen", :color22, 22, {0, 95, 0}},
    {"darkslategray", :color23, 23, {0, 95, 95}},
    {nil, :color24, 24, {0, 95, 135}},
    {nil, :color25, 25, {0, 95, 175}},
    {nil, :color26, 26, {0, 95, 215}},
    {nil, :color27, 27, {0, 95, 255}},
    {nil, :color28, 28, {0, 135, 0}},
    {nil, :color29, 29, {0, 135, 95}},
    {"darkcyan", :color30, 30, {0, 135, 135}},
    {nil, :color31, 31, {0, 135, 175}},
    {nil, :color32, 32, {0, 135, 215}},
    {nil, :color33, 33, {0, 135, 255}},
    {nil, :color34, 34, {0, 175, 0}},
    {nil, :color35, 35, {0, 175, 95}},
    {nil, :color36, 36, {0, 175, 135}},
    {nil, :color37, 37, {0, 175, 175}},
    {nil, :color38, 38, {0, 175, 215}},
    {"deepskyblue", :color39, 39, {0, 175, 255}},
    {nil, :color40, 40, {0, 215, 0}},
    {nil, :color41, 41, {0, 215, 95}},
    {nil, :color42, 42, {0, 215, 135}},
    {nil, :color43, 43, {0, 215, 175}},
    {nil, :color44, 44, {0, 215, 215}},
    {nil, :color45, 45, {0, 215, 255}},
    {nil, :color46, 46, {0, 255, 0}},
    {nil, :color47, 47, {0, 255, 95}},
    {"springgreen", :color48, 48, {0, 255, 135}},
    {nil, :color49, 49, {0, 255, 175}},
    {nil, :color50, 50, {0, 255, 215}},
    {"aqua", :color51, 51, {0, 255, 255}},
    {nil, :color52, 52, {95, 0, 0}},
    {nil, :color53, 53, {95, 0, 95}},
    {nil, :color54, 54, {95, 0, 135}},
    {nil, :color55, 55, {95, 0, 175}},
    {nil, :color56, 56, {95, 0, 215}},
    {nil, :color57, 57, {95, 0, 255}},
    {nil, :color58, 58, {95, 95, 0}},
    {"dimgray", :color59, 59, {95, 95, 95}},
    {nil, :color60, 60, {95, 95, 135}},
    {nil, :color61, 61, {95, 95, 175}},
    {nil, :color62, 62, {95, 95, 215}},
    {nil, :color63, 63, {95, 95, 255}},
    {nil, :color64, 64, {95, 135, 0}},
    {nil, :color65, 65, {95, 135, 95}},
    {nil, :color66, 66, {95, 135, 135}},
    {"steelblue", :color67, 67, {95, 135, 175}},
    {nil, :color68, 68, {95, 135, 215}},
    {nil, :color69, 69, {95, 135, 255}},
    {nil, :color70, 70, {95, 175, 0}},
    {nil, :color71, 71, {95, 175, 95}},
    {nil, :color72, 72, {95, 175, 135}},
    {nil, :color73, 73, {95, 175, 175}},
    {nil, :color74, 74, {95, 175, 215}},
    {nil, :color75, 75, {95, 175, 255}},
    {nil, :color76, 76, {95, 215, 0}},
    {nil, :color77, 77, {95, 215, 95}},
    {nil, :color78, 78, {95, 215, 135}},
    {nil, :color79, 79, {95, 215, 175}},
    {nil, :color80, 80, {95, 215, 215}},
    {nil, :color81, 81, {95, 215, 255}},
    {nil, :color82, 82, {95, 255, 0}},
    {nil, :color83, 83, {95, 255, 95}},
    {nil, :color84, 84, {95, 255, 135}},
    {nil, :color85, 85, {95, 255, 175}},
    {nil, :color86, 86, {95, 255, 215}},
    {nil, :color87, 87, {95, 255, 255}},
    {"darkred", :color88, 88, {135, 0, 0}},
    {nil, :color89, 89, {135, 0, 95}},
    {"darkmagenta", :color90, 90, {135, 0, 135}},
    {nil, :color91, 91, {135, 0, 175}},
    {nil, :color92, 92, {135, 0, 215}},
    {nil, :color93, 93, {135, 0, 255}},
    {nil, :color94, 94, {135, 95, 0}},
    {nil, :color95, 95, {135, 95, 95}},
    {nil, :color96, 96, {135, 95, 135}},
    {nil, :color97, 97, {135, 95, 175}},
    {nil, :color98, 98, {135, 95, 215}},
    {nil, :color99, 99, {135, 95, 255}},
    {"olive", :color100, 100, {135, 135, 0}},
    {nil, :color101, 101, {135, 135, 95}},
    {nil, :color102, 102, {135, 135, 135}},
    {nil, :color103, 103, {135, 135, 175}},
    {nil, :color104, 104, {135, 135, 215}},
    {nil, :color105, 105, {135, 135, 255}},
    {nil, :color106, 106, {135, 175, 0}},
    {nil, :color107, 107, {135, 175, 95}},
    {nil, :color108, 108, {135, 175, 135}},
    {nil, :color109, 109, {135, 175, 175}},
    {nil, :color110, 110, {135, 175, 215}},
    {nil, :color111, 111, {135, 175, 255}},
    {nil, :color112, 112, {135, 215, 0}},
    {nil, :color113, 113, {135, 215, 95}},
    {nil, :color114, 114, {135, 215, 135}},
    {nil, :color115, 115, {135, 215, 175}},
    {nil, :color116, 116, {135, 215, 215}},
    {nil, :color117, 117, {135, 215, 255}},
    {"chartreuse", :color118, 118, {135, 255, 0}},
    {nil, :color119, 119, {135, 255, 95}},
    {nil, :color120, 120, {135, 255, 135}},
    {nil, :color121, 121, {135, 255, 175}},
    {"aquamarine", :color122, 122, {135, 255, 215}},
    {nil, :color123, 123, {135, 255, 255}},
    {nil, :color124, 124, {175, 0, 0}},
    {nil, :color125, 125, {175, 0, 95}},
    {nil, :color126, 126, {175, 0, 135}},
    {nil, :color127, 127, {175, 0, 175}},
    {nil, :color128, 128, {175, 0, 215}},
    {nil, :color129, 129, {175, 0, 255}},
    {nil, :color130, 130, {175, 95, 0}},
    {nil, :color131, 131, {175, 95, 95}},
    {nil, :color132, 132, {175, 95, 135}},
    {nil, :color133, 133, {175, 95, 175}},
    {nil, :color134, 134, {175, 95, 215}},
    {nil, :color135, 135, {175, 95, 255}},
    {nil, :color136, 136, {175, 135, 0}},
    {nil, :color137, 137, {175, 135, 95}},
    {nil, :color138, 138, {175, 135, 135}},
    {nil, :color139, 139, {175, 135, 175}},
    {nil, :color140, 140, {175, 135, 215}},
    {nil, :color141, 141, {175, 135, 255}},
    {nil, :color142, 142, {175, 175, 0}},
    {nil, :color143, 143, {175, 175, 95}},
    {nil, :color144, 144, {175, 175, 135}},
    {nil, :color145, 145, {175, 175, 175}},
    {nil, :color146, 146, {175, 175, 215}},
    {nil, :color147, 147, {175, 175, 255}},
    {nil, :color148, 148, {175, 215, 0}},
    {nil, :color149, 149, {175, 215, 95}},
    {nil, :color150, 150, {175, 215, 135}},
    {nil, :color151, 151, {175, 215, 175}},
    {nil, :color152, 152, {175, 215, 215}},
    {nil, :color153, 153, {175, 215, 255}},
    {"greenyellow", :color154, 154, {175, 255, 0}},
    {nil, :color155, 155, {175, 255, 95}},
    {nil, :color156, 156, {175, 255, 135}},
    {nil, :color157, 157, {175, 255, 175}},
    {nil, :color158, 158, {175, 255, 215}},
    {nil, :color159, 159, {175, 255, 255}},
    {nil, :color160, 160, {215, 0, 0}},
    {nil, :color161, 161, {215, 0, 95}},
    {nil, :color162, 162, {215, 0, 135}},
    {nil, :color163, 163, {215, 0, 175}},
    {nil, :color164, 164, {215, 0, 215}},
    {nil, :color165, 165, {215, 0, 255}},
    {nil, :color166, 166, {215, 95, 0}},
    {nil, :color167, 167, {215, 95, 95}},
    {nil, :color168, 168, {215, 95, 135}},
    {nil, :color169, 169, {215, 95, 175}},
    {nil, :color170, 170, {215, 95, 215}},
    {nil, :color171, 171, {215, 95, 255}},
    {"chocolate", :color172, 172, {215, 135, 0}},
    {nil, :color173, 173, {215, 135, 95}},
    {nil, :color174, 174, {215, 135, 135}},
    {nil, :color175, 175, {215, 135, 175}},
    {nil, :color176, 176, {215, 135, 215}},
    {nil, :color177, 177, {215, 135, 255}},
    {"goldenrod", :color178, 178, {215, 175, 0}},
    {nil, :color179, 179, {215, 175, 95}},
    {nil, :color180, 180, {215, 175, 135}},
    {nil, :color181, 181, {215, 175, 175}},
    {nil, :color182, 182, {215, 175, 215}},
    {nil, :color183, 183, {215, 175, 255}},
    {nil, :color184, 184, {215, 215, 0}},
    {nil, :color185, 185, {215, 215, 95}},
    {nil, :color186, 186, {215, 215, 135}},
    {nil, :color187, 187, {215, 215, 175}},
    {"lightgray", :color188, 188, {215, 215, 215}},
    {nil, :color189, 189, {215, 215, 255}},
    {nil, :color190, 190, {215, 255, 0}},
    {nil, :color191, 191, {215, 255, 95}},
    {nil, :color192, 192, {215, 255, 135}},
    {nil, :color193, 193, {215, 255, 175}},
    {"beige", :color194, 194, {215, 255, 215}},
    {"lightcyan", :color195, 195, {215, 255, 255}},
    {nil, :color196, 196, {255, 0, 0}},
    {nil, :color197, 197, {255, 0, 95}},
    {nil, :color198, 198, {255, 0, 135}},
    {nil, :color199, 199, {255, 0, 175}},
    {nil, :color200, 200, {255, 0, 215}},
    {"fuchsia", :color201, 201, {255, 0, 255}},
    {"orangered", :color202, 202, {255, 95, 0}},
    {nil, :color203, 203, {255, 95, 95}},
    {nil, :color204, 204, {255, 95, 135}},
    {"hotpink", :color205, 205, {255, 95, 175}},
    {nil, :color206, 206, {255, 95, 215}},
    {nil, :color207, 207, {255, 95, 255}},
    {"darkorange", :color208, 208, {255, 135, 0}},
    {"coral", :color209, 209, {255, 135, 95}},
    {nil, :color210, 210, {255, 135, 135}},
    {nil, :color211, 211, {255, 135, 175}},
    {nil, :color212, 212, {255, 135, 215}},
    {nil, :color213, 213, {255, 135, 255}},
    {"orange", :color214, 214, {255, 175, 0}},
    {nil, :color215, 215, {255, 175, 95}},
    {nil, :color216, 216, {255, 175, 135}},
    {nil, :color217, 217, {255, 175, 175}},
    {nil, :color218, 218, {255, 175, 215}},
    {nil, :color219, 219, {255, 175, 255}},
    {"gold", :color220, 220, {255, 215, 0}},
    {nil, :color221, 221, {255, 215, 95}},
    {"khaki", :color222, 222, {255, 215, 135}},
    {"moccasin", :color223, 223, {255, 215, 175}},
    {"mistyrose", :color224, 224, {255, 215, 215}},
    {nil, :color225, 225, {255, 215, 255}},
    {nil, :color226, 226, {255, 255, 0}},
    {nil, :color227, 227, {255, 255, 95}},
    {nil, :color228, 228, {255, 255, 135}},
    {nil, :color229, 229, {255, 255, 175}},
    {"lightyellow", :color230, 230, {255, 255, 215}},
    {nil, :color231, 231, {255, 255, 255}},
    {nil, :color232, 232, {255, 255, 255}},
    {nil, :color233, 233, {255, 255, 255}},
    {nil, :color234, 234, {255, 255, 255}},
    {nil, :color235, 235, {255, 255, 255}},
    {nil, :color236, 236, {255, 255, 255}},
    {nil, :color237, 237, {255, 255, 255}},
    {nil, :color238, 238, {255, 255, 255}},
    {nil, :color239, 239, {255, 255, 255}},
    {nil, :color240, 240, {255, 255, 255}},
    {nil, :color241, 241, {255, 255, 255}},
    {nil, :color242, 242, {255, 255, 255}},
    {nil, :color243, 243, {255, 255, 255}},
    {nil, :color244, 244, {255, 255, 255}},
    {nil, :color245, 245, {255, 255, 255}},
    {nil, :color246, 246, {255, 255, 255}},
    {nil, :color247, 247, {255, 255, 255}},
    {nil, :color248, 248, {255, 255, 255}},
    {nil, :color249, 249, {255, 255, 255}},
    {nil, :color250, 250, {255, 255, 255}},
    {nil, :color251, 251, {255, 255, 255}},
    {nil, :color252, 252, {255, 255, 255}},
    {nil, :color253, 253, {255, 255, 255}},
    {nil, :color254, 254, {255, 255, 255}},
    {nil, :color255, 255, {255, 255, 255}}
  ]

  def color_tuples, do: @color_tuples

  # Generate foreground ("38;5;<code>") and background ("48;5;<code>")
  # sequence functions for every palette entry, plus friendly-named
  # variants where a name exists.
  for {name, color, code, _} <- @color_tuples do
    @doc "Sets foreground color to #{color}"
    defsequence(color, code, "38;5;")

    @doc "Sets background color to #{color}"
    defsequence(:"#{color}_background", code, "48;5;")

    if name do
      @doc "Sets foreground color to #{name}"
      defsequence(:"#{name}", code, "38;5;")

      @doc "Sets background color to #{name}"
      defsequence(:"#{name}_background", code, "48;5;")
    end
  end

  # NOTE(review): reading app env in the module body freezes configuration at
  # compile time; on Elixir >= 1.10 `Application.compile_env/3` states that
  # intent explicitly — confirm before changing.
  @color_aliases Application.get_env(:bunt, :color_aliases, [])

  def color_aliases, do: @color_aliases

  # User-configured aliases get both a foreground and a background delegate.
  for {alias_name, original_name} <- @color_aliases do
    defalias(alias_name, original_name)
    defalias(:"#{alias_name}_background", :"#{original_name}_background")
  end

  @typep ansicode :: atom()
  @typep ansilist ::
           maybe_improper_list(
             char() | ansicode() | binary() | ansilist(),
             binary() | ansicode() | []
           )
  @type ansidata :: ansilist() | ansicode() | binary()

  @doc """
  Checks if ANSI coloring is supported and enabled on this machine.

  This function simply reads the configuration value for
  `:ansi_enabled` in the `:elixir` application. The value is by
  default `false` unless Elixir can detect during startup that
  both `stdout` and `stderr` are terminals.
  """
  @spec enabled? :: boolean
  def enabled? do
    Application.get_env(:elixir, :ansi_enabled, false)
  end

  @doc "Resets all attributes"
  defsequence(:reset, 0)

  @doc "Bright (increased intensity) or Bold"
  defsequence(:bright, 1)

  @doc "Faint (decreased intensity), not widely supported"
  defsequence(:faint, 2)

  @doc "Italic: on. Not widely supported. Sometimes treated as inverse"
  defsequence(:italic, 3)

  @doc "Underline: Single"
  defsequence(:underline, 4)

  @doc "Blink: Slow. Less than 150 per minute"
  defsequence(:blink_slow, 5)

  @doc "Blink: Rapid. MS-DOS ANSI.SYS; 150 per minute or more; not widely supported"
  defsequence(:blink_rapid, 6)

  @doc "Image: Negative. Swap foreground and background"
  defsequence(:inverse, 7)

  @doc "Image: Negative. Swap foreground and background"
  defsequence(:reverse, 7)

  @doc "Conceal. Not widely supported"
  defsequence(:conceal, 8)

  @doc "Crossed-out. Characters legible, but marked for deletion. Not widely supported"
  defsequence(:crossed_out, 9)

  @doc "Sets primary (default) font"
  defsequence(:primary_font, 10)

  for font_n <- [1, 2, 3, 4, 5, 6, 7, 8, 9] do
    @doc "Sets alternative font #{font_n}"
    defsequence(:"font_#{font_n}", font_n + 10)
  end

  @doc "Normal color or intensity"
  defsequence(:normal, 22)

  @doc "Not italic"
  defsequence(:not_italic, 23)

  @doc "Underline: None"
  defsequence(:no_underline, 24)

  @doc "Blink: off"
  defsequence(:blink_off, 25)

  # Classic 8-color foreground (30-37) and background (40-47) sequences.
  colors = [:black, :red, :green, :yellow, :blue, :magenta, :cyan, :white]

  for {color, code} <- Enum.with_index(colors) do
    @doc "Sets foreground color to #{color}"
    defsequence(color, code + 30)

    @doc "Sets background color to #{color}"
    defsequence(:"#{color}_background", code + 40)
  end

  @doc "Default text color"
  defsequence(:default_color, 39)

  @doc "Default background color"
  defsequence(:default_background, 49)

  @doc "Framed"
  defsequence(:framed, 51)

  @doc "Encircled"
  defsequence(:encircled, 52)

  @doc "Overlined"
  defsequence(:overlined, 53)

  @doc "Not framed or encircled"
  defsequence(:not_framed_encircled, 54)

  @doc "Not overlined"
  defsequence(:not_overlined, 55)

  @doc "Sends cursor home"
  defsequence(:home, "", "H")

  @doc "Clears screen"
  defsequence(:clear, "2", "J")

  @doc "Clears line"
  defsequence(:clear_line, "2", "K")

  # Catch-all clause after all macro-generated format_sequence/1 clauses:
  # any atom without a generated sequence is an error.
  defp format_sequence(other) do
    raise ArgumentError, "invalid ANSI sequence specification: #{other}"
  end

  # NOTE(review): the doctest examples below exercise IO.ANSI rather than
  # Bunt.ANSI — presumably copied from Elixir core; confirm intended.
  @doc ~S"""
  Formats a chardata-like argument by converting named ANSI sequences into actual
  ANSI codes.

  The named sequences are represented by atoms.

  It will also append an `IO.ANSI.reset/0` to the chardata when a conversion is
  performed. If you don't want this behaviour, use `format_fragment/2`.

  An optional boolean parameter can be passed to enable or disable
  emitting actual ANSI codes. When `false`, no ANSI codes will emitted.
  By default checks if ANSI is enabled using the `enabled?/0` function.

  ## Examples

      iex> IO.ANSI.format(["Hello, ", :red, :bright, "world!"], true)
      [[[[[[], "Hello, "] | "\e[31m"] | "\e[1m"], "world!"] | "\e[0m"]

  """
  def format(chardata, emit \\ enabled?()) when is_boolean(emit) do
    do_format(chardata, [], [], emit, :maybe)
  end

  @doc ~S"""
  Formats a chardata-like argument by converting named ANSI sequences into actual
  ANSI codes.

  The named sequences are represented by atoms.

  An optional boolean parameter can be passed to enable or disable
  emitting actual ANSI codes. When `false`, no ANSI codes will emitted.
  By default checks if ANSI is enabled using the `enabled?/0` function.

  ## Examples

      iex> IO.ANSI.format_fragment([:bright, 'Word'], true)
      [[[[[[] | "\e[1m"], 87], 111], 114], 100]

  """
  def format_fragment(chardata, emit \\ enabled?()) when is_boolean(emit) do
    do_format(chardata, [], [], emit, false)
  end

  # Iodata walker: `rem` is a stack of pending tails, `acc` the improper-list
  # accumulator, `emit` whether codes are emitted, and `append_reset` whether
  # a reset should be appended once any sequence was emitted (:maybe -> true).
  defp do_format([term | rest], rem, acc, emit, append_reset) do
    do_format(term, [rest | rem], acc, emit, append_reset)
  end

  defp do_format(term, rem, acc, true, append_reset) when is_atom(term) do
    do_format([], rem, [acc | format_sequence(term)], true, !!append_reset)
  end

  defp do_format(term, rem, acc, false, append_reset) when is_atom(term) do
    do_format([], rem, acc, false, append_reset)
  end

  defp do_format(term, rem, acc, emit, append_reset) when not is_list(term) do
    do_format([], rem, [acc | [term]], emit, append_reset)
  end

  defp do_format([], [next | rest], acc, emit, append_reset) do
    do_format(next, rest, acc, emit, append_reset)
  end

  defp do_format([], [], acc, true, true) do
    [acc | IO.ANSI.reset()]
  end

  defp do_format([], [], acc, _emit, _append_reset) do
    acc
  end
end
|
lib/bunt_ansi.ex
| 0.641085 | 0.44348 |
bunt_ansi.ex
|
starcoder
|
defmodule Tarearbol.Scheduler do
  @moduledoc """
  Cron-like task scheduler. Accepts both static and dynamic configurations.

  ### Usage

  Add `Tarearbol.Scheduler` to the list of supervised workers. It would attempt
  to read the static configuration (see below) and start the `DynamicSupervisor`
  with all the scheduled jobs as supervised workers.

  The `runner` is the function of arity zero, that should return `{:ok, result}`
  tuple upon completion. The job will be rescheduled according to its schedule.
  The last result returned will be stored in the state and might be retrieved
  later with `get/1` passing the job name.

  ### Static Configuration

  Upon start it looks up the `:tarearbol` section of `Mix.Project` for
  `:jobs` and `:jobs_file` keys. The latter has a default `.tarearbol.exs`.
  This won’t work with releases.

  Also it looks up the `:tarearbol, :jobs` section of `config.exs`. Everything
  found is unioned. Jobs with the same names are overridden; the file has
  precedence over project config, the application config has least precedence.

  If found, jobs as a list of tuples of `{name, runner, schedule}` are scheduled.
  These are expected to be in the following form.

  - `name` might be whatever, used to refer to the job during its lifetime
  - `runner` might be either `{module, function}` tuple or a reference to the function of arity zero (`&Foo.bar/0`)
  - `schedule` in standard cron notation, see https://crontab.guru

  ### Dynamic Configuration

  Use `Tarearbol.Scheduler.push/3`, `Tarearbol.Scheduler.pop/1` to add/remove jobs
  temporarily and/or `Tarearbol.Scheduler.push!/3`, `Tarearbol.Scheduler.pop!/1` to
  reflect changes in the configuration file.

  ```elixir
  Tarearbol.Scheduler.push(TestJob, &Foo.bar/0, "3-5/1 9-18 * * 6-7")
  ```
  """

  use Boundary,
    deps: [
      Tarearbol.Crontab,
      Tarearbol.DynamicManager,
      Tarearbol.InternalWorker,
      Tarearbol.Telemetria
    ],
    exports: [State]

  use Tarearbol.DynamicManager

  @typedoc """
  Type of the job runner, an `{m, f}` tuple or a function of arity zero,
  returning one of the outcomes below
  """
  @type runner ::
          {atom(), atom()} | (() -> :halt | {:ok | {:reschedule, binary()}, any()})

  @typedoc """
  Type of possible job schedules: binary cron format, `Time` to be executed once
  `DateTime` for the daily execution
  """
  @type schedule :: binary() | non_neg_integer() | DateTime.t() | Time.t()

  defmodule Job do
    @moduledoc """
    A struct holding the job description. Used internally by `Tarearbol.Scheduler`
    to preserve a list of scheduled jobs.
    """

    @typedoc "The struct containing the information about the job"
    @type t :: %Job{}

    defstruct [:name, :module, :runner, :schedule]

    @doc "The implementation to be run on schedule"
    @callback run :: Tarearbol.Scheduler.runner()

    @doc "Produces a `Tarearbol.Scheduler.Job` by parameters given"
    @spec create(
            name :: binary(),
            runner :: Tarearbol.Scheduler.runner(),
            schedule :: Tarearbol.Scheduler.schedule()
          ) :: t()
    def create(name, runner, schedule) do
      # Normalize the schedule: integers (ms offset from now) and `DateTime`s
      # mean a one-off run (`once?` = true); `Time` and cron strings mean
      # recurring runs. Structs are `Macro.escape`d for embedding into the AST.
      {once?, schedule} =
        case schedule do
          msecs when is_integer(msecs) ->
            {true, Macro.escape(DateTime.add(DateTime.utc_now(), schedule, :millisecond))}

          %Time{} ->
            # Daily execution at the given wall-clock time, as a cron line.
            {false, Tarearbol.Crontab.to_cron(schedule)}

          %DateTime{} = hour_x ->
            {true, Macro.escape(hour_x)}

          crontab when is_binary(crontab) ->
            {false, crontab}
        end

      # Build the generated module's `run/0`: one-off runners return `:halt`
      # so the manager drops the job; recurring runners wrap the result in
      # `{:ok, _}` so it gets rescheduled.
      run_ast =
        case {once?, runner} do
          {true, {m, f}} ->
            quote do
              def run do
                apply(unquote(m), unquote(f), [])
                :halt
              end
            end

          {false, {m, f}} ->
            quote do
              def run, do: {:ok, apply(unquote(m), unquote(f), [])}
            end

          {true, f} when is_function(f, 0) ->
            f = Macro.escape(f)

            quote do
              def run do
                unquote(f).()
                :halt
              end
            end

          {false, f} when is_function(f, 0) ->
            f = Macro.escape(f)

            quote do
              def run, do: {:ok, unquote(f).()}
            end
        end

      ast = [
        quote do
          @behaviour Job

          @job struct(Job,
                 name: unquote(name),
                 module: __MODULE__,
                 runner: &__MODULE__.run/0,
                 schedule: unquote(schedule)
               )

          def job, do: @job
        end,
        run_ast
      ]

      # Compile a dedicated module per job at runtime and return its `%Job{}`.
      with {:module, module, _, _} <-
             Module.create(Module.concat(Tarearbol.Scheduler.Job, name), ast, __ENV__),
           do: module.job()
    end
  end

  use Tarearbol.Telemetria

  @impl Tarearbol.DynamicManager
  @doc false
  def children_specs,
    do: for({name, runner, schedule} <- jobs(), into: %{}, do: job!(name, runner, schedule))

  @impl Tarearbol.DynamicManager
  @doc false
  def perform(id, %{job: %Job{}} = payload),
    do: do_perform(id, payload)

  @spec do_perform(id :: Tarearbol.DynamicManager.id(), payload :: map()) :: any()
  # Conditionally attach telemetry instrumentation to the next function.
  if Tarearbol.Telemetria.use?(), do: @telemetria(Tarearbol.Telemetria.apply_options())

  defp do_perform(id, payload) do
    job = payload.job

    case job.runner.() do
      :halt ->
        # One-off job finished: tell the manager to drop it.
        :halt

      {:ok, result} ->
        # Reschedule per the job’s own schedule; `result` is kept in state.
        {{:timeout, timeout(job.schedule)}, result}

      {{:reschedule, schedule}, _result} ->
        # Replace the job with a copy running on the new schedule.
        {:replace, id, %{payload | job: %Job{job | schedule: schedule}}}
    end
  end

  @spec active_jobs :: %{Tarearbol.DynamicManager.id() => %Tarearbol.DynamicManager.Child{}}
  def active_jobs, do: state().children

  @doc """
  Creates and temporarily pushes the job to the list of currently scheduled jobs.

  For the implementation that survives restarts use `push!/3`.
  """
  @spec push(name :: binary(), runner :: runner(), schedule :: schedule()) :: :ok
  def push(name, runner, schedule) do
    {name, opts} = job!(name, runner, schedule)
    Tarearbol.Scheduler.put(name, opts)
  end

  @doc """
  Creates and pushes the job to the list of currently scheduled jobs, updates
  the permanent list of scheduled jobs.

  For the implementation that temporarily pushes a job, use `push/3`.
  """
  @spec push!(name :: binary(), runner :: runner(), schedule :: schedule()) :: :ok
  def push!(name, runner, schedule) do
    # Persist first, then schedule in the running manager.
    File.write!(config_file(), Macro.to_string([{name, runner, schedule} | jobs()]))
    push(name, runner, schedule)
  end

  @doc """
  Removes the scheduled job from the schedule by `id`.

  For the implementation that survives restarts use `pop!/1`.
  """
  @spec pop(name :: any()) :: :ok
  def pop(name), do: Tarearbol.Scheduler.del(name)

  @doc """
  Removes the scheduled job from the schedule by `id` and updates the configuration.

  For the implementation that removes jobs temporarily, use `pop/1`.
  """
  @spec pop!(name :: any()) :: :ok
  def pop!(name) do
    # Rewrite the config file without the named job, then unschedule it.
    File.write!(
      config_file(),
      Macro.to_string(for({id, _, _} = job <- jobs(), id != name, do: job))
    )

    pop(name)
  end

  # Builds the `{id, child_spec}` pair expected by the DynamicManager.
  @spec job!(name :: any(), runner :: runner(), schedule :: schedule()) :: {binary(), map()}
  defp job!(name, runner, schedule) do
    job = Job.create(name, runner, schedule)
    {inspect(name), %{payload: %{job: job}, timeout: timeout(job.schedule)}}
  end

  # Milliseconds until the next run for each schedule flavor.
  # NOTE(review): a non-positive integer schedule matches no clause and raises
  # FunctionClauseError — presumably intentional; confirm against callers.
  @spec timeout(schedule :: schedule()) :: non_neg_integer()
  defp timeout(schedule) when is_integer(schedule) and schedule > 0, do: schedule

  defp timeout(schedule) when is_binary(schedule),
    do:
      Tarearbol.Crontab.next(DateTime.utc_now(), schedule, precision: :millisecond)[:millisecond]

  defp timeout(%DateTime{} = schedule),
    do: Enum.max([0, DateTime.diff(schedule, DateTime.utc_now(), :millisecond)])

  defp timeout(%Time{} = schedule) do
    # If the time today already passed, wrap around to the same time tomorrow.
    schedule
    |> Time.diff(Time.utc_now(), :millisecond)
    |> case do
      secs when secs >= 0 -> secs
      secs -> 24 * 60 * 60 + secs
    end
    |> timeout()
  end

  # Project-level `:tarearbol` config; empty outside of Mix (e.g. releases).
  @spec config :: keyword()
  defp config,
    do:
      if(Code.ensure_loaded?(Mix), do: Keyword.get(Mix.Project.config(), :tarearbol, []), else: [])

  @spec config_file :: binary()
  defp config_file, do: Keyword.get(config(), :jobs_file, ".tarearbol.exs")

  # Union of all job sources; later entries win when collected into a map by
  # `children_specs/0`, so: app env < project config < jobs file.
  @spec jobs :: [{any(), runner(), schedule()}]
  defp jobs do
    Application.get_env(:tarearbol, :jobs, []) ++
      Keyword.get(config(), :jobs, []) ++
      if File.exists?(config_file()),
        do: config_file() |> File.read!() |> Code.eval_string(),
        else: []
  end
end
|
lib/tarearbol/scheduler.ex
| 0.874131 | 0.893774 |
scheduler.ex
|
starcoder
|
defmodule Snek.Board do
@moduledoc """
A struct for representing a board position.
This may be used to keep track of state in a game, each turn of the
game producing the next board position.
"""
@moduledoc since: "0.1.0"
alias __MODULE__
alias Board.{Point, Size, Snake}
@typedoc """
A board position.
"""
@typedoc since: "0.1.0"
@type t :: %Board{
size: Size.t,
apples: list(Point.t),
snakes: list(Snake.t)
}
@enforce_keys [
:size,
:apples,
:snakes
]
defstruct [
:size,
:apples,
:snakes
]
@typedoc """
When spawning, `{:ok, board}` if there is space available, `{:error, :occupied}` otherwise.
"""
@type spawn_result :: {:ok, t} | {:error, :occupied}
@snake_default_length 3
@snake_default_health 100
@doc """
Returns a new empty board of a given size.
## Examples
iex> Board.new(Board.Size.small)
%Board{size: %Board.Size{width: 7, height: 7}, apples: [], snakes: []}
"""
@doc since: "0.1.0"
@spec new(Size.t) :: t
def new(size) do
%Board{
size: size,
apples: [],
snakes: []
}
end
@doc """
Returns true if and only if this board is empty, otherwise false.
The board is considered empty if it does not contain any snakes or
apples.
## Examples
iex> Board.new(Board.Size.small) |> Board.empty?
true
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_apple_at_center
iex> Board.empty?(board)
false
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_snake_at_center("mysnek")
iex> Board.empty?(board)
false
"""
@doc since: "0.1.0"
@spec empty?(t) :: boolean
def empty?(%Board{apples: apples}) when length(apples) > 0, do: false
def empty?(%Board{snakes: snakes}) when length(snakes) > 0, do: false
def empty?(%Board{}), do: true
@doc """
Spawns an apple in the center of the board.
Returns `{:ok, board}` if there is space available, returns
`{:error, :occupied}` otherwise.
## Examples
iex> {:ok, board} = Board.new(Board.Size.new(3, 3)) |> Board.spawn_apple_at_center()
iex> board
%Board{
apples: [{1, 1}],
size: %Board.Size{height: 3, width: 3},
snakes: []
}
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_apple_at_center()
iex> board |> Board.spawn_apple_at_center()
{:error, :occupied}
"""
@doc since: "0.1.0"
@spec spawn_apple_at_center(t) :: spawn_result
def spawn_apple_at_center(board) do
spawn_apple(board, Board.center_point(board))
end
@doc """
Spawns an apple at the specified point on the board.
Returns `{:ok, board}` if there is space available, returns
`{:error, :occupied}` otherwise.
## Examples
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_apple(Board.Point.new(1, 1))
iex> board
%Board{
apples: [{1, 1}],
size: %Board.Size{height: 7, width: 7},
snakes: []
}
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_apple(Board.Point.new(1, 1))
iex> board |> Board.spawn_apple(Board.Point.new(1, 1))
{:error, :occupied}
"""
@doc since: "0.1.0"
@spec spawn_apple(t, Point.t) :: spawn_result
def spawn_apple(board, point) do
if occupied?(board, point) do
{:error, :occupied}
else
next_board = %Board{board | apples: [point | board.apples]}
{:ok, next_board}
end
end
@doc """
Spawns an apple at the specified point on the board.
Unlike `spawn_apple/2` this function will not check whether there is space
available. You are expected to only use this function if you are otherwise
performing that validation yourself. For example, it may be more efficient to
precompute available spaces before spawning many apples.
Returns a board state with the apple added.
## Examples
iex> board = Board.new(Board.Size.small) |> Board.spawn_apple_unchecked(Board.Point.new(1, 1))
iex> board
%Board{
apples: [{1, 1}],
size: %Board.Size{height: 7, width: 7},
snakes: []
}
"""
@doc since: "0.1.0"
@spec spawn_apple_unchecked(t, Point.t) :: t
  # Prepends the apple without any occupancy check; the caller is expected
  # to have verified that `point` is free (see `spawn_apple/2`).
  def spawn_apple_unchecked(board, point) do
    %Board{board | apples: [point | board.apples]}
  end
@doc """
Spawns apples at each of the specified points on the board.
Returns `{:ok, board}` if there is space available, returns
`{:error, :occupied}` otherwise.
## Examples
iex> points = [Board.Point.new(1, 1), Board.Point.new(1, 2)]
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_apples(points)
iex> board
%Board{
apples: [
{1, 1},
{1, 2}
],
size: %Snek.Board.Size{height: 7, width: 7},
snakes: []
}
iex> occupied_point = Board.Point.new(1, 1)
iex> new_points = [occupied_point, Board.Point.new(1, 2)]
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_apple(occupied_point)
iex> Board.spawn_apples(board, new_points)
{:error, :occupied}
"""
@doc since: "0.1.0"
@spec spawn_apples(t, list(Point.t)) :: spawn_result
def spawn_apples(board, points) do
if any_points_occupied?(board, points) do
{:error, :occupied}
else
board = %Board{board | apples: Enum.concat(points, board.apples)}
{:ok, board}
end
end
@doc """
Returns the point at the center of the board.
If the board width or height are even, the center will be offset because
boards are a discrete grid.
## Examples
iex> Board.new(Board.Size.new(3, 3)) |> Board.center_point()
{1, 1}
iex> Board.new(Board.Size.new(8, 8)) |> Board.center_point()
{3, 3}
"""
@doc since: "0.1.0"
@spec center_point(t) :: Point.t
def center_point(%Board{size: %Size{width: width, height: height}}) do
x = div(width - 1, 2)
y = div(height - 1, 2)
Point.new(x, y)
end
@doc """
Spawns a snake in the center of the board.
Returns `{:ok, board}` if there is space available, returns
`{:error, :occupied}` otherwise.
## Examples
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_snake_at_center("mysnek")
iex> board.snakes
[
%Board.Snake{
body: [{3, 3}, {3, 3}, {3, 3}],
state: :alive,
health: 100,
id: "mysnek"
}
]
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_snake_at_center("mysnek")
iex> Board.spawn_snake_at_center(board, "mysnek")
{:error, :occupied}
"""
@doc since: "0.1.0"
@spec spawn_snake_at_center(t, any, non_neg_integer, non_neg_integer) :: spawn_result
def spawn_snake_at_center(board, id, length \\ @snake_default_length, health \\ @snake_default_health) do
head = center_point(board)
spawn_snake(board, id, head, length, health)
end
@doc """
Spawns multiple snakes, each at a specified point on the board.
Returns `{:ok, board}` if there is space available, returns
`{:error, :occupied}` otherwise.
## Examples
iex> ids_and_heads = [{"snek1", Board.Point.new(1, 1)}, {"snek2", Board.Point.new(5, 5)}]
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_snakes(ids_and_heads)
iex> board.snakes
[
%Board.Snake{
body: [{5, 5}, {5, 5}, {5, 5}],
state: :alive,
health: 100,
id: "snek2"
},
%Board.Snake{
body: [{1, 1}, {1, 1}, {1, 1}],
state: :alive,
health: 100,
id: "snek1"
}
]
iex> ids_and_heads = [{"snek1", Board.Point.new(1, 1)}, {"snek2", Board.Point.new(1, 1)}]
iex> Board.new(Board.Size.small) |> Board.spawn_snakes(ids_and_heads)
{:error, :occupied}
"""
@doc since: "0.1.0"
@spec spawn_snakes(t, list({Snake.id, Point.t}), non_neg_integer, non_neg_integer) :: spawn_result
def spawn_snakes(board, ids_and_heads, length \\ @snake_default_length, health \\ @snake_default_health)
def spawn_snakes(board, [], _length, _health) do
{:ok, board}
end
def spawn_snakes(board, [{snake_id, head} | rest_of_ids_and_heads], length, health) do
case Board.spawn_snake(board, snake_id, head, length, health) do
{:ok, next_board} -> spawn_snakes(next_board, rest_of_ids_and_heads, length, health)
{:error, reason} -> {:error, reason}
end
end
@doc """
Spawns a snake at the specified point on the board.
Returns `{:ok, board}` if there is space available, returns
`{:error, :occupied}` otherwise.
## Examples
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_snake("mysnek", Board.Point.new(1, 1))
iex> board.snakes
[
%Board.Snake{
body: [{1, 1}, {1, 1}, {1, 1}],
state: :alive,
health: 100,
id: "mysnek"
}
]
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_snake("mysnek", Board.Point.new(1, 1))
iex> Board.spawn_snake(board, "mysnek", Board.Point.new(1, 1))
{:error, :occupied}
"""
@doc since: "0.1.0"
@spec spawn_snake(t, any, Point.t, non_neg_integer, non_neg_integer) :: spawn_result
def spawn_snake(board, id, head, length \\ @snake_default_length, health \\ @snake_default_health) do
if occupied?(board, head) do
{:error, :occupied}
else
snake = %Snake{
id: id,
state: :alive,
health: health,
body: List.duplicate(head, length)
}
board = %Board{board | snakes: [snake | board.snakes]}
{:ok, board}
end
end
@doc """
Moves all alive snake on the board according to their respective moves for
this turn.
Snakes move by slithering by one space per turn, in other words stepping in
one direction by adding a new head part and removing a tail part.
If no move is provided for a snake, or `nil` is provided as its move, the
snake will move `:up` instead.
Snakes that have already been eliminated will not be moved.
Returns a board with all moves applied.
## Examples
iex> board0 = Board.new(Board.Size.small)
iex> {:ok, board1} = Board.spawn_snakes(board0, [{"snek0", Board.Point.new(1, 1)}, {"snek1", Board.Point.new(5, 5)}])
iex> board2 = Board.move_snakes(board1, %{"snek0" => :right, "snek1" => nil})
iex> board2.snakes
[
%Board.Snake{
body: [{5, 4}, {5, 5}, {5, 5}],
state: :alive,
health: 100,
id: "snek1"
},
%Board.Snake{
body: [{2, 1}, {1, 1}, {1, 1}],
state: :alive,
health: 100,
id: "snek0"
}
]
"""
@doc since: "0.1.0"
@spec move_snakes(t, %{required(Snake.id) => Snake.snake_move | nil} | list({Snake.id, Snake.snake_move | nil})) :: t
def move_snakes(board, snake_moves) when is_map(snake_moves) do
next_snakes = Enum.map(board.snakes, fn snake ->
direction = Map.get(snake_moves, snake.id)
Snake.move(snake, direction)
end)
%Board{board | snakes: next_snakes}
end
def move_snakes(board, snake_moves) when is_list(snake_moves) do
move_snakes(board, Enum.into(snake_moves, %{}))
end
@doc """
Moves a snake on the board according to its move for this turn.
A snake moves by slithering by one space per turn, in other words stepping in
one direction by adding a new head part and removing a tail part.
If no move is provided, or `nil` is provided as its move, the snake will move
`:up` instead.
Snakes that have already been eliminated will not be moved.
A snake that is already eliminated will not be moved.
Returns a board with this snake's move applied.
## Examples
iex> board0 = Board.new(Board.Size.small)
iex> {:ok, board1} = Board.spawn_snake(board0, "snek0", Board.Point.new(1, 1))
iex> board2 = Board.move_snake(board1, "snek0", :right)
iex> board2.snakes
[
%Board.Snake{
body: [{2, 1}, {1, 1}, {1, 1}],
state: :alive,
health: 100,
id: "snek0"
}
]
"""
@doc since: "0.1.0"
@spec move_snake(t, Snake.id, Snake.snake_move | nil) :: t
def move_snake(board, snake_id, direction) do
next_snakes = Enum.map(board.snakes, fn snake ->
if snake.id == snake_id do
Snake.move(snake, direction)
else
snake
end
end)
%Board{board | snakes: next_snakes}
end
@doc """
Reduce the health of each snake by one point.
Does not affect the health of eliminated snakes.
Returns a board with all snake health reductions applied.
## Examples
iex> apple = Board.Point.new(1, 4)
iex> ids_and_heads = [{"snek0", Board.Point.new(1, 1)}, {"snek1", Board.Point.new(1, 5)}]
iex> {:ok, board0} = Board.new(Board.Size.small) |> Board.spawn_apple(apple)
iex> {:ok, board1} = Board.spawn_snakes(board0, ids_and_heads)
iex> board2 = Board.move_snakes(board1, %{"snek0" => :down, "snek1" => :up})
iex> board3 = Board.maybe_feed_snakes(board2)
iex> board4 = Board.move_snakes(board3, %{"snek0" => :down, "snek1" => :up})
iex> board5 = Board.maybe_eliminate_snakes(board4)
iex> board6 = Board.reduce_snake_healths(board5)
iex> snek0 = board6.snakes |> Enum.find(&(&1.id == "snek0"))
iex> snek1 = board6.snakes |> Enum.find(&(&1.id == "snek1"))
iex> snek0.health # Eliminated before reducing health
100
iex> snek1.health # Not eliminiated
99
"""
@doc since: "0.1.0"
@spec reduce_snake_healths(t) :: t
def reduce_snake_healths(board) do
next_snakes = Enum.map(board.snakes, fn snake ->
if Snake.eliminated?(snake) do
snake
else
Snake.hurt(snake)
end
end)
%Board{board | snakes: next_snakes}
end
@doc """
Eliminate snakes who have moved out of bounds, collided with themselves,
collided with other snake bodies, or lost in a head-to-head collision.
Eliminations are decided by `maybe_eliminate_snake/3` for each snake, giving
priority to longer snakes in in ambiguous collisions.
Snakes that are already eliminated will remain unchanged, and snakes will not
be eliminated by colliding with another snake that has previously been
eliminated itself.
## Examples
iex> apple = Board.Point.new(1, 4)
iex> ids_and_heads = [{"snek0", Board.Point.new(1, 1)}, {"snek1", Board.Point.new(1, 5)}]
iex> {:ok, board0} = Board.new(Board.Size.small) |> Board.spawn_apple(apple)
iex> {:ok, board1} = Board.spawn_snakes(board0, ids_and_heads)
iex> board2 = Board.move_snakes(board1, %{"snek0" => :down, "snek1" => :up})
iex> board3 = Board.maybe_feed_snakes(board2)
iex> board4 = Board.move_snakes(board3, %{"snek0" => :down, "snek1" => :up})
iex> board5 = Board.maybe_eliminate_snakes(board4)
iex> board6 = Board.reduce_snake_healths(board5)
iex> snek0 = board6.snakes |> Enum.find(&(&1.id == "snek0"))
iex> snek1 = board6.snakes |> Enum.find(&(&1.id == "snek1"))
iex> snek0.state
{:eliminated, :head_to_head, "snek1"}
iex> snek1.state
:alive
"""
@doc since: "0.1.0"
@spec maybe_eliminate_snakes(t) :: t
def maybe_eliminate_snakes(board) do
alive_snakes = Enum.filter(board.snakes, &Snake.alive?/1)
snakes_by_length_descending = Enum.sort_by(alive_snakes, fn snake ->
{length(snake.body), snake.id}
end)
next_snakes = Enum.map(board.snakes, fn snake ->
maybe_eliminate_snake(board, snake, snakes_by_length_descending)
end)
%Board{board | snakes: next_snakes}
end
@doc """
Eliminate this snake if it has moved out of bounds, collided with itself,
collided with another snake body, or lost in a head-to-head collision.
If the snake is already previously eliminated, it will be returned unchanged
regardless of any new collisions.
Pass the `snakes_by_length_descending` argument as an ordered list of all
snakes such that ambiguous collisions will be tied by snakes which appear
first in the list. For example, if longer snakes should be considered first,
pass a list of all snakes ordered by their respective lengths descending.
Snakes that are already eliminated will remain unchanged, and snakes will not
be eliminated by colliding with another snake that has previously been
eliminated itself.
# Examples
iex> ids_and_heads = [{"snek0", Board.Point.new(1, 1)}, {"snek1", Board.Point.new(1, 3)}]
iex> {:ok, board0} = Board.new(Board.Size.small) |> Board.spawn_snakes(ids_and_heads)
iex> board1 = Board.move_snakes(board0, %{"snek0" => :down, "snek1" => :right})
iex> board2 = Board.move_snakes(board1, %{"snek0" => :down, "snek1" => :right})
iex> snek0 = board2.snakes |> Enum.find(&(&1.id == "snek0"))
iex> snek0_eliminated = Board.maybe_eliminate_snake(board2, snek0, board2.snakes)
iex> snek0_eliminated.state
{:eliminated, :collision, "snek1"}
iex> snek0_double_eliminated = Board.maybe_eliminate_snake(board2, snek0_eliminated, board2.snakes)
iex> snek0_double_eliminated == snek0_eliminated
true
iex> start_length = 3
iex> start_health = 1
iex> {:ok, board0} = Board.new(Board.Size.small) |> Board.spawn_snake("snek0", Board.Point.new(1, 1), start_length, start_health)
iex> board1 = Board.reduce_snake_healths(board0)
iex> [snek0 | _] = board1.snakes
iex> snek0_eliminated = Board.maybe_eliminate_snake(board1, snek0, board1.snakes)
iex> snek0_eliminated.state
{:eliminated, :starvation}
"""
@doc since: "0.1.0"
@spec maybe_eliminate_snake(t, Snake.t, list(Snake.t)) :: t
def maybe_eliminate_snake(_board, %Snake{state: state} = snake, _snakes_by_length_descending) when state != :alive do
snake
end
def maybe_eliminate_snake(_board, %Snake{health: health} = snake, _snakes_by_length_descending) when health <= 0 do
%Snake{snake | state: {:eliminated, :starvation}}
end
def maybe_eliminate_snake(board, snake, snakes_by_length_descending) do
next_state = cond do
Board.snake_out_of_bounds?(board, snake) ->
{:eliminated, :out_of_bounds}
Board.snake_collides_with_other_snake?(snake, snake) ->
{:eliminated, :self_collision}
true ->
body_collision_other_snake = Enum.find(snakes_by_length_descending, fn other_snake ->
other_snake.id != snake.id && Board.snake_collides_with_other_snake?(snake, other_snake)
end)
if is_nil(body_collision_other_snake) do
head_collision_other_snake = Enum.find(snakes_by_length_descending, fn other_snake ->
other_snake.id != snake.id && Board.snake_loses_head_to_head_collision?(snake, other_snake)
end)
if is_nil(head_collision_other_snake) do
snake.state
else
{:eliminated, :head_to_head, head_collision_other_snake.id}
end
else
{:eliminated, :collision, body_collision_other_snake.id}
end
end
%Snake{snake | state: next_state}
end
@doc """
Feed snakes who eat an apple.
For all apples on the board, if any snake eats it, remove the apple from the
board and feed each snake who ate it.
A snake eats an apple if the snake's head is at the same position as the
apple, and the snake is alive (not eliminated), and the snake has at least
one body part.
Feeding a snake is defined by `Snek.Board.Snake.feed/2`.
Returns the modified board state.
## Examples
iex> apple = Board.Point.new(1, 4)
iex> ids_and_heads = [{"snek0", Board.Point.new(1, 1)}, {"snek1", Board.Point.new(1, 5)}]
iex> {:ok, board0} = Board.new(Board.Size.small) |> Board.spawn_apple(apple)
iex> {:ok, board1} = Board.spawn_snakes(board0, ids_and_heads)
iex> board2 = Board.move_snakes(board1, %{"snek0" => :down, "snek1" => :up})
iex> board3 = Board.maybe_feed_snakes(board2)
iex> snek0 = board3.snakes |> Enum.find(&(&1.id == "snek0"))
iex> snek1 = board3.snakes |> Enum.find(&(&1.id == "snek1"))
iex> length(snek0.body)
3
iex> length(snek1.body)
4
iex> apple = Board.Point.new(1, 4)
iex> ids_and_heads = [{"snek0", Board.Point.new(1, 1)}, {"snek1", Board.Point.new(1, 5)}]
iex> {:ok, board0} = Board.new(Board.Size.small) |> Board.spawn_apple(apple)
iex> {:ok, board1} = Board.spawn_snakes(board0, ids_and_heads)
iex> board2 = Board.move_snakes(board1, %{"snek0" => :down, "snek1" => :right})
iex> board3 = Board.maybe_feed_snakes(board2)
iex> snek0 = board3.snakes |> Enum.find(&(&1.id == "snek0"))
iex> snek1 = board3.snakes |> Enum.find(&(&1.id == "snek1"))
iex> length(snek0.body)
3
iex> length(snek1.body)
3
iex> board3 == board2
true
"""
@doc since: "0.1.0"
@spec maybe_feed_snakes(t) :: t
  def maybe_feed_snakes(board) do
    # Only alive snakes with at least one body part are eligible to eat.
    alive_snakes = Enum.filter(board.snakes, fn snake ->
      Snake.alive?(snake) && length(snake.body) > 0
    end)

    # Fold over the apples; each apple eaten is removed and every snake whose
    # head rests on it is fed (multiple snakes can share one apple).
    Enum.reduce(board.apples, board, fn apple, previous_board ->
      snakes_who_ate = Enum.filter(alive_snakes, fn snake ->
        Snake.head(snake) == apple
      end)

      if Enum.empty?(snakes_who_ate) do
        # Nobody ate this apple: board passes through unchanged.
        previous_board
      else
        next_apples = List.delete(previous_board.apples, apple)

        # Feed the eaters (restoring health to the default maximum); leave
        # everyone else untouched.
        next_snakes = Enum.map(previous_board.snakes, fn snake ->
          snake_ate = Enum.any?(snakes_who_ate, fn snake_who_ate ->
            snake.id == snake_who_ate.id
          end)

          if snake_ate do
            Snake.feed(snake, @snake_default_health)
          else
            snake
          end
        end)

        %Board{previous_board | apples: next_apples, snakes: next_snakes}
      end
    end)
  end
@doc """
Returns true if and only if the given point on the board is occupied,
otherwise false.
A point may be occupied by an apple, or any snake's body part.
## Examples
iex> Board.new(Board.Size.small) |> Board.occupied?(Board.Point.new(1, 3))
false
iex> point = Board.Point.new(1, 3)
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_apple(point)
iex> Board.occupied?(board, point)
true
iex> point = Board.Point.new(1, 3)
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_snake("mysnek", point)
iex> Board.occupied?(board, point)
true
"""
@doc since: "0.1.0"
@spec occupied?(t, Point.t) :: boolean
def occupied?(board, point) do
occupied_by_apple?(board, point) || occupied_by_snake?(board, point)
end
@doc """
Returns true if and only if any of the given points on the board are occupied.
A point may be occupied by an apple, or any snake's body part.
## Examples
iex> board = Board.new(Board.Size.small)
iex> board |> Board.any_points_occupied?([Board.Point.new(1, 3), Board.Point.new(0, 0)])
false
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_apple(Board.Point.new(1, 3))
iex> board |> Board.any_points_occupied?([Board.Point.new(1, 3), Board.Point.new(0, 0)])
true
"""
@doc since: "0.1.0"
@spec any_points_occupied?(t, list(Point.t)) :: boolean
def any_points_occupied?(board, points) do
Enum.any?(points, &(occupied?(board, &1)))
end
@doc """
Returns true if and only if the given point on the board is occupied by an
apple, otherwise false.
## Examples
iex> Board.new(Board.Size.small) |> Board.occupied_by_apple?(Board.Point.new(1, 3))
false
iex> point = Board.Point.new(1, 3)
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_apple(point)
iex> Board.occupied_by_apple?(board, point)
true
iex> point = Board.Point.new(1, 3)
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_snake("mysnek", point)
iex> Board.occupied_by_apple?(board, point)
false
"""
@doc since: "0.1.0"
@spec occupied_by_apple?(t, Point.t) :: boolean
def occupied_by_apple?(board, point) do
Enum.member?(board.apples, point)
end
@doc """
Returns true if and only if the given point on the board is occupied by a
snake's body part, otherwise false.
## Examples
iex> Board.new(Board.Size.small) |> Board.occupied_by_snake?(Board.Point.new(1, 3))
false
iex> point = Board.Point.new(1, 3)
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_apple(point)
iex> Board.occupied_by_snake?(board, point)
false
iex> point = Board.Point.new(1, 3)
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_snake("mysnek", point)
iex> Board.occupied_by_snake?(board, point)
true
"""
@doc since: "0.1.0"
@spec occupied_by_snake?(t, Point.t) :: boolean
def occupied_by_snake?(board, point) do
Enum.any?(board.snakes, fn snake ->
Enum.member?(snake.body, point)
end)
end
@doc """
Returns a list of all points on the board.
## Examples
iex> Board.new(Board.Size.new(2, 2)) |> Board.all_points
[
{0, 0},
{0, 1},
{1, 0},
{1, 1}
]
"""
@doc since: "0.1.0"
@spec all_points(t) :: list(Point.t)
def all_points(board) do
xs = 0..board.size.width-1
ys = 0..board.size.height-1
for x <- xs, y <- ys do
Point.new(x, y)
end
end
@doc """
Returns a list of all even points on the board, alternating like a
checkerboard.
## Examples
iex> Board.new(Board.Size.new(3, 3)) |> Board.all_even_points
[
{0, 0},
{0, 2},
{1, 1},
{2, 0},
{2, 2}
]
"""
@doc since: "0.1.0"
@spec all_even_points(t) :: list(Point.t)
def all_even_points(board) do
board
|> all_points
|> Enum.filter(&Point.even?/1)
end
@doc """
Returns a list of all unoccupied points on the board.
## Examples
iex> apple = Board.Point.new(0, 1)
iex> {:ok, board} = Board.new(Board.Size.new(2, 2)) |> Board.spawn_apple(apple)
iex> Board.unoccupied_points(board)
[
{0, 0},
{1, 0},
{1, 1}
]
"""
@doc since: "0.1.0"
@spec unoccupied_points(t) :: list(Point.t)
def unoccupied_points(board) do
board
|> all_points()
|> Enum.reject(&(occupied?(board, &1)))
end
@doc """
Returns a list of all occupied points on the board.
## Examples
iex> apple = Board.Point.new(0, 1)
iex> {:ok, board} = Board.new(Board.Size.new(2, 2)) |> Board.spawn_apple(apple)
iex> Board.occupied_points(board)
[
{0, 1}
]
"""
@doc since: "0.1.0"
@spec occupied_points(t) :: list(Point.t)
def occupied_points(board) do
board
|> all_points()
|> Enum.filter(&(occupied?(board, &1)))
end
@doc """
Returns a list of neighboring points adjascent to a point of origin.
This excludes points that are outside of the board's boundaries.
## Examples
iex> board = Board.new(Board.Size.small)
iex> board |> Board.adjascent_neighbors(Board.Point.new(1, 1))
[
{1, 0},
{1, 2},
{2, 1},
{0, 1}
]
iex> board = Board.new(Board.Size.small)
iex> board |> Board.adjascent_neighbors(Board.Point.new(0, 0))
[
{0, 1},
{1, 0}
]
iex> board = Board.new(Board.Size.new(3, 3))
iex> board |> Board.adjascent_neighbors(Board.Point.new(2, 2))
[
{2, 1},
{1, 2}
]
"""
@doc since: "0.1.0"
@spec adjascent_neighbors(t, Point.t) :: list(Point.t)
def adjascent_neighbors(board, origin) do
Point.adjascent_neighbors(origin)
|> Enum.filter(&(within_bounds?(board, &1)))
end
@doc """
Returns a list of unoccupied neighboring points adjascent to a point of
origin.
This excludes any points occupied by an apple, or any snake's body part.
This excludes points that are outside of the board's boundaries.
## Examples
iex> board = Board.new(Board.Size.small)
iex> board |> Board.unoccupied_adjascent_neighbors(Board.Point.new(1, 1))
[
{1, 0},
{1, 2},
{2, 1},
{0, 1}
]
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_apple(Board.Point.new(1, 2))
iex> board |> Board.unoccupied_adjascent_neighbors(Board.Point.new(1, 1))
[
{1, 0},
{2, 1},
{0, 1}
]
"""
@doc since: "0.1.0"
@spec unoccupied_adjascent_neighbors(t, Point.t) :: list(Point.t)
def unoccupied_adjascent_neighbors(board, origin) do
adjascent_neighbors(board, origin)
|> Enum.reject(&(occupied?(board, &1)))
end
@doc """
Returns a list of neighboring points diagonal to a point of origin.
This excludes points that are outside of the board's boundaries.
## Examples
iex> board = Board.new(Board.Size.small)
iex> board |> Board.diagonal_neighbors(Board.Point.new(1, 1))
[
{0, 0},
{2, 0},
{2, 2},
{0, 2}
]
iex> board = Board.new(Board.Size.small)
iex> board |> Board.diagonal_neighbors(Board.Point.new(0, 0))
[{1, 1}]
iex> board = Board.new(Board.Size.new(3, 3))
iex> board |> Board.diagonal_neighbors(Board.Point.new(2, 2))
[{1, 1}]
"""
@doc since: "0.1.0"
@spec diagonal_neighbors(t, Point.t) :: list(Point.t)
def diagonal_neighbors(board, origin) do
Point.diagonal_neighbors(origin)
|> Enum.filter(&(within_bounds?(board, &1)))
end
@doc """
Returns a list of unoccupied neighboring points diagonal to a point of
origin.
This excludes any points occupied by an apple, or any snake's body part.
This excludes points that are outside of the board's boundaries.
## Examples
iex> board = Board.new(Board.Size.small)
iex> board |> Board.unoccupied_diagonal_neighbors(Board.Point.new(1, 1))
[
{0, 0},
{2, 0},
{2, 2},
{0, 2}
]
iex> {:ok, board} = Board.new(Board.Size.small) |> Board.spawn_apple(Board.Point.new(0, 0))
iex> board |> Board.unoccupied_diagonal_neighbors(Board.Point.new(1, 1))
[
{2, 0},
{2, 2},
{0, 2}
]
"""
@doc since: "0.1.0"
@spec unoccupied_diagonal_neighbors(t, Point.t) :: list(Point.t)
def unoccupied_diagonal_neighbors(board, origin) do
diagonal_neighbors(board, origin)
|> Enum.reject(&(occupied?(board, &1)))
end
@doc """
Returns true if and only if this point is within the board's boundaries,
otherwise false.
## Examples
iex> board = Board.new(Board.Size.new(3, 3))
iex> board |> Board.within_bounds?(Board.Point.new(0, 0))
true
iex> board |> Board.within_bounds?(Board.Point.new(1, 2))
true
iex> board |> Board.within_bounds?(Board.Point.new(-1, 0))
false
iex> board |> Board.within_bounds?(Board.Point.new(0, 3))
false
"""
@doc since: "0.1.0"
@spec within_bounds?(t, Point.t) :: boolean
def within_bounds?(%Board{size: %Size{width: width, height: height}}, {x, y})
when x < 0 or y < 0 or x >= width or y >= height,
do: false
def within_bounds?(_board, _point), do: true
@doc """
Returns true if and only if this point is outside of the board's boundaries,
in other words the opposite of `within_bounds?/2`.
## Examples
iex> board = Board.new(Board.Size.new(3, 3))
iex> board |> Board.out_of_bounds?(Board.Point.new(0, 0))
false
iex> board |> Board.out_of_bounds?(Board.Point.new(1, 2))
false
iex> board |> Board.out_of_bounds?(Board.Point.new(-1, 0))
true
iex> board |> Board.out_of_bounds?(Board.Point.new(0, 3))
true
"""
@doc since: "0.1.0"
@spec out_of_bounds?(t, Point.t) :: boolean
def out_of_bounds?(board, point) do
!within_bounds?(board, point)
end
@doc """
Returns true if and only if this snake has some body part outside of the
board's boundaries.
## Examples
iex> {:ok, board} = Board.new(Board.Size.new(3, 3)) |> Board.spawn_snake("mysnek", Board.Point.new(1, 1))
iex> [snake | _] = board.snakes
iex> Board.snake_out_of_bounds?(board, snake)
false
iex> {:ok, board} = Board.new(Board.Size.new(3, 3)) |> Board.spawn_snake("mysnek", Board.Point.new(0, 3))
iex> [snake | _] = board.snakes
iex> Board.snake_out_of_bounds?(board, snake)
true
"""
@doc since: "0.1.0"
@spec snake_out_of_bounds?(t, Snake.t) :: boolean
def snake_out_of_bounds?(board, snake) do
Enum.any?(snake.body, fn bodypart ->
out_of_bounds?(board, bodypart)
end)
end
@doc """
Returns true if and only if `snake_a`'s head is in collision with any of
`snake_b`'s body parts, excluding `snake_b`'s head. Otherwise, returns false.
The two snake arguments are not commutative. One snake my collide with
another snake's body, and yet the other snake's head may not be in a
collision.
As such, head-to-head collisions are not detected this way. For that, use
`snake_loses_head_to_head_collision?/2` instead.
## Examples
iex> ids_and_heads = [{"snek0", Board.Point.new(1, 1)}, {"snek1", Board.Point.new(1, 3)}]
iex> {:ok, board0} = Board.new(Board.Size.small) |> Board.spawn_snakes(ids_and_heads)
iex> board1 = Board.move_snakes(board0, %{"snek0" => :down, "snek1" => :right})
iex> board2 = Board.move_snakes(board1, %{"snek0" => :down, "snek1" => :right})
iex> snek0 = board2.snakes |> Enum.find(&(&1.id == "snek0"))
iex> snek1 = board2.snakes |> Enum.find(&(&1.id == "snek1"))
iex> Board.snake_collides_with_other_snake?(snek0, snek1)
true
iex> Board.snake_collides_with_other_snake?(snek1, snek0)
false
"""
@doc since: "0.1.0"
@spec snake_collides_with_other_snake?(Snake.t, Snake.t) :: boolean
def snake_collides_with_other_snake?(snake_a, snake_b) do
case Snake.head(snake_a) do
nil -> false
head -> Enum.any?(Enum.drop(snake_b.body, 1), &(&1 == head))
end
end
@doc """
Returns true if and only if there is a head-to-head collision between
`snake_a` and `snake_b` and `snake_a`'s body length is shorter or equal to
`snake_b`'s body length, thereby causing `snake_a` to lose the head-to-head.
## Examples
iex> apple = Board.Point.new(1, 4)
iex> ids_and_heads = [{"snek0", Board.Point.new(1, 1)}, {"snek1", Board.Point.new(1, 5)}]
iex> {:ok, board0} = Board.new(Board.Size.small) |> Board.spawn_apple(apple)
iex> {:ok, board1} = Board.spawn_snakes(board0, ids_and_heads)
iex> board2 = Board.move_snakes(board1, %{"snek0" => :down, "snek1" => :up})
iex> board3 = Board.maybe_feed_snakes(board2)
iex> board4 = Board.move_snakes(board3, %{"snek0" => :down, "snek1" => :up})
iex> snek0 = board4.snakes |> Enum.find(&(&1.id == "snek0"))
iex> snek1 = board4.snakes |> Enum.find(&(&1.id == "snek1"))
iex> Board.snake_loses_head_to_head_collision?(snek0, snek1)
true
iex> Board.snake_loses_head_to_head_collision?(snek1, snek0)
false
"""
@doc since: "0.1.0"
@spec snake_loses_head_to_head_collision?(Snake.t, Snake.t) :: boolean
def snake_loses_head_to_head_collision?(snake_a, snake_b) do
if Snake.head(snake_a) == Snake.head(snake_b) do
length(snake_a.body) <= length(snake_b.body)
else
false
end
end
@doc """
Returns the number of snakes on the board who are still alive (not
eliminated).
## Examples
iex> apple = Board.Point.new(1, 4)
iex> ids_and_heads = [{"snek0", Board.Point.new(1, 1)}, {"snek1", Board.Point.new(1, 5)}]
iex> {:ok, board0} = Board.new(Board.Size.small) |> Board.spawn_apple(apple)
iex> {:ok, board1} = Board.spawn_snakes(board0, ids_and_heads)
iex> board2 = Board.move_snakes(board1, %{"snek0" => :down, "snek1" => :up})
iex> board3 = Board.maybe_feed_snakes(board2)
iex> board4 = Board.move_snakes(board3, %{"snek0" => :down, "snek1" => :up})
iex> board5 = Board.maybe_eliminate_snakes(board4)
iex> Board.alive_snakes_remaining(board5)
1
"""
@doc since: "0.1.0"
@spec alive_snakes_remaining(t) :: non_neg_integer
def alive_snakes_remaining(%Board{snakes: snakes}) do
snakes
|> Enum.filter(&Snake.alive?/1)
|> Enum.count
end
end
|
lib/snek/board.ex
| 0.931056 | 0.54462 |
board.ex
|
starcoder
|
defmodule ExIhdlSubscriptionBase.TrackedSchema do
  @moduledoc """
  Enforces that every "tracked" field on a schema is written together with its
  `<field>_last_sent_at` timestamp counterpart, so consumers can detect and
  reject stale data.
  """

  # Suffix naming the timestamp counterpart of each tracked field.
  @tracked_suffix "_last_sent_at"

  # Insert: every tracked field being changed must be accompanied by its
  # `*_last_sent_at` counterpart in the same changeset; the timestamp is
  # truncated to second precision before being stored.
  #
  # `add_error` (arity 4 via keyword opts) and `put_change` (arity 3) are
  # injected as anonymous functions so this module stays decoupled from the
  # changeset library (presumably Ecto.Changeset — TODO confirm at call sites).
  def enforce_tracked_fields(%{action: :insert} = changeset, module, add_error, put_change) do
    module_tracked_fields = tracked_fields(module)

    changeset.changes
    |> Enum.filter(fn {field, _} -> field in module_tracked_fields end)
    |> Enum.reduce(changeset, fn {field, _}, cs ->
      module_tracked_field_counterpart = field |> tracked_field_counterpart(module)

      case Map.get(cs.changes, module_tracked_field_counterpart) do
        nil ->
          # Counterpart missing from this changeset: reject the write.
          add_error.(cs, module_tracked_field_counterpart, "empty",
            additional: "must record the time this data was sent"
          )

        last_sent_at ->
          put_change.(
            cs,
            module_tracked_field_counterpart,
            last_sent_at |> DateTime.truncate(:second)
          )
      end
    end)
  end

  # Update: same requirement as insert, plus the new timestamp must be strictly
  # newer than the one already stored (or the stored one must be nil),
  # otherwise the incoming data is considered out of date.
  def enforce_tracked_fields(%{action: :update} = changeset, module, add_error, put_change) do
    module_tracked_fields = tracked_fields(module)

    changeset.changes
    |> Enum.filter(fn {field, _} -> field in module_tracked_fields end)
    |> Enum.reduce(changeset, fn {field, _}, cs ->
      module_tracked_field_counterpart = field |> tracked_field_counterpart(module)
      # Timestamp currently persisted on the record (cs.data).
      current_last_sent_at = cs.data |> Map.get(module_tracked_field_counterpart)

      case Map.get(cs.changes, module_tracked_field_counterpart) do
        nil ->
          add_error.(cs, module_tracked_field_counterpart, "empty",
            additional: "must record the time this data was sent"
          )

        new_last_sent_at ->
          cond do
            current_last_sent_at == nil or new_last_sent_at |> Timex.after?(current_last_sent_at) ->
              put_change.(
                cs,
                module_tracked_field_counterpart,
                new_last_sent_at |> DateTime.truncate(:second)
              )

            true ->
              add_error.(cs, module_tracked_field_counterpart, "out_of_date",
                additional: "this data is older than the existing data"
              )
          end
      end
    end)
  end

  # Any other action (e.g. :delete, nil) passes through unchanged.
  def enforce_tracked_fields(changeset, _, _, _), do: changeset

  # Returns the `<field>_last_sent_at` atom for `field`, raising if the
  # counterpart is not actually a key on the given struct.
  def tracked_field_counterpart(field, struct_to_inspect) do
    counterpart =
      field
      |> suffix_atom(@tracked_suffix)

    case Map.has_key?(struct(struct_to_inspect), counterpart) do
      true ->
        counterpart

      false ->
        raise "couldn't find counterpart #{counterpart} to #{field} on #{
                struct_to_inspect |> inspect()
              }"
    end
  end

  # Derives the tracked field names by stripping the suffix from each
  # `*_last_sent_at` field found on the struct.
  # NOTE(review): String.trim_trailing/2 removes *repeated* trailing
  # occurrences of the suffix, not just one — harmless unless a field name
  # ends with the suffix twice.
  def tracked_fields(struct_to_inspect) do
    for field <- last_sent_at_fields(struct_to_inspect),
        into: [],
        do:
          field
          |> Atom.to_string()
          |> String.trim_trailing(@tracked_suffix)
          |> String.to_atom()
  end

  # All fields on the struct whose name ends with the tracked suffix.
  def last_sent_at_fields(struct_to_inspect) do
    struct_fields = struct_to_inspect |> Map.from_struct()
    for {field, _} <- struct_fields, field |> ends_with?(@tracked_suffix), into: [], do: field
  end

  defp ends_with?(atom, suffix) do
    atom |> Atom.to_string() |> String.ends_with?(suffix)
  end

  # Appends `suffix` to an atom's name, returning a new atom. Field names come
  # from compile-time struct definitions, so atom creation is bounded.
  defp suffix_atom(atom, suffix) do
    [
      atom
      |> Atom.to_string(),
      suffix
    ]
    |> Enum.join()
    |> String.to_atom()
  end
end
|
lib/ex_ihdl_subscription_base/tracked_schema.ex
| 0.68658 | 0.423696 |
tracked_schema.ex
|
starcoder
|
defmodule FLHook.Params do
  @moduledoc """
  A module that provides helpers to decode command response and event params.
  """

  alias FLHook.Duration
  alias FLHook.ParamError
  alias FLHook.Utils

  defstruct data: %{}

  @type key :: atom | String.t()
  @type data :: %{optional(String.t()) => String.t()}
  @type t :: %__MODULE__{data: data}
  @type param_type ::
          :boolean | :duration | :integer | :float | :string | module

  @doc false
  @spec new(data) :: t
  def new(data \\ %{}), do: %__MODULE__{data: data}

  @doc false
  @spec parse(String.t(), Keyword.t()) :: t
  def parse(str, opts \\ []) when is_binary(str) do
    str = String.trim_trailing(str, Utils.line_sep())
    # Offsets from `Regex.scan(..., return: :index)` are BYTE offsets, so all
    # slicing must be byte-based. The previous implementation used
    # String.length/1 and String.slice/3 (grapheme-based), which silently
    # corrupted values whenever the input contained multi-byte UTF-8.
    str_size = byte_size(str)
    spread = opts[:spread]

    # Relies on the default capture list: full match first, then the two
    # groups. (The formerly passed `captures:` key is not a valid
    # Regex.scan/3 option — the valid key is `:capture` — and was ignored.)
    ~r/(?<key>\w+)\=(?<value>\S+)/
    |> Regex.scan(str, return: :index)
    |> Enum.reduce_while(
      %{},
      fn [_, {key_idx, key_len}, {value_idx, value_len}], map ->
        key = binary_part(str, key_idx, key_len)

        if key == spread do
          # The spread param swallows the rest of the line, whitespace
          # included, and halts the scan.
          value = binary_part(str, value_idx, str_size - value_idx)
          {:halt, Map.put(map, key, value)}
        else
          value = binary_part(str, value_idx, value_len)
          {:cont, Map.put(map, key, value)}
        end
      end
    )
    |> new()
  end

  @doc """
  Fetches multiple params with the specified keys from the params collection.
  Optionally allows specification of a type to coerce the param to.
  """
  @doc since: "0.3.0"
  @spec pick(t, [key] | [{key, param_type}]) ::
          {:ok, %{optional(key) => any}} | {:error, ParamError.t()}
  def pick(%__MODULE__{} = params, keys_and_types)
      when is_list(keys_and_types) do
    # Halts on the first missing or uncoercible param, propagating its error.
    Enum.reduce_while(keys_and_types, {:ok, %{}}, fn key_and_type, {:ok, map} ->
      {key, type} = resolve_key_and_type(key_and_type)

      case fetch(params, key, type) do
        {:ok, value} -> {:cont, {:ok, Map.put(map, key, value)}}
        error -> {:halt, error}
      end
    end)
  end

  # A bare key (no tuple) defaults to the :string type.
  defp resolve_key_and_type({key, type}), do: {key, type}
  defp resolve_key_and_type(key), do: {key, :string}

  @doc """
  Puts multiple params with the specified keys from the params collection in the
  given struct. Optionally allows specification of a type to coerce the param
  to.
  """
  @doc since: "0.3.0"
  @spec pick_into(t, module | struct, [key] | [{key, param_type}]) ::
          {:ok, struct} | {:error, ParamError.t()}
  def pick_into(%__MODULE__{} = params, target, keys_and_types)
      when is_list(keys_and_types) do
    with {:ok, fields} <- pick(params, keys_and_types) do
      {:ok, struct(target, fields)}
    end
  end

  @doc """
  Fetches the param with the specified key from the params collection.
  Optionally allows specification of a type to coerce the param to.
  """
  @spec fetch(t, key, param_type) :: {:ok, any} | {:error, ParamError.t()}
  def fetch(params, key, type \\ :string)

  # Atom keys are normalized to strings, matching the stored data keys.
  def fetch(%__MODULE__{} = params, key, type) when is_atom(key) do
    fetch(params, Atom.to_string(key), type)
  end

  def fetch(%__MODULE__{} = params, key, :boolean) do
    with {:ok, value} <- fetch(params, key) do
      {:ok, value in ["1", "yes", "enabled"]}
    end
  end

  def fetch(%__MODULE__{} = params, key, :duration) do
    fetch(params, key, Duration)
  end

  def fetch(%__MODULE__{} = params, key, :float) do
    # Requires the whole value to parse (no trailing garbage).
    with {:ok, value} <- fetch(params, key),
         {value, ""} <- Float.parse(value) do
      {:ok, value}
    else
      _ -> {:error, %ParamError{key: key}}
    end
  end

  def fetch(%__MODULE__{} = params, key, :integer) do
    with {:ok, value} <- fetch(params, key),
         {value, ""} <- Integer.parse(value) do
      {:ok, value}
    else
      _ -> {:error, %ParamError{key: key}}
    end
  end

  def fetch(%__MODULE__{data: data}, key, :string) do
    with :error <- Map.fetch(data, key) do
      {:error, %ParamError{key: key}}
    end
  end

  # Custom type module: must be loadable and export parse/1 returning
  # {:ok, value}; anything else maps to a ParamError.
  def fetch(%__MODULE__{} = params, key, type_mod) when is_atom(type_mod) do
    if Code.ensure_loaded?(type_mod) &&
         function_exported?(type_mod, :parse, 1) do
      with {:ok, value} <- fetch(params, key),
           {:ok, value} <- type_mod.parse(value) do
        {:ok, value}
      else
        _ -> {:error, %ParamError{key: key}}
      end
    else
      {:error, %ParamError{key: key}}
    end
  end

  @doc """
  Fetches the param with the specified key from the params collection.
  Optionally allows specification of a type to coerce the param to. Raises when
  the param is missing or could not be coerced to the given type.
  """
  @spec fetch!(t, key, param_type) :: any | no_return
  def fetch!(%__MODULE__{} = params, key, type \\ :string) do
    case fetch(params, key, type) do
      {:ok, value} -> value
      {:error, error} -> raise error
    end
  end

  @doc """
  Fetches a param as boolean from the params collection. Raises when the param
  is missing or could not be coerced.
  """
  @spec boolean!(t, key) :: boolean | no_return
  def boolean!(%__MODULE__{} = params, key) do
    fetch!(params, key, :boolean)
  end

  @doc """
  Fetches a param as duration from the params collection. Raises when the param
  is missing or could not be coerced.
  """
  @spec duration!(t, key) :: Duration.t() | no_return
  def duration!(%__MODULE__{} = params, key) do
    fetch!(params, key, :duration)
  end

  @doc """
  Fetches a param as float from the params collection. Raises when the param is
  missing or could not be coerced.
  """
  @spec float!(t, key) :: float | no_return
  def float!(%__MODULE__{} = params, key) do
    fetch!(params, key, :float)
  end

  @doc """
  Fetches a param as integer from the params collection. Raises when the param
  is missing or could not be coerced.
  """
  @spec integer!(t, key) :: integer | no_return
  def integer!(%__MODULE__{} = params, key) do
    fetch!(params, key, :integer)
  end

  @doc """
  Fetches a param as string from the params collection. Raises when the param is
  missing or could not be coerced.
  """
  @spec string!(t, key) :: String.t() | no_return
  def string!(%__MODULE__{} = params, key) do
    fetch!(params, key, :string)
  end

  @doc """
  Converts the params to a plain map.
  """
  @doc since: "0.3.0"
  @spec to_map(t, key_style :: :string | :atom) :: map
  def to_map(%__MODULE__{data: data}, key_style \\ :string) do
    Map.new(data, fn {key, value} ->
      {format_map_key(key, key_style), value}
    end)
  end

  # NOTE(review): the :atom key style creates atoms from arbitrary param keys;
  # atoms are never garbage collected, so only use it on trusted input.
  defp format_map_key(key, :atom), do: String.to_atom(key)
  defp format_map_key(key, :string), do: key
end
|
lib/fl_hook/params.ex
| 0.864754 | 0.50061 |
params.ex
|
starcoder
|
defmodule BPXE.BPMN.JSON do
  import BPXE.BPMN.Interpolation

  # SAX-style event handler that builds a JSON-like Elixir term (maps, lists,
  # numbers, strings, booleans, nil) from an XML encoding.
  #
  # State fields:
  #   value       - the term under construction
  #   current     - access path into `value`, accumulated HEAD-FIRST
  #                 (nil = outside the document, [] = at the root)
  #   characters  - buffer for text content of the current scalar element
  #   keyed       - the element currently open carried a "key" attribute
  #   interpolate - some leaf holds a deferred interpolation closure
  defstruct value: nil, current: nil, characters: nil, keyed: false, interpolate: false

  use ExConstructor

  # No interpolation recorded: the built value can be used as-is.
  def prepare(%__MODULE__{interpolate: false, value: value}), do: value

  # Otherwise return a closure that resolves every embedded interpolation
  # function once a callback is supplied.
  def prepare(%__MODULE__{value: value}) do
    fn cb ->
      interpolate(value, cb)
    end
  end

  # Recursively resolve interpolation closures inside the built term.
  def interpolate(value, cb) when is_function(value, 1) do
    value.(cb)
  end

  def interpolate(value, cb) when is_list(value) do
    value |> Enum.map(fn v -> interpolate(v, cb) end)
  end

  def interpolate(value, cb) when is_map(value) do
    value |> Enum.map(fn {k, v} -> {k, interpolate(v, cb)} end) |> Map.new()
  end

  def interpolate(value, _cb), do: value

  # Root map opens the document.
  def handle_event(
        :start_element,
        {{_, "map"}, args},
        %__MODULE__{current: nil} = state
      )
      when map_size(args) == 0 do
    {:ok, %{state | value: %{}, current: []}}
  end

  # Nested (unkeyed) map: install an empty map at the current path.
  def handle_event(
        :start_element,
        {{_, "map"}, args},
        %__MODULE__{current: path, value: value} = state
      )
      when map_size(args) == 0 do
    {:ok, %{state | value: update(value, path, %{})}}
  end

  # Element carrying a "key" attribute: push the key onto the path, mark the
  # state keyed, and re-dispatch the same element without the attribute.
  def handle_event(
        :start_element,
        {{_, _} = element, %{"key" => key}},
        %__MODULE__{current: path} = state
      ) do
    handle_event(:start_element, {element, %{}}, %{state | current: [key | path], keyed: true})
  end

  # Closing a keyed element: run the normal end handler, then pop the key.
  def handle_event(
        :end_element,
        {_, _} = element,
        %__MODULE__{keyed: true} = state
      ) do
    handle_event(:end_element, element, %{state | keyed: false})
    |> Result.map(fn state -> %{state | current: tl(state.current)} end)
  end

  # Closing the root map ends the document.
  def handle_event(
        :end_element,
        {_, "map"},
        %__MODULE__{current: current} = state
      )
      when current == [] or is_nil(current) do
    {:ok, %{state | current: nil}}
  end

  def handle_event(
        :end_element,
        {_, "map"},
        %__MODULE__{current: [_ | path]} = state
      ) do
    {:ok, %{state | current: path}}
  end

  # Arrays are built by prepending (see update/3) and reversed on close.
  def handle_event(
        :start_element,
        {{_, "array"}, _},
        %__MODULE__{current: path, value: value} = state
      ) do
    {:ok, %{state | value: update(value, path, [])}}
  end

  def handle_event(
        :end_element,
        {_, "array"},
        %__MODULE__{value: value, current: path} = state
      ) do
    {:ok, %{state | value: reverse(value, path)}}
  end

  def handle_event(
        :start_element,
        {{_, "number"}, _},
        %__MODULE__{} = state
      ) do
    {:ok, %{state | characters: ""}}
  end

  def handle_event(
        :end_element,
        {_, "number"},
        %__MODULE__{value: value, current: path, characters: characters} = state
      ) do
    # interpolate/1 (imported) returns either the literal string or a closure
    # to be resolved later via prepare/1.
    case interpolate(characters) do
      characters when is_binary(characters) ->
        # Prefer an integer; fall back to float when a fractional part exists.
        number =
          case Integer.parse(characters) do
            {int, ""} ->
              int

            {_, "." <> _} ->
              case Float.parse(characters) do
                {float, _} -> float
              end

            {int, _} ->
              int

            :error ->
              :error
          end

        case number do
          :error ->
            {:error, {:invalid_number, characters}}

          _ ->
            {:ok, %{state | characters: nil, value: update(value, path, number)}}
        end

      f when is_function(f, 1) ->
        {:ok, %{state | characters: nil, value: update(value, path, f), interpolate: true}}
    end
  end

  def handle_event(
        :start_element,
        {{_, "string"}, _},
        %__MODULE__{} = state
      ) do
    {:ok, %{state | characters: ""}}
  end

  def handle_event(
        :end_element,
        {_, "string"},
        %__MODULE__{value: value, current: path, characters: characters} = state
      ) do
    case interpolate(characters) do
      characters when is_binary(characters) ->
        {:ok, %{state | characters: nil, value: update(value, path, characters)}}

      f when is_function(f, 1) ->
        # Interpolated strings are coerced to string when finally resolved.
        {:ok,
         %{
           state
           | characters: nil,
             value: update(value, path, fn cb -> f.(cb) |> to_string() end),
             interpolate: true
         }}
    end
  end

  def handle_event(
        :start_element,
        {{_, "boolean"}, _},
        %__MODULE__{} = state
      ) do
    {:ok, %{state | characters: ""}}
  end

  def handle_event(
        :end_element,
        {_, "boolean"},
        %__MODULE__{value: value, current: path, characters: characters} = state
      ) do
    case interpolate(characters) do
      characters when is_binary(characters) ->
        bool =
          case characters |> String.trim() do
            "true" -> true
            "false" -> false
          end

        {:ok, %{state | characters: nil, value: update(value, path, bool)}}

      f when is_function(f, 1) ->
        {:ok, %{state | characters: nil, value: update(value, path, f), interpolate: true}}
    end
  end

  def handle_event(
        :start_element,
        {{_, "null"}, _},
        state
      ) do
    {:ok, state}
  end

  def handle_event(
        :end_element,
        {_, "null"},
        %__MODULE__{value: value, current: path} = state
      ) do
    {:ok, %{state | value: update(value, path, nil)}}
  end

  # Accumulate text content while a scalar element is open.
  def handle_event(
        :characters,
        chars,
        %__MODULE__{characters: characters} = state
      )
      when not is_nil(characters) do
    {:ok, %{state | characters: characters <> chars}}
  end

  # Text outside scalar elements (e.g. whitespace between tags) is ignored.
  def handle_event(
        :characters,
        _,
        %__MODULE__{} = state
      ) do
    {:ok, state}
  end

  # At the root (path empty or nil) the accumulated list is the value itself.
  defp reverse(value, nil), do: Enum.reverse(value)
  defp reverse(value, []), do: Enum.reverse(value)

  # Reverse the list nested at `path`. Paths are accumulated head-first, so
  # the path must be reversed before use, exactly as in update/3.
  # (Bug fix: the previous clause reversed the whole top-level `value` with an
  # unreversed path — `update_in(value, path, fn _ -> Enum.reverse(value) end)`
  # — corrupting any nested array.)
  defp reverse(value, path) do
    update_in(value, path |> Enum.reverse(), fn list -> Enum.reverse(list) end)
  end

  # Prepend into a root-level list under construction.
  defp update(list, nothing, new_value)
       when (is_nil(nothing) or nothing == []) and is_list(list) do
    [new_value | list]
  end

  # Replace the root value outright.
  defp update(_value, nothing, new_value) when is_nil(nothing) or nothing == [] do
    new_value
  end

  # At a nested path: prepend when the target is a list (array under
  # construction), otherwise set/replace the value.
  defp update(value, path, new_value) do
    update_in(value, path |> Enum.reverse(), fn
      list when is_list(list) ->
        [new_value | list]

      _ ->
        new_value
    end)
  end
end
|
lib/bpxe/bpmn/json.ex
| 0.759894 | 0.47025 |
json.ex
|
starcoder
|
defmodule Kitsune.Aws.Config do
  @moduledoc """
  This module is used to load the default credentials from one or many [Configuration Providers](configuration-providers.html)

  The credentials are internally stored in a table in the [Erlang Term Storage](http://www.erlang.org/doc/man/ets.html).
  Since the ETS for `:set` tables can insert and lookup in constant time, all operations in this module will also run in
  constant time.
  """

  defstruct [:access_key, :secret_key, :default_region]

  @typedoc """
  The struct that should be returned by every single [Configuration Provider](configuration-providers.html)
  """
  @type t() :: %Kitsune.Aws.Config{access_key: String.t(), secret_key: String.t(), default_region: String.t()}

  @doc """
  Loads configuration from the specified providers

  If no providers are specified (i.e., `providers == nil`), then `get_default_providers/0` is used instead.
  This function returns the loaded configuration. In case of error, `nil` will be returned instead.
  """
  # Fixed spec: the original `:: Kitsune.Aws.Config.t(), nil` is invalid
  # typespec syntax — union types use `|`. Same for the parameter type.
  @spec load([module] | nil) :: Kitsune.Aws.Config.t() | nil
  def load(providers \\ nil) do
    providers = providers || get_default_providers()

    # Providers that fail to load (see load_provider/1) yield nil and are
    # skipped; the remaining configs are merged left-to-right, so later
    # providers override earlier ones.
    config =
      try do
        Stream.map(providers, &load_provider/1)
        |> Stream.filter(fn config -> config != nil end)
        |> Enum.reduce(%Kitsune.Aws.Config{}, fn config, acc -> Map.merge(acc, config) end)
      rescue
        # NOTE(review): Enum.reduce/3 with an initial accumulator never raises
        # Enum.EmptyError; this rescue appears to be defensive dead code.
        Enum.EmptyError -> nil
      end

    save(config)
  end

  @doc """
  Returns the list of the default configuration providers

  This function will always return the following list:

      [Kitsune.Aws.ConfigProvider.ApplicationConfig, Kitsune.Aws.ConfigProvider.Environment]

  """
  def get_default_providers(), do: [Kitsune.Aws.ConfigProvider.ApplicationConfig, Kitsune.Aws.ConfigProvider.Environment]

  @doc """
  Returns the AWS Access Key ID

  If the configuration was not loaded or the access key ID was not provided in them, `nil` shall be returned.
  """
  @spec get_access_key() :: String.t() | nil
  def get_access_key(), do: get_var(:access_key)

  @doc """
  Returns the AWS Secret Access Key

  If the configuration was not loaded or the secret access key was not provided in them, `nil` shall be returned.
  """
  @spec get_secret_key() :: String.t() | nil
  def get_secret_key(), do: get_var(:secret_key)

  @doc """
  Returns the default AWS region

  If the configuration was not loaded or the region was not provided in them, `nil` shall be returned.
  """
  @spec get_default_region() :: String.t() | nil
  def get_default_region(), do: get_var(:default_region)

  # Constant-time ETS lookup; returns nil when the table does not exist yet
  # (rescued) or the key is absent.
  defp get_var(var) do
    try do
      case :ets.lookup(:kitsune_aws_config, var) do
        [{^var, value}] -> value
        _ -> nil
      end
    rescue
      _ -> nil
    end
  end

  # Lazily creates the named table, stores the three credentials, and returns
  # the config so load/1 can hand it back to the caller.
  defp save(config) do
    if :ets.whereis(:kitsune_aws_config) == :undefined do
      :ets.new :kitsune_aws_config, [:named_table]
    end

    :ets.insert :kitsune_aws_config,
      secret_key: config.secret_key,
      access_key: config.access_key,
      default_region: config.default_region

    config
  end

  # A provider failing for any reason (not loaded, raising, etc.) simply
  # contributes nothing — best effort by design.
  defp load_provider(provider) do
    try do
      apply(provider, :load, [])
    rescue
      _ -> nil
    end
  end
end
|
apps/kitsune_aws_core/lib/kitsune/aws/config.ex
| 0.811713 | 0.480296 |
config.ex
|
starcoder
|
defmodule Day17 do
  @moduledoc """
  --- Day 17: Spinlock ---
  Suddenly, whirling in the distance, you notice what looks like a massive, pixelated hurricane: a deadly spinlock.
  This spinlock isn't just consuming computing power, but memory, too; vast, digital mountains are being ripped from
  the ground and consumed by the vortex.
  If you don't move quickly, fixing that printer will be the least of your problems.
  This spinlock's algorithm is simple but efficient, quickly consuming everything in its path. It starts with a circular
  buffer containing only the value 0, which it marks as the current position. It then steps forward through the circular
  buffer some number of steps (your puzzle input) before inserting the first new value, 1, after the value it stopped
  on. The inserted value becomes the current position. Then, it steps forward from there the same number of steps, and
  wherever it stops, inserts after it the second new value, 2, and uses that as the new current position again.
  It repeats this process of stepping forward, inserting a new value, and using the location of the inserted value as
  the new current position a total of 2017 times, inserting 2017 as its final operation, and ending with a total of 2018
  values (including 0) in the circular buffer.
  For example, if the spinlock were to step 3 times per insert, the circular buffer would begin to evolve like this
  (using parentheses to mark the current position after each iteration of the algorithm):
  (0), the initial state before any insertions.
  0 (1): the spinlock steps forward three times (0, 0, 0), and then inserts the first value, 1, after it. 1 becomes the
  current position.
  0 (2) 1: the spinlock steps forward three times (0, 1, 0), and then inserts the second value, 2, after it. 2 becomes
  the current position.
  0 2 (3) 1: the spinlock steps forward three times (1, 0, 2), and then inserts the third value, 3, after it. 3 becomes
  the current position.
  And so on:
  0 2 (4) 3 1
  0 (5) 2 4 3 1
  0 5 2 4 3 (6) 1
  0 5 (7) 2 4 3 6 1
  0 5 7 2 4 3 (8) 6 1
  0 (9) 5 7 2 4 3 8 6 1
  Eventually, after 2017 insertions, the section of the circular buffer near the last insertion looks like this:
  1512 1134 151 (2017) 638 1513 851
  Perhaps, if you can identify the value that will ultimately be after the last value written (2017), you can
  short-circuit the spinlock. In this example, that would be 638.
  What is the value after 2017 in your completed circular buffer?
  Your puzzle input is 370.
  --- Part Two ---
  The spinlock does not short-circuit. Instead, it gets more angry. At least, you assume that's what happened; it's
  spinning significantly faster than it was a moment ago.
  You have good news and bad news.
  The good news is that you have improved calculations for how to stop the spinlock. They indicate that you actually
  need to identify the value after 0 in the current state of the circular buffer.
  The bad news is that while you were determining this, the spinlock has just finished inserting its fifty millionth
  value (50000000).
  What is the value after 0 the moment 50000000 is inserted?
  Your puzzle input is still 370.
  """

  # Part 2: the buffer is never materialised — only insertions landing right
  # after the fixed 0 are recorded, and the head of the accumulator (the most
  # recent such insertion) is the value after 0.
  def part_b do
    hd(create_spinlock_at_0(370, 0, 0, {0,50000000}, []))
  end

  # Part 1: build the full buffer for 2017 insertions with step size 370.
  def part_a do
    {result, _} = create_spinlock(370, 0, {0,2017},[])
    result
  end

  # Sanity check against the worked example (step size 3) from the puzzle text.
  def test_a do
    {result, _} = create_spinlock(3, 0, {0,2017}, [])
    result
  end

  # All 50M insertions simulated; `acc` holds only the values inserted at the
  # slot following 0.
  defp create_spinlock_at_0(_skips, _pos, _listlength, {max, max}, acc) do
    acc
  end

  # Positions are effectively 1-based here (see true_pos/2): 0 only occurs for
  # the empty buffer, and 1 is the slot immediately after the 0 at the front.
  defp create_spinlock_at_0(skips, pos, listlength, {num, max}, acc) do
    newpos=true_pos(listlength, pos+skips+1)
    case newpos == 0 or newpos == 1 do
      true ->
        create_spinlock_at_0(skips, newpos, listlength+1, {num+1, max}, [num|acc])
      false ->
        create_spinlock_at_0(skips, newpos, listlength+1, {num+1, max}, acc)
    end
  end

  # Base case: `max` itself is never inserted; instead return the value sitting
  # where it WOULD land — i.e. the value after `max`.
  # NOTE(review): pos+skips+1 is not wrapped through true_pos/2 here, so
  # Enum.at/2 could read past the end (nil) for some inputs — verify.
  defp create_spinlock(skips, pos, {max,max}, acc) do
    {Enum.at(acc, pos+skips+1), acc}
  end

  defp create_spinlock(skips, pos, {num, max}, acc) do
    {newpos, newlist} = spin_insert(acc, pos+skips+1, num)
    create_spinlock(skips, newpos, {num+1, max}, newlist)
  end

  # Insert `val` at the wrapped position, returning {position, new_list}.
  defp spin_insert(list, at, val) do
    pos = true_pos(length(list), at)
    {pos, List.insert_at(list, pos, val)}
  end

  # Wrap a stepped-forward position back into range by repeated subtraction.
  # Note the `<=` (not `<`): a result equal to `length` is kept, which makes
  # the scheme 1-based for non-empty buffers.
  defp true_pos(length, _pos) when length == 0 do
    0
  end

  defp true_pos(length, pos) when pos <= length do
    pos
  end

  defp true_pos(length, pos) do
    true_pos(length, pos-length)
  end
end
|
lib/day17.ex
| 0.633864 | 0.793106 |
day17.ex
|
starcoder
|
defmodule Day10 do
@moduledoc """
You come across some programs that are trying to implement a software emulation of a hash based on knot-tying. The
hash these programs are implementing isn't very strong, but you decide to help them anyway. You make a mental note to
remind the Elves later not to invent their own cryptographic functions.
This hash function simulates tying a knot in a circle of string with 256 marks on it. Based on the input to be hashed,
the function repeatedly selects a span of string, brings the ends together, and gives the span a half-twist to reverse
the order of the marks within it. After doing this many times, the order of the marks is used to build the resulting
hash.
4--5 pinch 4 5 4 1
/ \ 5,0,1 / \/ \ twist / \ / \
3 0 --> 3 0 --> 3 X 0
\ / \ /\ / \ / \ /
2--1 2 1 2 5
To achieve this, begin with a list of numbers from 0 to 255, a current position which begins at 0 (the first element
in the list), a skip size (which starts at 0), and a sequence of lengths (your puzzle input). Then, for each length:
Reverse the order of that length of elements in the list, starting with the element at the current position.
Move the current position forward by that length plus the skip size.
Increase the skip size by one.
The list is circular; if the current position and the length try to reverse elements beyond the end of the list, the
operation reverses using as many extra elements as it needs from the front of the list. If the current position moves
past the end of the list, it wraps around to the front. Lengths larger than the size of the list are invalid.
Here's an example using a smaller list:
Suppose we instead only had a circular list containing five elements, 0, 1, 2, 3, 4, and were given input lengths of
3, 4, 1, 5.
The list begins as [0] 1 2 3 4 (where square brackets indicate the current position).
The first length, 3, selects ([0] 1 2) 3 4 (where parentheses indicate the sublist to be reversed).
After reversing that section (0 1 2 into 2 1 0), we get ([2] 1 0) 3 4.
Then, the current position moves forward by the length, 3, plus the skip size, 0: 2 1 0 [3] 4. Finally, the skip size
increases to 1.
The second length, 4, selects a section which wraps: 2 1) 0 ([3] 4.
The sublist 3 4 2 1 is reversed to form 1 2 4 3: 4 3) 0 ([1] 2.
The current position moves forward by the length plus the skip size, a total of 5, causing it not to move because it
wraps around: 4 3 0 [1] 2. The skip size increases to 2.
The third length, 1, selects a sublist of a single element, and so reversing it has no effect.
The current position moves forward by the length (1) plus the skip size (2): 4 [3] 0 1 2. The skip size increases to 3.
The fourth length, 5, selects every element starting with the second: 4) ([3] 0 1 2. Reversing this sublist (3 0 1 2
4 into 4 2 1 0 3) produces: 3) ([4] 2 1 0.
Finally, the current position moves forward by 8: 3 4 2 1 [0]. The skip size increases to 4.
In this example, the first two numbers in the list end up being 3 and 4; to check the process, you can multiply them
together to produce 12.
However, you should instead use the standard list size of 256 (with values 0 to 255) and the sequence of lengths in
your puzzle input. Once this process is complete, what is the result of multiplying the first two numbers in the list?
--- Part Two ---
The logic you've constructed forms a single round of the Knot Hash algorithm; running the full thing requires many of
these rounds. Some input and output processing is also required.
First, from now on, your input should be taken not as a list of numbers, but as a string of bytes instead. Unless
otherwise specified, convert characters to bytes using their ASCII codes. This will allow you to handle arbitrary
ASCII strings, and it also ensures that your input lengths are never larger than 255. For example, if you are given
1,2,3, you should convert it to the ASCII codes for each character: 49,44,50,44,51.
Once you have determined the sequence of lengths to use, add the following lengths to the end of the sequence: 17, 31,
73, 47, 23. For example, if you are given 1,2,3, your final sequence of lengths should be 49,44,50,44,51,17,31,73,47,
23 (the ASCII codes from the input string combined with the standard length suffix values).
Second, instead of merely running one round like you did above, run a total of 64 rounds, using the same length
sequence in each round. The current position and skip size should be preserved between rounds. For example, if the
previous example was your first round, you would start your second round with the same length sequence (3, 4, 1, 5,
17, 31, 73, 47, 23, now assuming they came from ASCII codes and include the suffix), but start with the previous
round's current position (4) and skip size (4).
Once the rounds are complete, you will be left with the numbers from 0 to 255 in some order, called the sparse hash.
Your next task is to reduce these to a list of only 16 numbers called the dense hash. To do this, use numeric bitwise
XOR to combine each consecutive block of 16 numbers in the sparse hash (there are 16 such blocks in a list of 256
numbers). So, the first element in the dense hash is the first sixteen elements of the sparse hash XOR'd together,
the second element in the dense hash is the second sixteen elements of the sparse hash XOR'd together, etc.
For example, if the first sixteen elements of your sparse hash are as shown below, and the XOR operator is ^, you
would calculate the first output number like this:
65 ^ 27 ^ 9 ^ 1 ^ 4 ^ 3 ^ 40 ^ 50 ^ 91 ^ 7 ^ 6 ^ 0 ^ 2 ^ 5 ^ 68 ^ 22 = 64
Perform this operation on each of the sixteen blocks of sixteen numbers in your sparse hash to determine the sixteen
numbers in your dense hash.
Finally, the standard way to represent a Knot Hash is as a single hexadecimal string; the final output is the dense
hash in hexadecimal notation. Because each number in your dense hash will be between 0 and 255 (inclusive), always
represent each number as two hexadecimal digits (including a leading zero as necessary). So, if your first three
numbers are 64, 7, 255, they correspond to the hexadecimal numbers 40, 07, ff, and so the first six characters of the
hash would be 4007ff. Because every Knot Hash is sixteen such numbers, the hexadecimal representation is always 32
hexadecimal digits (0-f) long.
Here are some example hashes:
The empty string becomes a2582a3a0e66e6e86e3812dcb672a272.
AoC 2017 becomes 33efeb34ea91902bb2f59c9920caa6cd.
1,2,3 becomes 3efbe78a8d82f29979031a4aa0b16a9d.
1,2,4 becomes 63960835bcdc130f0b66d7ff4f6a5a8e.
Treating your puzzle input as a string of ASCII characters, what is the Knot Hash of your puzzle input? Ignore any
leading or trailing whitespace you might encounter.
"""
use Bitwise
# Reads a comma-separated list of integers from `file`.
# Fix: leading/trailing whitespace (e.g. a trailing newline) is stripped
# before splitting -- the puzzle statement says "Ignore any leading or
# trailing whitespace", and String.to_integer/1 raises on "3\n".
def common_part_a(file) do
  file
  |> File.read!()
  |> String.trim()
  |> String.split(",")
  |> Enum.map(&String.to_integer/1)
end
# Part A self-test: one round of the knot hash over the list 0..4 using the
# lengths in the test input; answers with the product of the first two values.
def test_a do
  lengths = common_part_a("res/day10_test.input")
  # last param is {current_pos, skip_size}
  {_pos, _skip, [first, second | _rest]} = wrap_hash(lengths, Enum.to_list(0..4), {0, 0})
  first * second
end
# Part B self-test: checks test_b/1 against the known vectors from the
# puzzle statement; raises MatchError on the first mismatch.
def test_b do
  vectors = [
    {'', "a2582a3a0e66e6e86e3812dcb672a272"},
    {'AoC 2017', "33efeb34ea91902bb2f59c9920caa6cd"},
    {'1,2,3', "3efbe78a8d82f29979031a4aa0b16a9d"},
    {'1,2,4', "63960835bcdc130f0b66d7ff4f6a5a8e"}
  ]
  Enum.each(vectors, fn {input, expected} ->
    ^expected = test_b(input)
  end)
  :pass
end
# Computes the full Knot Hash of `inlist` (a charlist of ASCII codes) and
# returns it as a 32-character lowercase hex string.
# Fix: the standard length suffix [17, 31, 73, 47, 23] is appended once,
# before the 64-round loop -- it never changes between rounds, so the
# original per-round Enum.concat was redundant work.
def test_b(inlist) do
  lengths = Enum.concat(inlist, [17, 31, 73, 47, 23])
  {_pos, _skip, sparse} =
    Enum.reduce(0..63, {0, 0, Enum.to_list(0..255)}, fn _round, {pos, skip, list} ->
      wrap_hash(lengths, list, {pos, skip})
    end)
  # Dense hash: XOR each 16-number block, then hex-encode each byte.
  sparse
  |> Enum.chunk_every(16)
  |> Enum.map(fn block -> Enum.reduce(block, fn elm, acc -> bxor(elm, acc) end) end)
  |> List.foldr("", fn x, acc -> Enum.join([integer_to_hex(x), acc]) end)
  |> String.downcase()
end
# Shared helper for day 14: computes the knot hash of `inlist` and renders
# it as a 128-character base-2 charlist instead of hex.
# Fix: the standard length suffix is appended once, outside the 64-round
# loop (it is invariant across rounds, so rebuilding it per round was
# wasted work -- same fix as test_b/1).
def help_day14(inlist) do
  lengths = Enum.concat(inlist, [17, 31, 73, 47, 23])
  {_pos, _skip, sparse} =
    Enum.reduce(0..63, {0, 0, Enum.to_list(0..255)}, fn _round, {pos, skip, list} ->
      wrap_hash(lengths, list, {pos, skip})
    end)
  # Dense hash: XOR each 16-number block, then render each byte as 8 bits.
  sparse
  |> Enum.chunk_every(16)
  |> Enum.map(fn block -> Enum.reduce(block, fn elm, acc -> bxor(elm, acc) end) end)
  |> List.foldr("", fn x, acc -> Enum.join([integer_to_bin(x), acc]) end)
end
# Part A answer: one round of the knot hash over 0..255 using the puzzle
# input's lengths; answers with the product of the first two values.
def part_a do
  lengths = common_part_a("res/day10.input")
  # last param is {current_pos, skip_size}
  {_pos, _skip, [first, second | _rest]} = wrap_hash(lengths, Enum.to_list(0..255), {0, 0})
  first * second
end
# Part B answer: knot-hashes the raw puzzle input as ASCII codes.
# Fix: the input is trimmed first -- the puzzle statement says to ignore
# leading/trailing whitespace, and an untrimmed trailing newline would be
# hashed as an extra length byte, producing a wrong hash.
def part_b do
  "res/day10.input"
  |> File.read!()
  |> String.trim()
  |> String.to_charlist()
  |> test_b()
end
# One knot-hash round: applies each length in the first argument as a
# (possibly wrapping) reversal over the working list, advancing the current
# position by length + skip size and incrementing the skip size after each
# reversal. When all lengths are consumed, returns {pos, skip, list} with
# pos and skip reduced modulo the list length so they can seed the next round.
defp wrap_hash([], l, {current_pos, skip_size}) do
  {rem(current_pos, length(l)), rem(skip_size,length(l)), l}
end
# Reverse `num` elements starting at current_pos (wrapping around the end),
# then recurse on the remaining lengths with updated position/skip.
defp wrap_hash([num|t], hash, {current_pos, skip_size}) do
  wrap_hash(t, reverse_slice_wrap(hash, current_pos, num), {current_pos+num+skip_size, skip_size+1})
end
# Reverses `count` elements of `list` starting at index `start`, treating
# the list as circular. Dispatched over three cases by the guards below.

# Case 1: the slice fits entirely in range -- plain Enum.reverse_slice.
defp reverse_slice_wrap(list,start,count) when length(list) >= start+count do
  Enum.reverse_slice(list, start, count)
end
# Case 2: the start index itself is past the end -- wrap it around first.
defp reverse_slice_wrap(list,start,count) when length(list) <= start do
  reverse_slice_wrap(list, start-length(list), count)
end
# Case 3: the slice starts in range but runs past the end of the list.
defp reverse_slice_wrap(list,start,count) do
  # how many elements spill past the end of the list
  {over_start, over_count} = {length(list), (start+count)-length(list)}
  # append a copy of the list's prefix so the wrapping reversal can be done
  # as one contiguous Enum.reverse_slice over the extended list
  longlist=Enum.concat(list, Enum.slice(list, 0, over_count)) |>
  Enum.reverse_slice(start, count)
  # rebuild the result: the reversed spill-over (now sitting past the
  # original end) becomes the new prefix, followed by the rest of the list
  Enum.concat(
    Enum.slice(longlist, over_start, over_count),
    Enum.slice(longlist, over_count, over_start-over_count))
end
# Formats a byte (0..255) as a two-character, zero-padded, uppercase hex
# charlist (callers downcase the joined result). Collapses the original
# two hand-written width clauses into a single padded conversion.
defp integer_to_hex(x) do
  x
  |> Integer.to_string(16)
  |> String.pad_leading(2, "0")
  |> String.to_charlist()
end
# Formats a byte (0..255) as an eight-character, zero-padded, base-2
# charlist. Replaces the original eight hand-written width clauses
# (one per power of two) with a single padded conversion.
def integer_to_bin(x) do
  x
  |> Integer.to_string(2)
  |> String.pad_leading(8, "0")
  |> String.to_charlist()
end
end
|
lib/day10.ex
| 0.760384 | 0.876687 |
day10.ex
|
starcoder
|
defmodule Polylens do
  @moduledoc """
  Functions for using Polylenses to manipulate and query data
  """
  import ProtocolEx
  # Exclude Kernel's get_in/2 and update_in/3 so the lens-based versions
  # defined below can reuse those names.
  import Kernel, except: [get_in: 2, update_in: 3]
  defprotocol_ex Lens do
    @moduledoc """
    The protocol_ex around which Polylens is based.
    Uses 2-tuples to fake multiple dispatch
    """
    # @type lens() :: term()
    # @spec get({lens(), term()}) :: {:ok, term()} | {:error, term()}
    @doc """
    Gets an item within the data
    """
    def get({lens, data})
    # @spec set({lens(), term()}, term()) :: {:ok, term()} | {:error, term()}
    @doc """
    Sets an item within the data
    """
    def set({lens, data}, value)
    # @spec update({lens(), term()}, (term() -> term())) :: {:ok, term()} | {:error, term()}
    @doc """
    Updates an item within the data with the provided function
    """
    def update(self, func) do
      with {:ok, val} <- get(self),
        do: set(self, func.(val))
    end
  end
  alias Polylens.Lens
  @doc "Gets an item within the data"
  def get(lens, data), do: Lens.get({lens, data})
  @doc "Sets an item within the data"
  def set(lens, data, value), do: Lens.set({lens, data}, value)
  @doc "Updates an item within the data with the provided function"
  def update(lens, data, func), do: Lens.update({lens, data}, func)
  @doc "Like get, but returns the untupled value or throws"
  def get!(lens, data) do
    {:ok, ret} = get(lens, data)
    ret
  end
  @doc "Like set, but returns the untupled value or throws"
  def set!(lens, data, value) do
    {:ok, ret} = set(lens, data, value)
    ret
  end
  @doc "Like update, but returns the untupled value or throws"
  # Fix: third argument renamed value -> func; it is an updater function,
  # not a replacement value (it is applied via func.(val) in Lens.update).
  def update!(lens, data, func) do
    {:ok, ret} = update(lens, data, func)
    ret
  end
  @doc "Like get, but takes a list of lenses to get nested data"
  def get_in(lenses, data) do
    # Walk the lens list inward, halting on the first error.
    ret = Enum.reduce_while(lenses, data, fn lens, data ->
      case get(lens, data) do
        {:ok, data} -> {:cont, data}
        r -> {:halt, r}
      end
    end)
    case ret do
      err = {:error, _} -> err
      other -> {:ok, other}
    end
  end
  @doc "Like set, but takes a list of lenses to set nested data"
  def set_in(lenses, data, value), do: set_in_h_in(lenses, [], data, value)
  # Inward pass: descend through each lens, recording {lens, data} pairs on
  # `path` so the outward pass can rebuild the enclosing structures.
  defp set_in_h_in([], path, _, value), do: set_in_h_out(path, value)
  defp set_in_h_in([lens | lenses], path, data, value) do
    case get(lens, data) do
      {:ok, new_data} -> set_in_h_in(lenses, [ {lens, data} | path ], new_data, value)
      {:error, reason} -> {:error, {reason, path}}
    end
  end
  # Outward pass: re-set each recorded level with the updated inner value.
  defp set_in_h_out([], value), do: {:ok, value}
  defp set_in_h_out([ {lens, data} | path ], value) do
    case set(lens, data, value) do
      {:ok, val} -> set_in_h_out(path, val)
      {:error, reason} -> {:error, {reason, path}}
    end
  end
  @doc "Like update, but takes a list of lenses to update nested data"
  def update_in(lenses, data, func), do: update_in_h_in(lenses, [], data, func)
  # Same inward/outward scheme as set_in, except the innermost value is
  # transformed by `func` rather than replaced.
  defp update_in_h_in([], path, data, func), do: set_in_h_out(path, func.(data))
  defp update_in_h_in([lens | lenses], path, data, func) do
    case get(lens, data) do
      {:ok, new_data} -> update_in_h_in(lenses, [ {lens, data} | path ], new_data, func)
      {:error, reason} -> {:error, {reason, path}}
    end
  end
  @doc "Like get_in, but returns untupled and throws if not ok"
  def get_in!(lenses, data) do
    {:ok, ret} = get_in(lenses, data)
    ret
  end
  @doc "Like set_in, but returns untupled and throws if not ok"
  def set_in!(lenses, data, value) do
    {:ok, ret} = set_in(lenses, data, value)
    ret
  end
  # Fix: doc said "Like update"; this wraps update_in/3. Third argument
  # renamed value -> func for the same reason as update!/3.
  @doc "Like update_in, but returns untupled and throws if not ok"
  def update_in!(lenses, data, func) do
    {:ok, ret} = update_in(lenses, data, func)
    ret
  end
end
|
lib/polylens.ex
| 0.781706 | 0.577078 |
polylens.ex
|
starcoder
|
defmodule Blockchain.Block do
@moduledoc """
This module effectively encodes a block, the heart of the blockchain.
A chain is formed when blocks point to previous blocks,
either as a parent or an ommer (uncle).
For more information, see Section 4.3 of the Yellow Paper.
"""
alias Block.Header
alias Blockchain.BlockGetter
alias Blockchain.BlockSetter
alias Blockchain.{Account, Chain, Transaction}
alias Blockchain.Account.Repo
alias Blockchain.Block.HolisticValidity
alias Blockchain.Transaction.Receipt
alias Blockchain.Transaction.Receipt.Bloom
alias Blockchain.Transaction.Signature
alias ExthCrypto.Hash.Keccak
alias MerklePatriciaTree.{DB, Trie}
alias MerklePatriciaTree.TrieStorage
# Defined in Eq.(19)
# block_hash: Hash for this block, acts simply as a cache,
# header: B_H,
# transactions: B_T,
# ommers: B_U
# metadata: precomputated data required by JSON RPC spec
defstruct block_hash: nil,
header: %Header{},
transactions: [],
receipts: [],
ommers: []
@type t :: %__MODULE__{
block_hash: EVM.hash() | nil,
header: Header.t(),
transactions: [Transaction.t()] | [],
receipts: [Receipt.t()] | [],
ommers: [Header.t()] | []
}
@block_reward_ommer_divisor 32
@block_reward_ommer_offset 8
@doc """
Encodes a block such that it can be represented in RLP encoding.
This is defined as `L_B` Eq.(35) in the Yellow Paper.
## Examples
iex> Blockchain.Block.serialize(%Blockchain.Block{
...> header: %Block.Header{parent_hash: <<1::256>>, ommers_hash: <<2::256>>, beneficiary: <<3::160>>, state_root: <<4::256>>, transactions_root: <<5::256>>, receipts_root: <<6::256>>, logs_bloom: <<>>, difficulty: 5, number: 1, gas_limit: 5, gas_used: 3, timestamp: 6, extra_data: "Hi mom", mix_hash: <<7::256>>, nonce: <<8::64>>},
...> transactions: [%Blockchain.Transaction{nonce: 5, gas_price: 6, gas_limit: 7, to: <<1::160>>, value: 8, v: 27, r: 9, s: 10, data: "hi"}],
...> ommers: [%Block.Header{parent_hash: <<11::256>>, ommers_hash: <<12::256>>, beneficiary: <<13::160>>, state_root: <<14::256>>, transactions_root: <<15::256>>, receipts_root: <<16::256>>, logs_bloom: <<>>, difficulty: 5, number: 1, gas_limit: 5, gas_used: 3, timestamp: 6, extra_data: "Hi mom", mix_hash: <<17::256>>, nonce: <<18::64>>}]
...> })
[
[<<1::256>>, <<2::256>>, <<3::160>>, <<4::256>>, <<5::256>>, <<6::256>>, <<>>, 5, 1, 5, 3, 6, "Hi mom", <<7::256>>, <<8::64>>],
[[<<5>>, <<6>>, <<7>>, <<1::160>>, <<8>>, "hi", <<27>>, <<9>>, <<10>>]],
[[<<11::256>>, <<12::256>>, <<13::160>>, <<14::256>>, <<15::256>>, <<16::256>>, <<>>, 5, 1, 5, 3, 6, "Hi mom", <<17::256>>, <<18::64>>]]
]
iex> Blockchain.Block.serialize(%Blockchain.Block{})
[
[
nil,
<<29, 204, 77, 232, 222, 199, 93, 122, 171, 133, 181, 103, 182, 204, 212, 26, 211, 18, 69, 27, 148, 138, 116, 19, 240, 161, 66, 253, 64, 212, 147, 71>>,
nil,
<<86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33>>,
<<86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33>>,
<<86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33>>,
<<0::2048>>,
nil,
nil,
0,
0,
nil,
"",
<<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0>>,
<<0, 0, 0, 0, 0, 0, 0, 0>>
],
[],
[]
]
"""
@spec serialize(t) :: ExRLP.t()
def serialize(block) do
  # L_H(B_H)
  serialized_header = Header.serialize(block.header)
  # L_T(B_T)*
  serialized_transactions = for transaction <- block.transactions, do: Transaction.serialize(transaction)
  # L_H(B_U)*
  serialized_ommers = for ommer <- block.ommers, do: Header.serialize(ommer)
  [serialized_header, serialized_transactions, serialized_ommers]
end
@doc """
Decodes a block from an RLP encoding. Effectively inverts
L_B defined in Eq.(35).
## Examples
iex> Blockchain.Block.deserialize([
...> [<<1::256>>, <<2::256>>, <<3::160>>, <<4::256>>, <<5::256>>, <<6::256>>, <<>>, <<5>>, <<1>>, <<5>>, <<3>>, <<6>>, "Hi mom", <<7::256>>, <<8::64>>],
...> [[<<5>>, <<6>>, <<7>>, <<1::160>>, <<8>>, "hi", <<27>>, <<9>>, <<10>>]],
...> [[<<11::256>>, <<12::256>>, <<13::160>>, <<14::256>>, <<15::256>>, <<16::256>>, <<>>, <<5>>, <<1>>, <<5>>, <<3>>, <<6>>, "Hi mom", <<17::256>>, <<18::64>>]]
...> ])
%Blockchain.Block{
header: %Block.Header{parent_hash: <<1::256>>, ommers_hash: <<2::256>>, beneficiary: <<3::160>>, state_root: <<4::256>>, transactions_root: <<5::256>>, receipts_root: <<6::256>>, logs_bloom: <<>>, difficulty: 5, number: 1, gas_limit: 5, gas_used: 3, timestamp: 6, extra_data: "Hi mom", mix_hash: <<7::256>>, nonce: <<8::64>>},
transactions: [%Blockchain.Transaction{nonce: 5, gas_price: 6, gas_limit: 7, to: <<1::160>>, value: 8, v: 27, r: 9, s: 10, data: "hi"}],
ommers: [%Block.Header{parent_hash: <<11::256>>, ommers_hash: <<12::256>>, beneficiary: <<13::160>>, state_root: <<14::256>>, transactions_root: <<15::256>>, receipts_root: <<16::256>>, logs_bloom: <<>>, difficulty: 5, number: 1, gas_limit: 5, gas_used: 3, timestamp: 6, extra_data: "Hi mom", mix_hash: <<17::256>>, nonce: <<18::64>>}]
}
"""
@spec deserialize(ExRLP.t()) :: t
def deserialize(rlp) do
  # The RLP term is the three-element list produced by serialize/1.
  [serialized_header, serialized_transactions, serialized_ommers] = rlp
  header = Header.deserialize(serialized_header)
  transactions = for transaction <- serialized_transactions, do: Transaction.deserialize(transaction)
  ommers = for ommer <- serialized_ommers, do: Header.deserialize(ommer)
  %__MODULE__{header: header, transactions: transactions, ommers: ommers}
end
@spec decode_rlp(binary()) :: {:ok, [ExRLP.t()]} | {:error, any()}
# "0x"-prefixed hex string: decode the hex, then fall through to the
# binary clause. Any failure is captured and returned as {:error, e}.
def decode_rlp("0x" <> hex_data) do
  hex_binary = Base.decode16!(hex_data, case: :mixed)
  decode_rlp(hex_binary)
rescue
  e ->
    {:error, e}
end
# Raw RLP binary: decode to an RLP term, then recurse into the list clause.
def decode_rlp(rlp) when is_binary(rlp) do
  rlp |> ExRLP.decode() |> decode_rlp()
rescue
  e ->
    {:error, e}
end
# Already-decoded RLP term list: build the Block struct.
def decode_rlp(rlp_result_list) do
  {:ok, deserialize(rlp_result_list)}
rescue
  e ->
    {:error, e}
end
@doc """
Computes hash of a block, which is simply the hash of the serialized
block after applying RLP encoding.
This is defined in Eq.(37) of the Yellow Paper.
## Examples
iex> %Blockchain.Block{header: %Block.Header{number: 5, parent_hash: <<1, 2, 3>>, beneficiary: <<2, 3, 4>>, difficulty: 100, timestamp: 11, mix_hash: <<1>>, nonce: <<2>>}}
...> |> Blockchain.Block.hash()
<<78, 28, 127, 10, 192, 253, 127, 239, 254, 179, 39, 34, 245, 44, 152, 98, 128, 71, 238, 155, 100, 161, 199, 71, 243, 223, 172, 191, 74, 99, 128, 63>>
iex> %Blockchain.Block{header: %Block.Header{number: 0, parent_hash: <<1, 2, 3>>, beneficiary: <<2, 3, 4>>, difficulty: 100, timestamp: 11, mix_hash: <<1>>, nonce: <<2>>}}
...> |> Blockchain.Block.hash()
<<218, 225, 46, 241, 196, 160, 136, 96, 109, 216, 73, 167, 92, 174, 91, 228, 85, 112, 234, 129, 99, 200, 158, 61, 223, 166, 165, 132, 187, 24, 142, 193>>
"""
@spec hash(t) :: EVM.hash()
# The block hash is the hash of the header only; delegate to Header.hash/1.
def hash(block), do: Header.hash(block.header)
@doc """
Fetches the block hash for a block, either by calculating the block hash
based on the block data, or returning the block hash from the block's struct.
## Examples
iex> %Blockchain.Block{header: %Block.Header{number: 5, parent_hash: <<1, 2, 3>>, beneficiary: <<2, 3, 4>>, difficulty: 100, timestamp: 11, mix_hash: <<1>>, nonce: <<2>>}}
...> |> Blockchain.Block.fetch_block_hash()
<<78, 28, 127, 10, 192, 253, 127, 239, 254, 179, 39, 34, 245, 44, 152, 98, 128, 71, 238, 155, 100, 161, 199, 71, 243, 223, 172, 191, 74, 99, 128, 63>>
iex> %Blockchain.Block{block_hash: <<5::256>>, header: %Block.Header{number: 5, parent_hash: <<1, 2, 3>>, beneficiary: <<2, 3, 4>>, difficulty: 100, timestamp: 11, mix_hash: <<1>>, nonce: <<2>>}}
...> |> Blockchain.Block.fetch_block_hash()
<<5::256>>
"""
@spec fetch_block_hash(t()) :: EVM.hash()
def fetch_block_hash(block) do
  # A cached hash wins; otherwise derive it from the header. (block_hash is
  # either a binary or nil, so `||` is equivalent to the nil check.)
  block.block_hash || hash(block)
end
@doc """
If a block already has a hash, returns the same unchanged, but if the block
hash has not yet been calculated, returns the block with the block hash
stored in the struct.
## Examples
iex> %Blockchain.Block{header: %Block.Header{number: 5, parent_hash: <<1, 2, 3>>, beneficiary: <<2, 3, 4>>, difficulty: 100, timestamp: 11, mix_hash: <<1>>, nonce: <<2>>}}
...> |> Blockchain.Block.with_hash()
%Blockchain.Block{
block_hash: <<78, 28, 127, 10, 192, 253, 127, 239, 254, 179, 39, 34, 245, 44, 152, 98, 128, 71, 238, 155, 100, 161, 199, 71, 243, 223, 172, 191, 74, 99, 128, 63>>,
header: %Block.Header{
number: 5,
parent_hash: <<1, 2, 3>>,
beneficiary: <<2, 3, 4>>,
difficulty: 100,
timestamp: 11,
mix_hash: <<1>>,
nonce: <<2>>
}
}
iex> %Blockchain.Block{block_hash: <<5::256>>, header: %Block.Header{number: 5, parent_hash: <<1, 2, 3>>, beneficiary: <<2, 3, 4>>, difficulty: 100, timestamp: 11, mix_hash: <<1>>, nonce: <<2>>}}
...> |> Blockchain.Block.with_hash()
%Blockchain.Block{
block_hash: <<5::256>>,
header: %Block.Header{
number: 5,
parent_hash: <<1, 2, 3>>,
beneficiary: <<2, 3, 4>>,
difficulty: 100,
timestamp: 11,
mix_hash: <<1>>,
nonce: <<2>>
}
}
"""
@spec with_hash(t()) :: t()
def with_hash(block) do
  # Cache the (possibly freshly computed) hash on the struct.
  block_hash = fetch_block_hash(block)
  %{block | block_hash: block_hash}
end
@doc """
Stores a given block in the database and returns the block hash.
This should be used if we ever want to retrieve that block in
the future.
Note: Blocks are identified by a hash of the block header,
thus we will only get the same block back if the header
matches what we stored.
## Examples
iex> trie = MerklePatriciaTree.Test.random_ets_db() |> MerklePatriciaTree.Trie.new()
iex> block = %Blockchain.Block{header: %Block.Header{number: 5, parent_hash: <<1, 2, 3>>, beneficiary: <<2, 3, 4>>, difficulty: 100, timestamp: 11, mix_hash: <<1>>, nonce: <<2>>}}
iex> {:ok, {hash, _}} = Blockchain.Block.put_block(block, trie)
iex> hash
<<78, 28, 127, 10, 192, 253, 127, 239, 254, 179, 39, 34, 245, 44, 152, 98, 128, 71, 238, 155, 100, 161, 199, 71, 243, 223, 172, 191, 74, 99, 128, 63>>
"""
@spec put_block(t, TrieStorage.t(), binary() | nil) :: {:ok, {EVM.hash(), TrieStorage.t()}}
def put_block(block, trie, precomputated_hash \\ nil) do
  # Attach metadata (and the block hash, optionally precomputed) first.
  block_with_metadata = add_metadata(block, trie, precomputated_hash)
  block_hash = block_with_metadata.block_hash
  # Blocks are stored as opaque Erlang terms keyed by their hash.
  block_bin = :erlang.term_to_binary(block_with_metadata)
  updated_trie =
    trie
    |> TrieStorage.put_raw_key!(block_hash, block_bin)
    # Secondary index: block number -> block hash (see block_hash_key/1).
    |> TrieStorage.put_raw_key!(
      block_hash_key(block.header.number),
      block_hash
    )
    # Presumably indexes each transaction hash to its location in this
    # block (matching the lookups in get_transaction_by_hash/3) -- the
    # helper is defined elsewhere; confirm there.
    |> store_transaction_locations_by_hash(block_with_metadata)
  {:ok, {block_hash, updated_trie}}
end
@doc """
Returns a given block from the database, if the hash
exists in the database.
See `Blockchain.Block.put_block/2` for details.
"""
@spec get_block(EVM.hash() | integer(), TrieStorage.t()) :: {:ok, t} | :not_found
# Lookup by block hash: the stored value is a term_to_binary'd block.
def get_block(block_hash, trie) when is_binary(block_hash) do
  with {:ok, block_bin} <- TrieStorage.get_raw_key(trie, block_hash) do
    # NOTE(review): binary_to_term without [:safe] -- acceptable only
    # because the store is trusted (written by put_block/3).
    block = :erlang.binary_to_term(block_bin)
    {:ok, block}
  end
end
# Lookup by block number: resolve number -> hash, then fetch by hash.
def get_block(block_number, trie) when is_integer(block_number) do
  with {:ok, hash} <- get_block_hash_by_number(trie, block_number) do
    get_block(hash, trie)
  end
end
# A nil identifier can never resolve to a block.
def get_block(nil, _), do: :not_found
@spec block_hash_key(integer()) :: String.t()
# Storage key for the block-number -> block-hash index.
defp block_hash_key(number), do: "hash_for_" <> Integer.to_string(number)
@doc """
Returns the parent node for a given block, if it exists.
We assume a block is a genesis block if it does not have
a valid `parent_hash` set.
## Examples
iex> Blockchain.Block.get_parent_block(%Blockchain.Block{header: %Block.Header{number: 0}}, nil)
:genesis
iex> trie = MerklePatriciaTree.Test.random_ets_db() |> MerklePatriciaTree.Trie.new()
iex> block = %Blockchain.Block{header: %Block.Header{number: 5, parent_hash: <<1, 2, 3>>, beneficiary: <<2, 3, 4>>, difficulty: 100, timestamp: 11, mix_hash: <<1>>, nonce: <<2>>}}
iex> Blockchain.Block.put_block(block, trie)
iex> Blockchain.Block.get_parent_block(%Blockchain.Block{header: %Block.Header{parent_hash: block |> Blockchain.Block.hash}}, trie)
{:ok, %Blockchain.Block{block_hash: <<78, 28, 127, 10, 192, 253, 127, 239, 254, 179, 39, 34, 245, 44, 152, 98, 128, 71, 238, 155, 100, 161, 199, 71, 243, 223, 172, 191, 74, 99, 128, 63>>, header: %Block.Header{number: 5, parent_hash: <<1, 2, 3>>, beneficiary: <<2, 3, 4>>, difficulty: 100, timestamp: 11, mix_hash: <<1>>, nonce: <<2>>, size: 415, total_difficulty: 100}}}
iex> db = MerklePatriciaTree.Test.random_ets_db()
iex> block = %Blockchain.Block{header: %Block.Header{number: 5, parent_hash: <<1, 2, 3>>, beneficiary: <<2, 3, 4>>, difficulty: 100, timestamp: 11, mix_hash: <<1>>, nonce: <<2>>}}
iex> Blockchain.Block.get_parent_block(%Blockchain.Block{header: %Block.Header{parent_hash: block |> Blockchain.Block.hash}}, MerklePatriciaTree.Trie.new(db))
:not_found
"""
@spec get_parent_block(t, TrieStorage.t()) :: {:ok, t} | :genesis | :not_found
def get_parent_block(block, trie) do
  # Block number zero is by definition the genesis block -- no parent lookup.
  if block.header.number == 0 do
    :genesis
  else
    get_block(block.header.parent_hash, trie)
  end
end
@doc """
Returns the total number of transactions
included in a block. This is based on the
transaction list for a given block.
## Examples
iex> Blockchain.Block.get_transaction_count(%Blockchain.Block{transactions: [%Blockchain.Transaction{}, %Blockchain.Transaction{}]})
2
"""
@spec get_transaction_count(t) :: integer()
# Transactions are a plain list, so length/1 is equivalent to Enum.count/1.
def get_transaction_count(block), do: length(block.transactions)
@doc """
Returns a given receipt from a block. This is
based on the receipts root where all receipts
are stored for the given block.
## Examples
iex> trie = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
iex> {updated_block, _new_trie} = Blockchain.Block.put_receipt(%Blockchain.Block{}, 6, %Blockchain.Transaction.Receipt{state: <<1, 2, 3>>, cumulative_gas: 10, bloom_filter: <<2, 3, 4>>, logs: []}, trie)
iex> {updated_block, _new_trie} = Blockchain.Block.put_receipt(updated_block, 7, %Blockchain.Transaction.Receipt{state: <<4, 5, 6>>, cumulative_gas: 11, bloom_filter: <<5, 6, 7>>, logs: []}, trie)
iex> Blockchain.Block.get_receipt(updated_block, 6, trie.db)
%Blockchain.Transaction.Receipt{state: <<1, 2, 3>>, cumulative_gas: 10, bloom_filter: <<2, 3, 4>>, logs: []}
iex> trie = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
iex> {new_block, new_trie} = Blockchain.Block.put_receipt(%Blockchain.Block{}, 6, %Blockchain.Transaction.Receipt{state: <<1, 2, 3>>, cumulative_gas: 10, bloom_filter: <<2, 3, 4>>, logs: []}, trie)
iex> Blockchain.Block.get_receipt(new_block, 7, new_trie.db)
nil
"""
@spec get_receipt(t, integer(), DB.db()) :: Receipt.t() | nil
def get_receipt(block, i, db) do
  # Receipts live in their own trie rooted at the header's receipts_root,
  # keyed by the RLP-encoded receipt index.
  serialized_receipt =
    db
    |> Trie.new(block.header.receipts_root)
    |> Trie.get_key(i |> ExRLP.encode())
  case serialized_receipt do
    nil ->
      # No receipt stored under that index.
      nil
    _ ->
      serialized_receipt
      |> ExRLP.decode()
      |> Receipt.deserialize()
  end
end
@spec get_receipt_by_transaction_hash(binary(), TrieStorage.t()) ::
        {Receipt.t() | nil, Transaction.t() | nil, t()} | nil
# Finds the receipt for a transaction hash: locate the transaction and its
# block, read the transaction's index from the stored location record, and
# pick the receipt at that same index from the block's receipt list.
def get_receipt_by_transaction_hash(transaction_hash, trie) do
  case get_transaction_by_hash(transaction_hash, trie, true) do
    {transaction, block} ->
      location_key = transaction_key(transaction_hash)
      case TrieStorage.get_raw_key(trie, location_key) do
        {:ok, location_bin} ->
          # Location record is a {block_hash, transaction_index} term.
          {_block_hash, transaction_index} = :erlang.binary_to_term(location_bin)
          receipt = Enum.at(block.receipts, transaction_index)
          {receipt, transaction, block}
        _ ->
          nil
      end
    _ ->
      nil
  end
end
@doc """
Returns a given transaction from a block. This is
based on the transactions root where all transactions
are stored for the given block.
"""
@spec get_transaction(t, integer(), DB.db()) :: Transaction.t() | nil
def get_transaction(block, i, db) do
  # Transactions live in their own trie rooted at the header's
  # transactions_root, keyed by the RLP-encoded transaction index.
  encoded_transaction_number = ExRLP.encode(i)
  serialized_transaction =
    db
    |> Trie.new(block.header.transactions_root)
    |> Trie.get_key(encoded_transaction_number)
  case serialized_transaction do
    nil ->
      # No transaction stored under that index.
      nil
    _ ->
      serialized_transaction
      |> ExRLP.decode()
      |> Transaction.deserialize()
  end
end
@spec get_transaction_by_hash(binary(), TrieStorage.t(), boolean()) ::
        Transaction.t() | {Transaction.t(), t()} | nil
# Resolves a transaction hash via its stored location record
# ({block_hash, index}), loads that block, and returns the transaction at
# the recorded index -- paired with the block when `with_block` is true.
def get_transaction_by_hash(transaction_hash, trie, with_block \\ false) do
  location_key = transaction_key(transaction_hash)
  case TrieStorage.get_raw_key(trie, location_key) do
    {:ok, location_bin} ->
      {block_hash, transaction_index} = :erlang.binary_to_term(location_bin)
      case get_block(block_hash, trie) do
        {:ok, block} ->
          if with_block do
            {Enum.at(block.transactions, transaction_index), block}
          else
            Enum.at(block.transactions, transaction_index)
          end
        :not_found ->
          nil
      end
    _ ->
      # No location record for this hash.
      nil
  end
end
@doc """
Returns the cumulative gas used by a block based on the
listed transactions. This is defined largely in the
note after Eq.(66) referenced as l(B_R)_u, or the last
receipt's cumulative gas.
The receipts aren't directly included in the block, so
we'll need to pull it from the receipts root.
Note: this will crash if we do not have a receipt for
the most recent transaction.
## Examples
iex> trie = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
iex> {updated_block, new_trie} = %Blockchain.Block{transactions: [1,2,3,4,5,6,7]}
...> |> Blockchain.Block.put_receipt(6, %Blockchain.Transaction.Receipt{state: <<1, 2, 3>>, cumulative_gas: 10, bloom_filter: <<2, 3, 4>>, logs: []}, trie)
iex> {updated_block, new_trie} = Blockchain.Block.put_receipt(updated_block, 7, %Blockchain.Transaction.Receipt{state: <<4, 5, 6>>, cumulative_gas: 11, bloom_filter: <<5, 6, 7>>, logs: []}, new_trie)
iex> Blockchain.Block.get_cumulative_gas(updated_block, new_trie.db)
11
iex> trie = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
iex> {updated_block, new_trie} = %Blockchain.Block{transactions: [1,2,3,4,5,6]}
...> |> Blockchain.Block.put_receipt(6, %Blockchain.Transaction.Receipt{state: <<1, 2, 3>>, cumulative_gas: 10, bloom_filter: <<2, 3, 4>>, logs: []}, trie)
iex> {updated_block, _new_trie} = Blockchain.Block.put_receipt(updated_block, 7, %Blockchain.Transaction.Receipt{state: <<4, 5, 6>>, cumulative_gas: 11, bloom_filter: <<5, 6, 7>>, logs: []}, new_trie)
...> Blockchain.Block.get_cumulative_gas(updated_block, trie.db)
10
iex> trie = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
iex> %Blockchain.Block{}
...> |> Blockchain.Block.get_cumulative_gas(trie.db)
0
iex> trie = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
iex> {updated_block, new_trie} = %Blockchain.Block{transactions: [1,2,3,4,5,6,7,8]}
...> |> Blockchain.Block.put_receipt(6, %Blockchain.Transaction.Receipt{state: <<1, 2, 3>>, cumulative_gas: 10, bloom_filter: <<2, 3, 4>>, logs: []}, trie)
iex> {updated_block, _new_trie} = Blockchain.Block.put_receipt(updated_block, 7, %Blockchain.Transaction.Receipt{state: <<4, 5, 6>>, cumulative_gas: 11, bloom_filter: <<5, 6, 7>>, logs: []}, new_trie)
iex> Blockchain.Block.get_cumulative_gas(updated_block, trie.db)
** (RuntimeError) cannot find receipt
"""
@spec get_cumulative_gas(t, atom()) :: EVM.Gas.t()
def get_cumulative_gas(block = %__MODULE__{}, db) do
  case get_transaction_count(block) do
    # No transactions -> no gas consumed.
    0 ->
      0
    i ->
      # The final receipt's cumulative_gas covers the whole block.
      # Raising here is deliberate: a missing receipt for the last
      # transaction means the stored block data is incomplete.
      case get_receipt(block, i, db) do
        nil -> raise "cannot find receipt"
        receipt -> receipt.cumulative_gas
      end
  end
end
@doc """
Creates a new block from a parent block. This will handle setting
the block number, the difficulty and will keep the `gas_limit` the
same as the parent's block unless specified in `opts`.
A timestamp is required for difficulty calculation.
If it's not specified, it will default to the current system time.
This function is not directly addressed in the Yellow Paper.
## Examples
iex> %Blockchain.Block{header: %Block.Header{parent_hash: <<0::256>>, beneficiary: <<5::160>>, state_root: <<1::256>>, number: 100_000, difficulty: 15_500_0000, timestamp: 5_000_000, gas_limit: 500_000}}
...> |> Blockchain.Block.gen_child_block(Blockchain.Test.ropsten_chain(), timestamp: 5010000, extra_data: "hi", beneficiary: <<5::160>>)
%Blockchain.Block{
header: %Block.Header{
state_root: <<1::256>>,
beneficiary: <<5::160>>,
number: 100_001,
difficulty: 147_507_383,
timestamp: 5_010_000,
gas_limit: 500_000,
extra_data: "hi",
parent_hash: <<141, 203, 173, 190, 43, 64, 71, 106, 211, 77, 254, 89, 58, 72, 3, 108, 6, 101, 232, 254, 10, 149, 244, 245, 102, 5, 55, 235, 198, 39, 66, 227>>
}
}
iex> %Blockchain.Block{header: %Block.Header{parent_hash: <<0::256>>, beneficiary: <<5::160>>, state_root: <<1::256>>, number: 100_000, difficulty: 1_500_0000, timestamp: 5000, gas_limit: 500_000}}
...> |> Blockchain.Block.gen_child_block(Blockchain.Test.ropsten_chain(), state_root: <<2::256>>, timestamp: 6010, extra_data: "hi", beneficiary: <<5::160>>)
%Blockchain.Block{
header: %Block.Header{
state_root: <<2::256>>,
beneficiary: <<5::160>>,
number: 100_001,
difficulty: 142_74_924,
timestamp: 6010,
gas_limit: 500_000,
extra_data: "hi",
parent_hash: <<233, 151, 241, 216, 121, 36, 187, 39, 42, 93, 8, 68, 162, 118, 84, 219, 140, 35, 220, 90, 118, 129, 76, 45, 249, 55, 241, 82, 181, 30, 22, 128>>
}
}
"""
@spec gen_child_block(t, Chain.t(), keyword()) :: t
def gen_child_block(parent_block, chain, opts \\ []) do
  # Gas limit defaults to the parent's unless overridden via opts.
  gas_limit = get_opts_property(opts, :gas_limit, parent_block.header.gas_limit)
  header = gen_child_header(parent_block, opts)
  # Derive number, difficulty, gas limit and parent hash from the parent.
  %__MODULE__{header: header}
  |> BlockSetter.set_block_number(parent_block)
  |> BlockSetter.set_block_difficulty(chain, parent_block)
  |> BlockSetter.set_block_gas_limit(chain, parent_block, gas_limit)
  |> BlockSetter.set_block_parent_hash(parent_block)
end
@spec gen_child_header(t, keyword()) :: Header.t()
# Builds the child header from opts: timestamp defaults to "now",
# state_root and mix_hash default to the parent's values, beneficiary and
# extra_data default to empty.
defp gen_child_header(parent_block, opts) do
  timestamp = get_opts_property(opts, :timestamp, System.system_time(:second))
  beneficiary = get_opts_property(opts, :beneficiary, nil)
  extra_data = get_opts_property(opts, :extra_data, <<>>)
  state_root = get_opts_property(opts, :state_root, parent_block.header.state_root)
  mix_hash = get_opts_property(opts, :mix_hash, parent_block.header.mix_hash)
  %Header{
    state_root: state_root,
    timestamp: timestamp,
    extra_data: extra_data,
    beneficiary: beneficiary,
    mix_hash: mix_hash
  }
end
@doc """
Attaches an ommer to a block. We do no validation at this stage.
## Examples
iex> Blockchain.Block.add_ommers(%Blockchain.Block{}, [%Block.Header{parent_hash: <<1::256>>, ommers_hash: <<2::256>>, beneficiary: <<3::160>>, state_root: <<4::256>>, transactions_root: <<5::256>>, receipts_root: <<6::256>>, logs_bloom: <<>>, difficulty: 5, number: 1, gas_limit: 5, gas_used: 3, timestamp: 6, extra_data: "Hi mom", mix_hash: <<7::256>>, nonce: <<8::64>>}])
%Blockchain.Block{
ommers: [
%Block.Header{parent_hash: <<1::256>>, ommers_hash: <<2::256>>, beneficiary: <<3::160>>, state_root: <<4::256>>, transactions_root: <<5::256>>, receipts_root: <<6::256>>, logs_bloom: <<>>, difficulty: 5, number: 1, gas_limit: 5, gas_used: 3, timestamp: 6, extra_data: "Hi mom", mix_hash: <<7::256>>, nonce: <<8::64>>}
],
header: %Block.Header{
ommers_hash: <<59, 196, 156, 242, 196, 38, 21, 97, 112, 6, 73, 111, 12, 88, 35, 155, 72, 175, 82, 0, 163, 128, 115, 236, 45, 99, 88, 62, 88, 80, 122, 96>>
}
}
"""
@spec add_ommers(t, [Header.t()]) :: t
def add_ommers(block, ommers) do
  # New ommers are appended to any already attached to the block.
  total_ommers = block.ommers ++ ommers
  # Consistency fix: use the aliased `Header` like the rest of this module,
  # instead of the fully-qualified `Block.Header`.
  serialized_ommers_list = Enum.map(total_ommers, &Header.serialize/1)
  # The header's ommers_hash commits to the RLP encoding of all ommers.
  new_ommers_hash = serialized_ommers_list |> ExRLP.encode() |> Keccak.kec()
  %{block | ommers: total_ommers, header: %{block.header | ommers_hash: new_ommers_hash}}
end
@doc """
Gets an ommer for a given block, based on the ommers_hash.
## Examples
iex> %Blockchain.Block{}
...> |> Blockchain.Block.add_ommers([%Block.Header{parent_hash: <<1::256>>, ommers_hash: <<2::256>>, beneficiary: <<3::160>>, state_root: <<4::256>>, transactions_root: <<5::256>>, receipts_root: <<6::256>>, logs_bloom: <<>>, difficulty: 5, number: 1, gas_limit: 5, gas_used: 3, timestamp: 6, extra_data: "Hi mom", mix_hash: <<7::256>>, nonce: <<8::64>>}])
...> |> Blockchain.Block.get_ommer(0)
%Block.Header{parent_hash: <<1::256>>, ommers_hash: <<2::256>>, beneficiary: <<3::160>>, state_root: <<4::256>>, transactions_root: <<5::256>>, receipts_root: <<6::256>>, logs_bloom: <<>>, difficulty: 5, number: 1, gas_limit: 5, gas_used: 3, timestamp: 6, extra_data: "Hi mom", mix_hash: <<7::256>>, nonce: <<8::64>>}
"""
@spec get_ommer(t, integer()) :: Header.t()
def get_ommer(block, i) do
  # Positional lookup in the ommer list; nil when the index is out of range.
  case Enum.fetch(block.ommers, i) do
    {:ok, ommer} -> ommer
    :error -> nil
  end
end
@doc """
Checks the validity of a block, including the validity of the
header and the transactions. This should verify that we should
accept the authenticity of a block.
## Examples
iex> trie = MerklePatriciaTree.Test.random_ets_db() |> MerklePatriciaTree.Trie.new()
iex> chain = Blockchain.Test.ropsten_chain()
iex> {updated_block, _new_trie} = Blockchain.Genesis.create_block(chain, trie)
iex> {updated_block, _new_trie} = Blockchain.Block.add_rewards(updated_block, trie, chain)
iex> {status, _} = Blockchain.Block.validate(updated_block, chain, nil, trie)
iex> status
:valid
iex> trie = MerklePatriciaTree.Test.random_ets_db() |> MerklePatriciaTree.Trie.new()
iex> chain = Blockchain.Test.ropsten_chain()
iex> {parent, _} = Blockchain.Genesis.create_block(chain, trie)
iex> child = Blockchain.Block.gen_child_block(parent, chain)
iex> Blockchain.Block.validate(child, chain, :parent_not_found, trie)
{:invalid, [:non_genesis_block_requires_parent]}
iex> trie = MerklePatriciaTree.Test.random_ets_db() |> MerklePatriciaTree.Trie.new()
iex> chain = Blockchain.Test.ropsten_chain()
iex> {parent, _} = Blockchain.Genesis.create_block(chain, trie)
iex> beneficiary = <<0x05::160>>
iex> {child, _} = Blockchain.Block.gen_child_block(parent, chain, beneficiary: beneficiary)
...> |> Blockchain.Block.add_rewards(trie, chain)
iex> {status, _} = Blockchain.Block.validate(child, chain, parent, trie)
iex> status
:valid
"""
@spec validate(t, Chain.t(), t, TrieStorage.t()) ::
{:valid, TrieStorage.t()} | {:invalid, [atom()]}
def validate(block, chain, parent_block, db) do
with :valid <- validate_parent_block(block, parent_block),
:valid <- validate_header(block, parent_block, chain) do
HolisticValidity.validate(block, chain, parent_block, db)
end
end
  # Validates a block's header against its parent header and the chain
  # configuration: expected difficulty, gas-limit bounds, and (when the
  # chain supports the DAO fork) the DAO-mandated extra_data.
  defp validate_header(block, parent_block, chain) do
    expected_difficulty = BlockGetter.get_difficulty(block, parent_block, chain)
    # A nil parent (e.g. when validating a genesis block) yields a nil
    # parent header for Header.validate/6.
    parent_block_header = if parent_block, do: parent_block.header, else: nil
    # Only enforce the DAO extra_data rule when this chain supports the
    # fork and the block number is inside the fork's extra-data range.
    validate_dao_extra_data =
      Chain.support_dao_fork?(chain) &&
        Chain.within_dao_fork_extra_range?(chain, block.header.number)
    Header.validate(
      block.header,
      parent_block_header,
      expected_difficulty,
      chain.params[:gas_limit_bound_divisor],
      chain.params[:min_gas_limit],
      validate_dao_extra_data
    )
  end
defp validate_parent_block(block, parent_block) do
if block.header.number > 0 and parent_block == :parent_not_found do
{:invalid, [:non_genesis_block_requires_parent]}
else
:valid
end
end
@doc """
For a given block, this will add the given transactions to its
list of transaction and update the header state accordingly. That
is, we will execute each transaction and update the state root,
transaction receipts, etc. We effectively implement Eq.(2), Eq.(3)
and Eq.(4) of the Yellow Paper, referred to as Π.
The trie db refers to where we expect our trie to exist, e.g.
in `:ets` or `:rocksdb`. See `MerklePatriciaTree.DB`.
"""
@spec add_transactions(t, [Transaction.t()], DB.db(), Chain.t()) :: t
def add_transactions(block, transactions, trie, chain) do
{updated_block, updated_trie} = process_hardfork_specifics(block, chain, trie)
{updated_block, updated_trie} =
do_add_transactions(updated_block, transactions, updated_trie, chain)
updated_block = calculate_logs_bloom(updated_block)
{updated_block, updated_trie}
end
  # Applies hardfork-specific state adjustments before transaction
  # execution: when the chain supports the DAO fork and this block is the
  # fork block, the chain's DAO fork logic (Blockchain.Hardfork.Dao) is
  # executed and the block's state root is updated to the resulting state.
  # Returns {block, trie}; a no-op pass-through otherwise.
  defp process_hardfork_specifics(block, chain, trie) do
    if Chain.support_dao_fork?(chain) && Chain.dao_fork?(chain, block.header.number) do
      repo =
        trie
        |> TrieStorage.set_root_hash(block.header.state_root)
        |> Account.Repo.new()
        |> Blockchain.Hardfork.Dao.execute(chain)
      # Persist the post-fork state root into the block header.
      updated_block = put_state(block, repo.state)
      {updated_block, repo.state}
    else
      {block, trie}
    end
  end
  @spec do_add_transactions(t, [Transaction.t()], DB.db(), Chain.t(), integer()) ::
          {t, TrieStorage.t()}
  # Executes the block's transactions one at a time, threading the trie
  # and the running gas total through each step. `trx_count` is the
  # transaction's index within the block, used as the trie key for both
  # the receipt and the transaction entries.
  defp do_add_transactions(block, transactions, state, chain, trx_count \\ 0)
  defp do_add_transactions(block, [], trie, _, _), do: {block, trie}
  defp do_add_transactions(
         block = %__MODULE__{header: header},
         [trx | transactions],
         trie,
         chain,
         trx_count
       ) do
    # Execution must start from the block's current state root.
    state = TrieStorage.set_root_hash(trie, header.state_root)
    {new_account_repo, gas_used, receipt} =
      Transaction.execute_with_validation(state, trx, header, chain)
    # Commit pending account changes before reading the resulting state.
    new_state = Repo.commit(new_account_repo).state
    total_gas_used = block.header.gas_used + gas_used
    # Receipts record the cumulative gas used up to and including this
    # transaction, not just this transaction's own gas.
    receipt = %{receipt | cumulative_gas: total_gas_used}
    updated_block =
      block
      |> put_state(new_state)
      |> put_gas_used(total_gas_used)
    {updated_block, updated_state} = put_receipt(updated_block, trx_count, receipt, new_state)
    {updated_block, updated_state} = put_transaction(updated_block, trx_count, trx, updated_state)
    do_add_transactions(updated_block, transactions, updated_state, chain, trx_count + 1)
  end
@spec calculate_logs_bloom(t()) :: t()
defp calculate_logs_bloom(block) do
logs_bloom = Bloom.from_receipts(block.receipts)
updated_header = %{block.header | logs_bloom: logs_bloom}
%{block | header: updated_header}
end
# Updates a block to have a new state root given a state object
@spec put_state(t, Trie.t()) :: t
def put_state(block = %__MODULE__{header: header = %Header{}}, new_state) do
root_hash = TrieStorage.root_hash(new_state)
%{block | header: %{header | state_root: root_hash}}
end
# Updates a block to have total gas used set in the header
@spec put_gas_used(t, EVM.Gas.t()) :: t
def put_gas_used(block = %__MODULE__{header: header}, gas_used) do
%{block | header: %{header | gas_used: gas_used}}
end
  @doc """
  Updates a block by adding a receipt to the list of receipts
  at position `i`. Returns the updated block and updated trie.
  ## Examples
  iex> trie = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
  iex> {block, _} = Blockchain.Block.put_receipt(%Blockchain.Block{}, 5, %Blockchain.Transaction.Receipt{state: <<1, 2, 3>>, cumulative_gas: 10, bloom_filter: <<2, 3, 4>>, logs: "hi mom"}, trie)
  iex> MerklePatriciaTree.Trie.into(block.header.receipts_root, trie)
  ...> |> MerklePatriciaTree.Trie.Inspector.all_values()
  [{<<5>>, <<208, 131, 1, 2, 3, 10, 131, 2, 3, 4, 134, 104, 105, 32, 109, 111, 109>>}]
  """
  @spec put_receipt(t, integer(), Receipt.t(), TrieStorage.t()) :: {t, TrieStorage.t()}
  def put_receipt(block, i, receipt, trie) do
    # Receipts live in the receipts subtrie, keyed by RLP-encoded index.
    encoded_receipt = receipt |> Receipt.serialize() |> ExRLP.encode()
    {subtrie, updated_trie} =
      TrieStorage.update_subtrie_key(
        trie,
        block.header.receipts_root,
        ExRLP.encode(i),
        encoded_receipt
      )
    # The header tracks the new root of the receipts subtrie.
    updated_receipts_root = TrieStorage.root_hash(subtrie)
    updated_header = %{block.header | receipts_root: updated_receipts_root}
    updated_receipts = block.receipts ++ [receipt]
    {%{block | header: updated_header, receipts: updated_receipts}, updated_trie}
  end
  @doc """
  Updates a block by adding a transaction to the list of transactions
  and updating the transactions_root in the header at position `i`, which
  should be equivalent to the current number of transactions.
  Returns the updated block and updated trie.
  ## Examples
  iex> trie = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
  iex> {block, _new_trie} = Blockchain.Block.put_transaction(%Blockchain.Block{}, 0, %Blockchain.Transaction{nonce: 1, v: 2, r: 3, s: 4}, trie)
  iex> block.transactions
  [%Blockchain.Transaction{nonce: 1, v: 2, r: 3, s: 4}]
  iex> MerklePatriciaTree.Trie.into(block.header.transactions_root, trie)
  ...> |> MerklePatriciaTree.Trie.Inspector.all_values()
  [{<<0x80>>, <<201, 1, 128, 128, 128, 128, 128, 2, 3, 4>>}]
  """
  @spec put_transaction(t, integer(), Transaction.t(), TrieStorage.t()) :: {t, TrieStorage.t()}
  def put_transaction(block, i, trx, trie) do
    total_transactions = block.transactions ++ [trx]
    # Transactions live in the transactions subtrie, keyed by their
    # RLP-encoded index within the block.
    encoded_transaction = trx |> Transaction.serialize() |> ExRLP.encode()
    {subtrie, updated_trie} =
      TrieStorage.update_subtrie_key(
        trie,
        block.header.transactions_root,
        ExRLP.encode(i),
        encoded_transaction
      )
    # The header tracks the new root of the transactions subtrie.
    updated_transactions_root = TrieStorage.root_hash(subtrie)
    {%{
       block
       | transactions: total_transactions,
         header: %{block.header | transactions_root: updated_transactions_root}
     }, updated_trie}
  end
@doc """
Adds the rewards to miners (including for ommers) to a block.
This is defined in Section 11.3, Eq.(159-163) of the Yellow Paper.
## Examples
iex> db = MerklePatriciaTree.Test.random_ets_db()
iex> miner = <<0x05::160>>
iex> chain = Blockchain.Test.ropsten_chain()
iex> state = MerklePatriciaTree.Trie.new(db)
...> |> Blockchain.Account.put_account(miner, %Blockchain.Account{balance: 400_000})
iex> block = %Blockchain.Block{header: %Block.Header{number: 0, state_root: state.root_hash, beneficiary: miner}}
iex> {updated_block, _new_trie} =
...> block
...> |> Blockchain.Block.add_rewards(MerklePatriciaTree.Trie.new(db), chain)
iex> updated_block
...> |> Blockchain.BlockGetter.get_state(MerklePatriciaTree.Trie.new(db))
...> |> Blockchain.Account.get_accounts([miner])
[%Blockchain.Account{balance: 400_000}]
iex> db = MerklePatriciaTree.Test.random_ets_db()
iex> miner = <<0x05::160>>
iex> chain = Blockchain.Test.ropsten_chain()
iex> state = MerklePatriciaTree.Trie.new(db)
...> |> Blockchain.Account.put_account(miner, %Blockchain.Account{balance: 400_000})
iex> block = %Blockchain.Block{header: %Block.Header{state_root: state.root_hash, beneficiary: miner}}
iex> {updated_block, updated_trie} = Blockchain.Block.add_rewards(block, state, chain)
iex> updated_block
...> |> Blockchain.BlockGetter.get_state(updated_trie)
...> |> Blockchain.Account.get_accounts([miner])
[%Blockchain.Account{balance: 2000000000000400000}]
"""
@spec add_rewards(t, TrieStorage.t(), Chain.t()) :: {t, TrieStorage.t()}
def add_rewards(block, trie, chain)
def add_rewards(%{header: %{beneficiary: beneficiary}}, _trie, _chain)
when is_nil(beneficiary),
do: raise("Unable to add block rewards, beneficiary is nil")
def add_rewards(block = %{header: %{number: number}}, trie, _chain)
when number == 0,
do: {block, trie}
def add_rewards(block, trie, chain) do
base_reward = Chain.block_reward_for_block(chain, block.header.number)
state =
block
|> BlockGetter.get_state(trie)
|> add_miner_reward(block, base_reward)
|> add_ommer_rewards(block, base_reward)
updated_block = BlockSetter.set_state(block, state)
{updated_block, state}
end
@spec add_metadata(t(), TrieStorage.t(), binary() | nil) :: t()
def add_metadata(block, trie, predefined_hash \\ nil) do
block_rlp_size =
block
|> serialize
|> ExRLP.encode()
|> byte_size()
total_difficulty =
case get_block(block.header.parent_hash, trie) do
{:ok, parent_block} ->
parent_block.header.total_difficulty + block.header.difficulty
_ ->
block.header.difficulty
end
hash = if predefined_hash, do: predefined_hash, else: hash(block)
updated_block = %{block.header | size: block_rlp_size, total_difficulty: total_difficulty}
%{block | block_hash: hash, header: updated_block}
end
defp add_miner_reward(state, block, base_reward) do
ommer_reward = round(base_reward * length(block.ommers) / @block_reward_ommer_divisor)
reward = ommer_reward + base_reward
Account.add_wei(state, block.header.beneficiary, reward)
end
defp add_ommer_rewards(state, block, base_reward) do
Enum.reduce(block.ommers, state, fn ommer, state ->
height_difference = block.header.number - ommer.number
reward =
round(
(@block_reward_ommer_offset - height_difference) *
(base_reward / @block_reward_ommer_offset)
)
Account.add_wei(state, ommer.beneficiary, reward)
end)
end
  # Looks up a block hash by block number via the raw-key index
  # (block_hash_key/1, defined elsewhere in this module).
  defp get_block_hash_by_number(trie, block_number) do
    TrieStorage.get_raw_key(trie, block_hash_key(block_number))
  end
defp get_opts_property(opts, property, default) do
case Keyword.get(opts, property, nil) do
nil -> default
property_value -> property_value
end
end
  # Indexes each of the block's transactions by its signature hash,
  # storing a {block_hash, index} location under a "transaction_<hex>"
  # raw key so a transaction can later be found without scanning blocks.
  defp store_transaction_locations_by_hash(trie, block) do
    block.transactions
    |> Enum.with_index()
    |> Enum.reduce(trie, fn {transaction, idx}, trie_acc ->
      transaction_key =
        transaction
        |> Signature.transaction_hash()
        |> transaction_key()
      # Location is an Erlang term: {containing block's hash, index in block}.
      location = :erlang.term_to_binary({block.block_hash, idx})
      TrieStorage.put_raw_key!(trie_acc, transaction_key, location)
    end)
  end
defp transaction_key(hash) do
hash_hex = Base.encode16(hash, case: :lower)
"transaction_#{hash_hex}"
end
end
|
apps/blockchain/lib/blockchain/block.ex
| 0.859708 | 0.451689 |
block.ex
|
starcoder
|
defmodule Indicado.RSI do
  @moduledoc """
  This is the RSI module used for calculating Relative Strength Index
  """

  @doc """
  Calculates RSI for the list. It needs list of numbers and the length of
  list argument should at least be 1 more than period.
  Returns `{:ok, rsi_list}` or `{:error, reason}`
  ## Examples
  iex> Indicado.RSI.eval([1, 2, 3, 4, 5, 6], 2)
  {:ok, [100.0, 100.0, 100.0, 100.0]}
  iex> Indicado.RSI.eval([5, 4, 3, 2, 1, 0], 4)
  {:ok, [0.0, 0.0]}
  iex> Indicado.RSI.eval([2, 4, 6, 7, 2, 1, 5, 10], 3)
  {:ok, [100.0, 37.5, 14.285714285714292, 39.99999999999999, 90.0]}
  iex> Indicado.RSI.eval([], 2)
  {:error, :not_enough_data}
  iex> Indicado.RSI.eval([1, 5, 10], 3)
  {:error, :not_enough_data}
  iex> Indicado.RSI.eval([1, 4], 0)
  {:error, :bad_period}
  """
  # Spec fixed: the success and error tuples are alternatives; the input
  # is a list of numbers, not `nonempty_list(list)`.
  @spec eval([number], pos_integer) :: {:ok, nonempty_list(float)} | {:error, atom}
  def eval(list, period), do: calc(list, period)

  @doc """
  Calculates RSI for the list. It needs list of numbers and the length of
  list argument should at least be 1 more than period.
  Raises `NotEnoughDataError` if the given list is not long enough for calculating RSI.
  Raises `BadPeriodError` if period is an unacceptable number.
  ## Examples
  iex> Indicado.RSI.eval!([1, 3, 5, 7], 2)
  [100.0, 100.0]
  iex> Indicado.RSI.eval!([1, 3], 3)
  ** (NotEnoughDataError) not enough data
  iex> Indicado.RSI.eval!([1, 3, 4], 0)
  ** (BadPeriodError) bad period
  """
  @spec eval!([number], pos_integer) :: nonempty_list(float) | no_return
  def eval!(list, period) do
    case calc(list, period) do
      {:ok, result} -> result
      {:error, :not_enough_data} -> raise NotEnoughDataError
      {:error, :bad_period} -> raise BadPeriodError
    end
  end

  defp calc(list, period, results \\ [])
  defp calc([], _period, []), do: {:error, :not_enough_data}
  defp calc(_list, period, _results) when period < 1, do: {:error, :bad_period}
  defp calc([], _period, results), do: {:ok, Enum.reverse(results)}

  # Fewer than period + 1 points remain: no further RSI values can be
  # produced, so slide the window until the list is exhausted.
  defp calc([_head | tail] = list, period, results) when length(list) < period + 1 do
    calc(tail, period, results)
  end

  defp calc([_head | tail] = list, period, results) do
    # Average gains/losses over the window of `period` consecutive deltas.
    # Note: Enum.group_by/2 never produces empty value lists, so a
    # `{type, []}` clause would be dead code (removed).
    averages =
      list
      |> Enum.take(period + 1)
      |> Enum.chunk_every(2, 1, :discard)
      |> Enum.map(fn [x, y] -> y - x end)
      |> Enum.group_by(fn x -> if x > 0, do: :gain, else: :loss end)
      |> Map.new(fn {type, values} -> {type, Enum.sum(values) / period} end)
      |> Map.put_new(:loss, 0.0)
      |> Map.put_new(:gain, 0.0)

    if averages.loss == 0 do
      # No losses in the window: RSI is defined as 100.
      calc(tail, period, [100.0 | results])
    else
      rs = averages.gain / abs(averages.loss)
      rsi = 100.0 - 100.0 / (1.0 + rs)
      calc(tail, period, [rsi | results])
    end
  end
end
|
lib/indicado/rsi.ex
| 0.90539 | 0.620694 |
rsi.ex
|
starcoder
|
defmodule Algorithms.Sorting.SelectionSort do
  @moduledoc """
  Implementation of SelectionSort algorithm (https://en.wikipedia.org/wiki/Selection_sort)
  You will be given an array of numbers, you have to sort numbers in ascending order
  using selection sort algorithm.
  The algorithm divides the input list into two parts:
  - A sorted sublist of items which is built up from left to right at the front (left) of the list
  - A sublist of the remaining unsorted items that occupy the rest of the list
  Initially, the sorted sublist is empty and the unsorted sublist is the entire input list.
  The algorithm proceeds by finding the smallest (or largest, depending on sorting order)
  element in the unsorted sublist, exchanging (swapping) it with the leftmost unsorted
  element (putting it in sorted order), and moving the sublist boundaries one element to the right.
  Example:
  Input: [11, 25, 12, 22, 64]
  | Sorted sublist | Unsorted sublist | Least element in unsorted list
  | [] | [11, 25, 12, 22, 64] | 11
  | [11] | [25, 12, 22, 64] | 12
  | [11, 12] | [25, 22, 64] | 22
  | [11, 12, 22] | [25, 64] | 25
  | [11, 12, 22, 25] | [64] | 64
  | [11, 12, 22, 25, 64]| [] |
  Complexity: O(n^2)
  """
  require Integer

  @doc """
  take a List and return the List with the numbers ordered.
  An empty list sorts to an empty list.
  """
  @spec selection_sort(list(Integer)) :: list(Integer)
  def selection_sort(list) do
    do_selection(list, [])
  end

  # Kept public for backward compatibility with the original interface:
  # returns `acc` with the sorted elements of `list` appended.
  # Fixed: the original had no clause for an empty input list, so
  # selection_sort([]) crashed with a FunctionClauseError.
  def do_selection([], acc), do: acc

  def do_selection(list, acc) do
    acc ++ sort_remaining(list, [])
  end

  # Tail-recursive selection loop: repeatedly extract the minimum.
  # Accumulates by prepending (O(1)) and reverses once at the end,
  # avoiding the original's O(n^2) `acc ++ [x]` appends.
  defp sort_remaining([], sorted), do: Enum.reverse(sorted)

  defp sort_remaining(list, sorted) do
    # Enum.min/1 replaces the hand-rolled min/smaller helpers;
    # List.delete/2 removes only the first occurrence, preserving duplicates.
    smallest = Enum.min(list)
    sort_remaining(List.delete(list, smallest), [smallest | sorted])
  end
end
|
lib/sorting/selection_sort.ex
| 0.79546 | 0.790975 |
selection_sort.ex
|
starcoder
|
defmodule Unicode.Category.QuoteMarks do
  @moduledoc """
  Functions to return codepoints that form quotation marks. These
  marks are taken from the [Wikipedia definition](https://en.wikipedia.org/wiki/Quotation_mark)
  which is more expansive than the Unicode categories [Pi](https://www.compart.com/en/unicode/category/Pi)
  and [Pf](https://www.compart.com/en/unicode/category/Pf).
  The full list of codepoints considered to be quote marks is tabled here.
  ## Unicode code point table
  These are codepoints noted in the Unicode character data base with the flag
  `quotation mark = yes`. These are equivalent to the unicode sets `Pi` and `Pf`.
  | Glyph | Code | Unicode name | HTML | Comments |
  | ----- | ---- | ------------ | ---- | -------- |
  | \u0022 | U+0022 | Quotation mark | &quot; | Typewriter ("programmer's") quote, ambidextrous. Also known as "double quote".
  | \u0027 | U+0027 | Apostrophe | &#39; | Typewriter ("programmer's") straight single quote, ambidextrous
  | \u00AB | U+00AB | Left-pointing double angle quotation mark | &laquo; | Double angle quote
  | \u00BB | U+00BB | Right-pointing double angle quotation mark | &raquo; | Double angle quote, right
  | \u2018 | U+2018 | Left single quotation mark | &lsquo; | Single curved quote, left. Also known as ''inverted [[comma]]'' or ''turned comma''
  | \u2019 | U+2019 | Right single quotation mark | &rsquo; | Single curved quote, right
  | \u201A | U+201A | Single low-9 quotation mark | &sbquo; | Low single curved quote, left
  | \u201B | U+201B | Single high-reversed-9 quotation mark | &#8219; | also called ''single reversed comma'', ''quotation mark''
  | \u201C | U+201C | Left double quotation mark | &ldquo; | Double curved quote, left
  | \u201D | U+201D | Right double quotation mark | &rdquo; | Double curved quote, right
  | \u201E | U+201E | Double low-9 quotation mark | &bdquo; | Low double curved quote, left
  | \u201F | U+201F | Double high-reversed-9 quotation mark | &#8223; | also called ''double reversed comma'', ''quotation mark''
  | \u2039 | U+2039 | Single left-pointing angle quotation mark | &lsaquo; | Single angle quote, left
  | \u203A | U+203A | Single right-pointing angle quotation mark | &rsaquo; | Single angle quote, right
  | \u2E42 | U+2E42 | Double low-reversed-9 quotation mark | &#11842; | also called ''double low reversed comma'', ''quotation mark''
  ### Quotation marks in dingbats
  | Glyph | Code | Unicode name | HTML | Comments |
  | ----- | ---- | ------------ | ---- | -------- |
  | \u275B | U+275B | Heavy single turned comma quotation mark ornament | &#10075; | <code>Quotation Mark=No</code>
  | \u275C | U+275C | Heavy single comma quotation mark ornament | &#10076; | <code>Quotation Mark=No</code>
  | \u275D | U+275D | Heavy double turned comma quotation mark ornament | &#10077; | <code>Quotation Mark=No</code>
  | \u275E | U+275E | Heavy double comma quotation mark ornament | &#10078; | <code>Quotation Mark=No</code>
  | \u{1F676} | U+1F676 | SANS-SERIF HEAVY DOUBLE TURNED COMMA QUOTATION MARK ORNAMENT | &#128630; | <code>Quotation Mark=No</code>
  | \u{1F677} | U+1F677 | SANS-SERIF HEAVY DOUBLE COMMA QUOTATION MARK ORNAMENT | &#128631; | <code>Quotation Mark=No</code>
  | \u{1F678} | U+1F678 | SANS-SERIF HEAVY LOW DOUBLE COMMA QUOTATION MARK ORNAMENT | &#128632; | <code>Quotation Mark=No</code>
  ### Quotation marks in Braille Patterns
  | Glyph | Code | Unicode name | HTML | Comments |
  | ----- | ---- | ------------ | ---- | -------- |
  | \u2826 | U+2826 | Braille pattern dots-236 | &#10292; | Braille double closing quotation mark; <code>Quotation Mark=No</code>
  | \u2834 | U+2834 | Braille pattern dots-356 | &#10278; | Braille double opening quotation mark; <code>Quotation Mark=No</code>
  ### Quotation marks in Chinese, Japanese, and Korean
  | Glyph | Code | Unicode name | HTML | Comments |
  | ----- | ---- | ------------ | ---- | -------- |
  | \u300C | U+300C | Left corner bracket | &#12300; | CJK
  | \u300D | U+300D | Right corner bracket | &#12301; | CJK
  | \u300E | U+300E | Left white corner bracket | &#12302; | CJK
  | \u300F | U+300F | Right white corner bracket | &#12303; | CJK
  | \u301D | U+301D | REVERSED DOUBLE PRIME QUOTATION MARK | &#12317; | CJK
  | \u301E | U+301E | DOUBLE PRIME QUOTATION MARK | &#12318; | CJK
  | \u301F | U+301F | LOW DOUBLE PRIME QUOTATION MARK | &#12319; | CJK
  ### Alternate encodings
  | Glyph | Code | Unicode name | HTML | Comments |
  | ----- | ---- | ------------ | ---- | -------- |
  | \uFE41 | U+FE41 | PRESENTATION FORM FOR VERTICAL LEFT CORNER BRACKET | &#65089; | CJK Compatibility, preferred use: U+300C
  | \uFE42 | U+FE42 | PRESENTATION FORM FOR VERTICAL RIGHT CORNER BRACKET | &#65090; | CJK Compatibility, preferred use: U+300D
  | \uFE43 | U+FE43 | PRESENTATION FORM FOR VERTICAL LEFT WHITE CORNER BRACKET | &#65091; | CJK Compatibility, preferred use: U+300E
  | \uFE44 | U+FE44 | PRESENTATION FORM FOR VERTICAL RIGHT WHITE CORNER BRACKET | &#65092; | CJK Compatibility, preferred use: U+300F
  | \uFF02 | U+FF02 | FULLWIDTH QUOTATION MARK | &#65282; | Halfwidth and Fullwidth Forms, corresponds with U+0022
  | \uFF07 | U+FF07 | FULLWIDTH apostrophe | &#65287; | Halfwidth and Fullwidth Forms, corresponds with U+0027
  | \uFF62 | U+FF62 | HALFWIDTH LEFT CORNER BRACKET | &#65378; | Halfwidth and Fullwidth Forms, corresponds with U+300C
  | \uFF63 | U+FF63 | HALFWIDTH right CORNER BRACKET | &#65379; | Halfwidth and Fullwidth Forms, corresponds with U+300D
  """

  @doc """
  Returns the codepoints for quote marks typically used on the
  left (in left-to-right languages).
  """
  def quote_marks_left do
    [
      0x00AB, 0x2018, 0x201A, 0x201C, 0x201E, 0x2039, 0x2826, 0x300C,
      0x300E, 0xFE41, 0xFE43, 0xFF62, 0x1F676, 0x275D, 0x275B
    ]
  end

  @doc """
  Returns the codepoints for quote marks typically used on the
  right (in left-to-right languages).
  """
  def quote_marks_right do
    [
      0x00BB, 0x2019, 0x201B, 0x201D, 0x203A, 0x2834, 0x300D, 0x300F,
      0xFE42, 0xFE44, 0xFF63, 0x1F677, 0x275C, 0x275E
    ]
  end

  @doc """
  Returns the codepoints for quote marks that may appear on either
  side (in left-to-right languages).
  """
  def quote_marks_ambidextrous do
    [0x0022, 0x0027, 0x201F, 0x2E42, 0x301D, 0x301E, 0x301F, 0xFF02, 0xFF07, 0x1F678]
  end

  @doc """
  Returns the codepoints for quote marks used in Braille.
  """
  def quote_marks_braille do
    [0x2826, 0x2834]
  end

  @doc """
  Returns the codepoints for quote marks understood to be single marks.
  """
  def quote_marks_single do
    [
      0x0027, 0x2018, 0x2019, 0x201A, 0x201B, 0x2039, 0x203A, 0x275B,
      0x275C, 0x300C, 0x300D, 0x300E, 0x300F, 0xFE41, 0xFE42, 0xFE43,
      0xFE44, 0xFF07, 0xFF62, 0xFF63
    ]
  end

  @doc """
  Returns the codepoints for quote marks understood to be double marks.
  """
  def quote_marks_double do
    [
      0x0022, 0x00AB, 0x00BB, 0x201C, 0x201D, 0x201E, 0x201F, 0x2E42,
      0x275D, 0x275E, 0x1F676, 0x1F677, 0x1F678, 0x2826, 0x2834, 0x301D,
      0x301E, 0x301F, 0xFF02
    ]
  end

  @doc """
  Returns the deduplicated union of all quote mark codepoints.
  """
  def all_quote_marks do
    [
      quote_marks_left(),
      quote_marks_right(),
      quote_marks_ambidextrous(),
      quote_marks_single(),
      quote_marks_double()
    ]
    |> Enum.concat()
    |> Enum.uniq()
  end
end
|
lib/unicode/category/quote_marks.ex
| 0.818773 | 0.511839 |
quote_marks.ex
|
starcoder
|
defmodule Matrex.MagicSquare do
  @moduledoc false
  # Magic square generation algorithms. Builds an n x n magic square as a
  # list of row lists, dispatching on the parity class of n.

  # Entry offsets for Conway's LUX method (singly even orders).
  @lux %{L: [4, 1, 2, 3], U: [1, 4, 2, 3], X: [1, 4, 3, 2]}

  def new(n) when n < 3, do: raise(ArgumentError, "Magic square less than 3x3 is not possible.")

  # Odd order: closed-form (Siamese-style) construction.
  def new(n) when rem(n, 2) == 1 do
    for i <- 0..(n - 1) do
      for j <- 0..(n - 1),
          do: n * rem(i + j + 1 + div(n, 2), n) + rem(i + 2 * j + 2 * n - 5, n) + 1
    end
  end

  # Doubly even order (n divisible by 4): pattern/complement method.
  # Fixed: Enum.chunk/2 is deprecated (and removed in modern Elixir);
  # Enum.chunk_every/2 is the drop-in replacement.
  def new(n) when rem(n, 4) == 0 do
    n2 = n * n

    Enum.zip(1..n2, make_pattern(n))
    |> Enum.map(fn {i, p} -> if p, do: i, else: n2 - i + 1 end)
    |> Enum.chunk_every(n)
  end

  # Singly even order (n = 4k + 2): Conway's LUX method built on an odd
  # magic square of half the size.
  def new(n) when rem(n - 2, 4) == 0 do
    n2 = div(n, 2)
    synthesis(n2, odd_magic_square(n2), make_lux_matrix(n2))
  end

  # zero beginning, it is 4 multiples.
  defp odd_magic_square(m) do
    for i <- 0..(m - 1),
        j <- 0..(m - 1),
        into: %{},
        do: {{i, j}, (m * rem(i + j + 1 + div(m, 2), m) + rem(i + 2 * j - 5 + 2 * m, m)) * 4}
  end

  # Assigns an L/U/X letter to each cell of the half-size grid, with the
  # standard swap of the two center cells.
  defp make_lux_matrix(m) do
    center = div(m, 2)
    lux = List.duplicate(:L, center + 1) ++ [:U] ++ List.duplicate(:X, m - center - 2)

    for(
      {x, i} <- Enum.with_index(lux),
      j <- 0..(m - 1),
      into: %{},
      do: {{i, j}, x}
    )
    |> Map.put({center, center}, :U)
    |> Map.put({center + 1, center}, :L)
  end

  # Expands each half-size cell into a 2x2 block using its LUX offsets.
  defp synthesis(m, oms, mat) do
    range = 0..(m - 1)

    Enum.reduce(range, [], fn i, acc ->
      {row0, row1} =
        Enum.reduce(range, {[], []}, fn j, {r0, r1} ->
          x = oms[{i, j}]
          [lux0, lux1, lux2, lux3] = @lux[mat[{i, j}]]
          {[x + lux0, x + lux1 | r0], [x + lux2, x + lux3 | r1]}
        end)

      [row0, row1 | acc]
    end)
  end

  # Boolean keep/complement pattern for the doubly even construction,
  # tiled from a 4x4 base pattern.
  defp make_pattern(n) do
    pattern =
      Enum.reduce(1..4, [true], fn _, acc ->
        acc ++ Enum.map(acc, &(!&1))
      end)
      |> Enum.chunk_every(4)

    for i <- 0..(n - 1),
        j <- 0..(n - 1),
        do: Enum.at(pattern, rem(i, 4)) |> Enum.at(rem(j, 4))
  end
end
|
lib/matrex/magic_square.ex
| 0.581184 | 0.589775 |
magic_square.ex
|
starcoder
|
defmodule Serum.Plugins.SitemapGenerator do
  @moduledoc """
  A Serum plugin that creates a sitemap so that the search engine can index posts.
  ## Using the Plugin
      # serum.exs:
      %{
        server_root: "https://example.io",
        plugins: [
          {Serum.Plugins.SitemapGenerator, only: :prod}
        ]
      }
  """

  @behaviour Serum.Plugin

  # Computed at compile time: require a Serum version compatible with the
  # one this plugin is compiled against (e.g. "~> 1.2").
  serum_ver = Version.parse!(Mix.Project.config()[:version])
  serum_req = "~> #{serum_ver.major}.#{serum_ver.minor}"

  require EEx
  alias Serum.GlobalBindings
  alias Serum.Result

  # Serum.Plugin metadata callbacks.
  def name, do: "Create sitemap for search engine"
  def version, do: "1.0.0"
  def elixir, do: ">= 1.6.0"
  def serum, do: unquote(serum_req)

  def description do
    "Create a sitemap so that the search engine can index posts."
  end

  # This plugin only hooks the post-build stage.
  def implements,
    do: [
      :build_succeeded
    ]

  # After a successful build, write sitemap.xml and robots.txt into the
  # destination directory; propagate the first write error, if any.
  def build_succeeded(_src, dest) do
    with {:ok, _} <- write_sitemap(dest),
         {:ok, _} <- write_robots(dest) do
      :ok
    else
      {:error, _} = error -> error
    end
  end

  # Template files are resolved from Serum's priv/build_resources at
  # compile time and compiled into functions below via EEx.
  res_dir =
    :serum
    |> :code.priv_dir()
    |> IO.iodata_to_binary()
    |> Path.join("build_resources")

  sitemap_path = Path.join(res_dir, "sitemap.xml.eex")
  robots_path = Path.join(res_dir, "robots.txt.eex")

  # Compile the EEx templates into private functions at compile time.
  EEx.function_from_file(:defp, :sitemap_xml, sitemap_path, [
    :all_posts,
    :transformer,
    :server_root
  ])

  EEx.function_from_file(:defp, :robots_txt, robots_path, [:sitemap_path])

  # Formats an Erlang datetime as a W3C date (YYYY-MM-DD) for <lastmod>.
  defp to_w3c_format(erl_datetime) do
    # reference to https://www.w3.org/TR/NOTE-datetime
    Timex.format!(erl_datetime, "%Y-%m-%d", :strftime)
  end

  # Reads the configured server root URL from the site's global bindings.
  defp get_server_root do
    :site
    |> GlobalBindings.get()
    |> Map.fetch!(:server_root)
  end

  @spec write_sitemap(binary()) :: Result.t(Serum.File.t())
  defp write_sitemap(dest) do
    all_posts = GlobalBindings.get(:all_posts)

    file = %Serum.File{
      dest: Path.join(dest, "sitemap.xml"),
      out_data: sitemap_xml(all_posts, &to_w3c_format/1, get_server_root())
    }

    Serum.File.write(file)
  end

  @spec write_robots(binary()) :: Result.t(Serum.File.t())
  defp write_robots(dest) do
    file = %Serum.File{
      dest: Path.join(dest, "robots.txt"),
      out_data: robots_txt(Path.join(get_server_root(), "sitemap.xml"))
    }

    Serum.File.write(file)
  end
end
|
lib/serum/plugins/sitemap_generator.ex
| 0.656218 | 0.4099 |
sitemap_generator.ex
|
starcoder
|
defmodule ExDns.Resource.A do
  @moduledoc """
  Manages the A resource record
  The wire protocol is defined in [RFC1035](https://tools.ietf.org/html/rfc1035#section-3.4.1)
  3.4.1. A RDATA format
  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
  | ADDRESS |
  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
  where:
  ADDRESS A 32 bit Internet address.
  Hosts that have multiple Internet addresses will have multiple A
  records.
  A records cause no additional section processing. The RDATA section of
  an A line in a master file is an Internet address expressed as four
  decimal numbers separated by dots without any imbedded spaces (e.g.,
  "10.2.0.52" or "192.168.127.12").
  """
  defstruct [:name, :ttl, :class, :ipv4]

  import ExDns.Resource.Validation
  import ExDns.Resource, only: [class_from: 1]
  alias ExDns.Inet.Ipv4

  @doc """
  Returns an A resource from a keyword list
  """
  def new(resource) when is_list(resource) do
    resource
    |> validate_ipv4(:ipv4)
    |> validate_integer(:ttl)
    |> validate_class(:class, :internet)
    |> structify_if_valid(__MODULE__)
  end

  @doc """
  Encodes an A resource record into RFC 1035 wire format.
  """
  def encode(%__MODULE__{} = resource) do
    %{name: name, class: class, ipv4: ipv4} = resource
    # RDATA for an A record is the IPv4 address as a 32-bit big-endian
    # integer (RFC 1035 §3.4.1). Fixed: the original packed the integer
    # with a `bytes-size(4)` segment, which only accepts binaries.
    rdata = <<Ipv4.to_integer(ipv4)::unsigned-integer-size(32)>>
    # Fixed: RDLENGTH is a 16-bit field (RFC 1035 §4.1.3), not 32-bit.
    rdlength = <<byte_size(rdata)::unsigned-integer-size(16)>>
    # Fixed: `type` was an undefined variable (compile error). This is an
    # A record; assumes Message.encode_type/1 accepts :a — TODO confirm.
    type = :a
    # NOTE(review): the TTL field of the RR wire format is not emitted
    # here; confirm whether Message.* or a caller is responsible for it.
    <<Message.encode_name(name)::binary, Message.encode_class(class)::binary,
      Message.encode_type(type)::binary, rdlength::binary, rdata::binary>>
  end

  @preamble ExDns.Resource.preamble_format()
  # Formats the resource as a master-file style text line.
  def format(%__MODULE__{} = resource) do
    format_string = [@preamble | '~-20s']

    format_string
    |> :io_lib.format([
      resource.name,
      resource.ttl,
      class_from(resource.class),
      Ipv4.to_string(resource.ipv4)
    ])
  end

  defimpl ExDns.Resource.Format do
    def format(resource) do
      ExDns.Resource.A.format(resource)
    end

    def encode(%ExDns.Resource.A{} = resource) do
      ExDns.Resource.A.encode(resource)
    end
  end
end
|
lib/ex_dns/resource/a.ex
| 0.649134 | 0.413152 |
a.ex
|
starcoder
|
defmodule AWS.Budgets do
@moduledoc """
The AWS Budgets API enables you to use AWS Budgets to plan your service
usage, service costs, and instance reservations. The API reference provides
descriptions, syntax, and usage examples for each of the actions and data
types for AWS Budgets.
Budgets provide you with a way to see the following information:
<ul> <li> How close your plan is to your budgeted amount or to the free
tier limits
</li> <li> Your usage-to-date, including how much you've used of your
Reserved Instances (RIs)
</li> <li> Your current estimated charges from AWS, and how much your
predicted usage will accrue in charges by the end of the month
</li> <li> How much of your budget has been used
</li> </ul> AWS updates your budget status several times a day. Budgets
track your unblended costs, subscriptions, refunds, and RIs. You can create
the following types of budgets:
<ul> <li> **Cost budgets** - Plan how much you want to spend on a service.
</li> <li> **Usage budgets** - Plan how much you want to use one or more
services.
</li> <li> **RI utilization budgets** - Define a utilization threshold, and
receive alerts when your RI usage falls below that threshold. This lets you
see if your RIs are unused or under-utilized.
</li> <li> **RI coverage budgets** - Define a coverage threshold, and
receive alerts when the number of your instance hours that are covered by
RIs fall below that threshold. This lets you see how much of your instance
usage is covered by a reservation.
</li> </ul> Service Endpoint
The AWS Budgets API provides the following endpoint:
<ul> <li> https://budgets.amazonaws.com
</li> </ul> For information about costs that are associated with the AWS
Budgets API, see [AWS Cost Management
Pricing](https://aws.amazon.com/aws-cost-management/pricing/).
"""
@doc """
Creates a budget and, if included, notifications and subscribers.
<important> Only one of `BudgetLimit` or `PlannedBudgetLimits` can be
present in the syntax at one time. Use the syntax that matches your case.
The Request Syntax section shows the `BudgetLimit` syntax. For
`PlannedBudgetLimits`, see the
[Examples](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_budgets_CreateBudget.html#API_CreateBudget_Examples)
section.
</important>
"""
def create_budget(client, input, options \\ []) do
request(client, "CreateBudget", input, options)
end
@doc """
Creates a budget action.
"""
def create_budget_action(client, input, options \\ []) do
request(client, "CreateBudgetAction", input, options)
end
@doc """
Creates a notification. You must create the budget before you create the
associated notification.
"""
def create_notification(client, input, options \\ []) do
request(client, "CreateNotification", input, options)
end
@doc """
Creates a subscriber. You must create the associated budget and
notification before you create the subscriber.
"""
def create_subscriber(client, input, options \\ []) do
request(client, "CreateSubscriber", input, options)
end
@doc """
Deletes a budget. You can delete your budget at any time.
<important> Deleting a budget also deletes the notifications and
subscribers that are associated with that budget.
</important>
"""
def delete_budget(client, input, options \\ []) do
request(client, "DeleteBudget", input, options)
end
@doc """
Deletes a budget action.
"""
def delete_budget_action(client, input, options \\ []) do
request(client, "DeleteBudgetAction", input, options)
end
@doc """
Deletes a notification.
<important> Deleting a notification also deletes the subscribers that are
associated with the notification.
</important>
"""
def delete_notification(client, input, options \\ []) do
request(client, "DeleteNotification", input, options)
end
@doc """
Deletes a subscriber.
<important> Deleting the last subscriber to a notification also deletes the
notification.
</important>
"""
def delete_subscriber(client, input, options \\ []) do
request(client, "DeleteSubscriber", input, options)
end
# Read-only (Describe*) operations plus ExecuteBudgetAction.
@doc """
Describes a budget.
<important> The Request Syntax section shows the `BudgetLimit` syntax. For
`PlannedBudgetLimits`, see the
[Examples](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_budgets_DescribeBudget.html#API_DescribeBudget_Examples)
section.
</important>
"""
def describe_budget(client, input, options \\ []) do
  request(client, "DescribeBudget", input, options)
end

@doc """
Describes a budget action detail.
"""
def describe_budget_action(client, input, options \\ []) do
  request(client, "DescribeBudgetAction", input, options)
end

@doc """
Describes a budget action history detail.
"""
def describe_budget_action_histories(client, input, options \\ []) do
  request(client, "DescribeBudgetActionHistories", input, options)
end

@doc """
Describes all of the budget actions for an account.
"""
def describe_budget_actions_for_account(client, input, options \\ []) do
  request(client, "DescribeBudgetActionsForAccount", input, options)
end

@doc """
Describes all of the budget actions for a budget.
"""
def describe_budget_actions_for_budget(client, input, options \\ []) do
  request(client, "DescribeBudgetActionsForBudget", input, options)
end

@doc """
Describes the history for `DAILY`, `MONTHLY`, and `QUARTERLY` budgets.
Budget history isn't available for `ANNUAL` budgets.
"""
def describe_budget_performance_history(client, input, options \\ []) do
  request(client, "DescribeBudgetPerformanceHistory", input, options)
end

@doc """
Lists the budgets that are associated with an account.
<important> The Request Syntax section shows the `BudgetLimit` syntax. For
`PlannedBudgetLimits`, see the
[Examples](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_budgets_DescribeBudgets.html#API_DescribeBudgets_Examples)
section.
</important>
"""
def describe_budgets(client, input, options \\ []) do
  request(client, "DescribeBudgets", input, options)
end

@doc """
Lists the notifications that are associated with a budget.
"""
def describe_notifications_for_budget(client, input, options \\ []) do
  request(client, "DescribeNotificationsForBudget", input, options)
end

@doc """
Lists the subscribers that are associated with a notification.
"""
def describe_subscribers_for_notification(client, input, options \\ []) do
  request(client, "DescribeSubscribersForNotification", input, options)
end

@doc """
Executes a budget action.
"""
def execute_budget_action(client, input, options \\ []) do
  request(client, "ExecuteBudgetAction", input, options)
end
# Update operations.
@doc """
Updates a budget. You can change every part of a budget except for the
`budgetName` and the `calculatedSpend`. When you modify a budget, the
`calculatedSpend` drops to zero until AWS has new usage data to use for
forecasting.
<important> Only one of `BudgetLimit` or `PlannedBudgetLimits` can be
present in the syntax at one time. Use the syntax that matches your case.
The Request Syntax section shows the `BudgetLimit` syntax. For
`PlannedBudgetLimits`, see the
[Examples](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_budgets_UpdateBudget.html#API_UpdateBudget_Examples)
section.
</important>
"""
def update_budget(client, input, options \\ []) do
  request(client, "UpdateBudget", input, options)
end

@doc """
Updates a budget action.
"""
def update_budget_action(client, input, options \\ []) do
  request(client, "UpdateBudgetAction", input, options)
end

@doc """
Updates a notification.
"""
def update_notification(client, input, options \\ []) do
  request(client, "UpdateNotification", input, options)
end

@doc """
Updates a subscriber.
"""
def update_subscriber(client, input, options \\ []) do
  request(client, "UpdateSubscriber", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
        {:ok, map() | nil, map()}
        | {:error, term()}
defp request(client, action, input, options) do
  # Budgets is a global service: every request is pinned to us-east-1.
  client = %{client | service: "budgets", region: "us-east-1"}

  host = build_host("budgets", client)
  url = build_url(host, client)
  payload = encode!(client, input)

  base_headers = [
    {"Host", host},
    {"Content-Type", "application/x-amz-json-1.1"},
    {"X-Amz-Target", "AWSBudgetServiceGateway.#{action}"}
  ]

  # Signature V4 covers the payload, so it must be encoded before signing.
  signed_headers = AWS.Request.sign_v4(client, "POST", url, base_headers, payload)
  post(client, url, payload, signed_headers, options)
end
# Executes the signed POST and normalizes the response:
# - 200 with a non-empty body decodes the JSON payload; an empty body yields nil
# - any other status is wrapped as {:error, {:unexpected_response, response}}
# - transport errors pass through unchanged as {:error, reason}
defp post(client, url, payload, headers, options) do
  case AWS.Client.request(client, :post, url, payload, headers, options) do
    {:ok, %{status_code: 200, body: body} = response} ->
      # `if` without else yields nil for an empty body, matching the
      # `{:ok, map() | nil, map()}` success type declared on request/4.
      body = if body != "", do: decode!(client, body)
      {:ok, body, response}

    {:ok, response} ->
      {:error, {:unexpected_response, response}}

    error = {:error, _reason} ->
      error
  end
end
# Resolves the HTTP host for the request. Clause order is load-bearing:
# a "local" region with an explicit endpoint wins, plain "local" falls back
# to localhost, and otherwise the service prefix is joined to the endpoint
# suffix. A client with neither "local" region nor :endpoint would raise
# FunctionClauseError — presumably the client always sets :endpoint; confirm
# against AWS.Client.
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
  endpoint
end

defp build_host(_endpoint_prefix, %{region: "local"}) do
  "localhost"
end

defp build_host(endpoint_prefix, %{endpoint: endpoint}) do
  "#{endpoint_prefix}.#{endpoint}"
end
# Composes the absolute request URL from the client's scheme, host, and port.
defp build_url(host, %{proto: proto, port: port}), do: "#{proto}://#{host}:#{port}/"
# JSON-encodes a request payload using the client's configured encoder.
defp encode!(client, payload) do
  AWS.Client.encode!(client, payload, :json)
end

# JSON-decodes a response body using the client's configured decoder.
defp decode!(client, payload) do
  AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/budgets.ex
| 0.805441 | 0.590189 |
budgets.ex
|
starcoder
|
defmodule Content.Utilities do
  @moduledoc """
  Shared helpers for formatting fixed-width sign text and for mapping
  route/direction/stop identifiers to PaEss destination atoms.
  """

  @type track_number :: non_neg_integer()
  @type green_line_branch :: :b | :c | :d | :e

  # Upper bound in seconds (20 minutes) usable in guards/match contexts.
  defmacro max_time_seconds do
    quote do: 20 * 60
  end

  @doc """
  Joins `left` (left-aligned) and `right` (right-aligned) into a single
  string `width` characters wide, truncating `left` so at least one space
  separates the two sides.

  Fix: slice length and padding are clamped at zero, so a `right` string
  wider than `width` degrades gracefully instead of raising in
  `String.slice/3` / `String.duplicate/2`.
  """
  @spec width_padded_string(String.t(), String.t(), non_neg_integer()) :: String.t()
  def width_padded_string(left, right, width) do
    max_left_length = max(width - (String.length(right) + 1), 0)
    left = String.slice(left, 0, max_left_length)
    padding = max(width - (String.length(left) + String.length(right)), 0)
    Enum.join([left, String.duplicate(" ", padding), right])
  end

  @doc """
  Maps a route id, direction id, and last stop id to a PaEss destination.
  Clause order is significant: route-specific clauses come first, then
  stop-id specific clauses, then Green Line branch defaults.
  """
  @spec destination_for_prediction(String.t(), 0 | 1, String.t()) ::
          {:ok, PaEss.destination()} | {:error, :not_found}
  def destination_for_prediction("Mattapan", 0, _), do: {:ok, :mattapan}
  def destination_for_prediction("Mattapan", 1, _), do: {:ok, :ashmont}
  def destination_for_prediction("Orange", 0, _), do: {:ok, :forest_hills}
  def destination_for_prediction("Orange", 1, _), do: {:ok, :oak_grove}
  def destination_for_prediction("Blue", 0, _), do: {:ok, :bowdoin}
  def destination_for_prediction("Blue", 1, _), do: {:ok, :wonderland}
  def destination_for_prediction("Red", 1, _), do: {:ok, :alewife}

  # Southbound Red Line: branch determined by the trip's last stop id.
  def destination_for_prediction("Red", 0, last_stop_id)
      when last_stop_id in ["70085", "70086", "70087", "70089", "70091", "70093"],
      do: {:ok, :ashmont}

  def destination_for_prediction("Red", 0, last_stop_id)
      when last_stop_id in [
             "70095",
             "70096",
             "70097",
             "70099",
             "70101",
             "70103",
             "70105",
             "Braintree-01",
             "Braintree-02"
           ],
      do: {:ok, :braintree}

  def destination_for_prediction("Red", 0, _), do: {:ok, :southbound}

  # Green Line trips identified by their terminal stop id (any branch).
  def destination_for_prediction(_, 0, "70151"), do: {:ok, :kenmore}
  def destination_for_prediction(_, 0, "70202"), do: {:ok, :government_center}
  def destination_for_prediction(_, 0, "70201"), do: {:ok, :government_center}
  def destination_for_prediction(_, 0, "70175"), do: {:ok, :reservoir}
  def destination_for_prediction(_, 0, "70107"), do: {:ok, :boston_college}
  def destination_for_prediction(_, 0, "70237"), do: {:ok, :cleveland_circle}
  def destination_for_prediction(_, 0, "70161"), do: {:ok, :riverside}
  def destination_for_prediction(_, 0, "70260"), do: {:ok, :heath_street}
  def destination_for_prediction(_, 1, "70209"), do: {:ok, :lechmere}
  def destination_for_prediction(_, 1, "70205"), do: {:ok, :north_station}
  def destination_for_prediction(_, 1, "70201"), do: {:ok, :government_center}
  def destination_for_prediction(_, 1, "70200"), do: {:ok, :park_street}
  def destination_for_prediction(_, 1, "71199"), do: {:ok, :park_street}
  def destination_for_prediction(_, 1, "70150"), do: {:ok, :kenmore}
  def destination_for_prediction(_, 1, "70174"), do: {:ok, :reservoir}
  def destination_for_prediction(_, _, "Government Center-Brattle"), do: {:ok, :government_center}

  # Green Line branch defaults when the stop id didn't identify a terminal.
  def destination_for_prediction("Green-B", 0, _), do: {:ok, :boston_college}
  def destination_for_prediction("Green-C", 0, _), do: {:ok, :cleveland_circle}
  def destination_for_prediction("Green-D", 0, _), do: {:ok, :riverside}
  def destination_for_prediction("Green-E", 0, _), do: {:ok, :heath_street}
  def destination_for_prediction("Green-B", 1, _), do: {:ok, :government_center}
  def destination_for_prediction("Green-C", 1, _), do: {:ok, :government_center}
  def destination_for_prediction("Green-D", 1, _), do: {:ok, :north_station}
  def destination_for_prediction("Green-E", 1, _), do: {:ok, :north_station}
  def destination_for_prediction(_, _, _), do: {:error, :not_found}

  @doc "Returns the platform track number for multi-track terminal stop ids, nil otherwise."
  @spec stop_track_number(String.t()) :: track_number() | nil
  def stop_track_number("Alewife-01"), do: 1
  def stop_track_number("Alewife-02"), do: 2
  def stop_track_number("Braintree-01"), do: 1
  def stop_track_number("Braintree-02"), do: 2
  def stop_track_number("Forest Hills-01"), do: 1
  def stop_track_number("Forest Hills-02"), do: 2
  def stop_track_number("Oak Grove-01"), do: 1
  def stop_track_number("Oak Grove-02"), do: 2
  def stop_track_number(_), do: nil

  @doc "Returns the Green Line branch letter for a route/destination pair, nil if not a branch terminal."
  @spec route_and_destination_branch_letter(String.t(), PaEss.destination()) ::
          green_line_branch() | nil
  def route_and_destination_branch_letter("Green-B", :boston_college), do: :b
  def route_and_destination_branch_letter("Green-C", :cleveland_circle), do: :c
  def route_and_destination_branch_letter("Green-D", :riverside), do: :d
  def route_and_destination_branch_letter("Green-D", :reservoir), do: :d
  def route_and_destination_branch_letter("Green-E", :heath_street), do: :e
  def route_and_destination_branch_letter(_route_id, _destination), do: nil
end
|
lib/content/utilities.ex
| 0.800653 | 0.535766 |
utilities.ex
|
starcoder
|
defmodule Mix.Tasks.Cloak.Migrate do
  @moduledoc """
  Migrate all configured schemas to your new encryption configuration.
  While Cloak will automatically decrypt rows which use an old decryption cipher
  or key, this isn't usually enough. Usually, you want to retire the old key, so
  it won't do to leave it configured indefinitely.
  This task allows you to proactively upgrade all rows in your database to the
  new encryption configuration, so that you can remove the old key.

  ## Before You Run This Task...

  1. Ensure that you have configured your new encryption cipher.
  2. Set the new cipher and/or key as the `:default`. Otherwise, running this
     task will have no effect.

  ## Configuration

  In order for the Mix task to update rows in the correct database, it must have
  access to the correct repo, and a list of schemas to migrate.
  Each schema should be specified in this format:

      {schema_name, :encryption_field_name}

  Where `:encryption_field_name` is the name of the field the schema uses to
  track its encryption version.

      config :cloak, :migration,
        repo: MyApp.Repo,
        schemas: [{MyApp.Schema1, :encryption_version},
                  {MyApp.Schema2, :encryption_version}]

  ## Usage

      mix cloak.migrate

  The task allows you to customize the repo and schemas which will be migrated at
  runtime.

      mix cloak.migrate -m MyApp.Schema -f encryption_version -r MyApp.Repo
  """

  use Mix.Task

  import Ecto.Query, only: [from: 2]
  import Logger, only: [info: 1]
  import String, only: [to_existing_atom: 1]

  # Usage help raised whenever the task cannot determine what to migrate.
  # Kept in an attribute so every failure path raises the same message.
  @usage """
  You must specify which schemas you wish to migrate and which repo to use.
  You can do this in your Mix config, like so:

      config :cloak, :migration,
        repo: MyApp.Repo,
        schemas: [{MyApp.Schema1, :encryption_version},
                  {MyApp.Schema2, :encryption_version}]

  Alternatively, you can pass in the schema, field, and repo as command line
  arguments to `mix cloak.migrate`:

      mix cloak.migrate -r Repo -m SchemaName -f encryption_version_field
  """

  @doc false
  def run(args) do
    _ = info("=== Starting Migration ===")
    {repo, schemas} = parse(args)
    Mix.Task.run("app.start", args)
    Enum.each(schemas, &migrate(&1, repo))
    _ = info("=== Migration Complete ===")
    :ok
  end

  # Resolves the repo and schema list from CLI flags, falling back to the
  # `:cloak, :migration` application config.
  defp parse(args) do
    {opts, _argv, _invalid} =
      OptionParser.parse(args,
        switches: [schema: :string, field: :string, repo: :string],
        aliases: [m: :schema, f: :field, r: :repo]
      )

    repo =
      case opts[:repo] do
        nil -> Application.get_env(:cloak, :migration)[:repo]
        repo -> to_module(repo)
      end

    schemas =
      case opts[:schema] do
        nil ->
          Application.get_env(:cloak, :migration)[:schemas]

        schema ->
          # Fix: `-m` without `-f` used to crash in String.to_atom(nil)
          # with an unhelpful error; fail fast with usage help instead.
          field = opts[:field] || raise(ArgumentError, @usage)
          [{to_module(schema), String.to_atom(field)}]
      end

    validate!(repo, schemas)
    {repo, schemas}
  end

  # Fix: a nil repo or nil schema list previously slipped through validation
  # and crashed later (Enum.each/2 on nil) with a cryptic error. Raise the
  # usage message up front. An explicitly empty schema list remains a no-op,
  # matching the original behavior.
  defp validate!(repo, schemas) when is_nil(repo) or is_nil(schemas) do
    raise ArgumentError, @usage
  end

  # Each schema entry must be a {module, version_field} tuple.
  defp validate!(_repo, [h | _t]) when not is_tuple(h) do
    raise ArgumentError, @usage
  end

  defp validate!(_repo, _schemas), do: :ok

  # Migrates every stale row of one {schema, version_field} pair.
  defp migrate({schema, field}, repo) do
    _ = info("--- Migrating #{inspect(schema)} Schema ---")
    ids = ids_for({schema, field}, repo)
    _ = info("#{length(ids)} records found needing migration")

    for id <- ids do
      schema
      |> repo.get(id)
      |> migrate_row(repo, field)
    end
  end

  # Returns the ids of rows whose stored encryption version differs from the
  # currently configured Cloak version.
  defp ids_for({schema, field}, repo) do
    query =
      from(
        m in schema,
        where: field(m, ^field) != ^Cloak.version(),
        select: m.id
      )

    repo.all(query)
  end

  # Re-saves a row so Cloak re-encrypts it with the current configuration.
  # The version is re-checked here in case it changed between the id query
  # and the row fetch.
  defp migrate_row(row, repo, field) do
    version = Map.get(row, field)

    if version != Cloak.version() do
      row
      |> Ecto.Changeset.change()
      |> Ecto.Changeset.put_change(field, Cloak.version())
      |> repo.update!()
    end
  end

  # Converts a CLI module name ("MyApp.Repo") into an existing module atom;
  # raises if no such module is loaded (avoids unbounded atom creation).
  defp to_module(name) do
    to_existing_atom("Elixir." <> name)
  end
end
|
lib/mix/tasks/cloak.migrate.ex
| 0.816626 | 0.497131 |
cloak.migrate.ex
|
starcoder
|
defmodule Bonny.PeriodicTask do
  @moduledoc """
  Register periodically run tasks.
  Use for running tasks as a part of reconciling a CRD with a lifetime, duration, or interval field.
  __Note:__ Must be started by your operator.
  Add `Bonny.PeriodicTask.start_link(:ok)` to your application.
  Functions are expected to return one of:
  - `:ok` - task will be passed to subsequent calls
  - `{:ok, new_state}` state field will be updated in task and provided to next call
  - `{:stop, reason}` task will be removed from execution loop. Use for tasks opting out of being re-run
  - `any()` - any other result is treated as an error, and the execution loop will be halted
  ## Examples
  Registering a task
      iex> Bonny.PeriodicTask.new(:pod_evictor, {PodEvictor, :evict, [reconcile_payload_map]}, 5000)
  Unregistering a task
      iex> Bonny.PeriodicTask.unregister(:pod_evictor)
  """
  use DynamicSupervisor
  require Logger

  @enforce_keys [:handler, :id]
  # :jitter defaults to 0.0 — presumably consumed by Bonny.PeriodicTask.Runner
  # when scheduling; confirm in that module.
  defstruct handler: nil, id: nil, interval: 1000, jitter: 0.0, state: nil

  @type t :: %__MODULE__{
          handler: fun() | mfa(),
          interval: pos_integer(),
          jitter: float(),
          id: binary() | atom(),
          state: any()
        }

  @spec start_link(any()) :: :ignore | {:error, any()} | {:ok, pid()}
  def start_link(_any) do
    DynamicSupervisor.start_link(__MODULE__, :ok, name: __MODULE__)
  end

  @impl true
  def init(_arg) do
    DynamicSupervisor.init(strategy: :one_for_one)
  end

  @doc "Registers and starts a new task given `Bonny.PeriodicTask` attributes"
  @spec new(binary() | atom(), mfa() | fun(), pos_integer()) :: {:ok, pid} | {:error, term()}
  def new(id, handler, interval \\ 5000) do
    register(%__MODULE__{
      id: id,
      handler: handler,
      interval: interval
    })
  end

  @doc "Registers and starts a new `Bonny.PeriodicTask`"
  @spec register(t()) :: {:ok, pid} | {:error, term()}
  def register(%__MODULE__{id: id} = task) do
    Logger.info("Task registered", %{id: id})
    # Each task runs in its own Runner child under this supervisor.
    DynamicSupervisor.start_child(__MODULE__, {Bonny.PeriodicTask.Runner, task})
  end

  @doc "Unregisters and stops a `Bonny.PeriodicTask`"
  @spec unregister(t() | atom()) :: any()
  def unregister(%__MODULE__{id: id}), do: unregister(id)

  # NOTE(review): only atom ids are accepted here (Process.whereis/1 requires
  # an atom), even though the type allows binary ids as well — confirm whether
  # binary ids are expected to be unregisterable.
  def unregister(id) when is_atom(id) do
    Logger.info("Task unregistered", %{id: id})

    case Process.whereis(id) do
      nil ->
        :ok

      pid ->
        DynamicSupervisor.terminate_child(__MODULE__, pid)
    end
  end
end
|
lib/bonny/periodic_task.ex
| 0.930624 | 0.588002 |
periodic_task.ex
|
starcoder
|
defmodule ResourceCache do
  @moduledoc ~S"""
  Fast caching with clear syntax.
  ## Quick Setup
  ```elixir
  def deps do
    [
      {:resource_cache, "~> 0.1"}
    ]
  end
  ```
  Define a cache by setting a resource (in this case Ecto schema)
  and source. (The Ecto repo to query.)
  ```elixir
  defmodule MyApp.Categories do
    use ResourceCache
    resource MyApp.Category
    source :ecto, repo: MyApp.Repo
  end
  ```
  Now the cache can be used for fast listing:
  ```elixir
  iex> MyApp.Categories.list()
  [%MyApp.Category{}, ...]
  ```
  Indices can be added to do quick lookups by value:
  ```elixir
  defmodule MyApp.Categories do
    use ResourceCache
    resource MyApp.Category
    source :ecto, repo: MyApp.Repo
    index :slug, primary: true
  end
  ```
  Now `get_by_slug/1` can be used.
  In addition to the standard `get_by_slug`,
  `get/1` is also available since it was defined as primary index.
  ```elixir
  iex> MyApp.Categories.get("electronics")
  %MyApp.Category{}
  iex> MyApp.Categories.get_by_slug("electronics")
  %MyApp.Category{}
  iex> MyApp.Categories.get("fake")
  nil
  iex> MyApp.Categories.get_by_slug("fake")
  nil
  ```
  There is no limit to the amount of indices that can be added.
  It is possible to pass an optional type for each index,
  to generate cleaner specs for the function arguments.
  ```elixir
  index :slug, primary: true, type: String.t
  ```
  """
  alias ResourceCache.Cache

  @callback __type__() :: :cache | :source | :bridge
  @callback __config__() :: module
  @callback __config__(:callbacks) :: term
  @callback __process__([term]) :: [term]

  @type resource :: %{optional(atom) => term}
  @type hook ::
          (cache :: module -> :ok | :unhook)
          | (cache :: module, changes :: ResourceCache.Changeset.t() -> nil)

  @doc false
  @spec __using__(Keyword.t()) :: term
  defmacro __using__(opts \\ []) do
    # Register the module attributes that the DSL macros below accumulate
    # into; Cache.generate/1 reads them at @before_compile time.
    Module.put_attribute(__CALLER__.module, :cache_type, Keyword.get(opts, :type))
    Module.register_attribute(__CALLER__.module, :cache_resource, accumulate: false)
    Module.register_attribute(__CALLER__.module, :cache_resource_type, accumulate: false)
    Module.register_attribute(__CALLER__.module, :cache_indices, accumulate: true)
    Module.register_attribute(__CALLER__.module, :cache_default_source, accumulate: false)
    Module.register_attribute(__CALLER__.module, :cache_filters, accumulate: true)
    Module.register_attribute(__CALLER__.module, :cache_optimizers, accumulate: true)
    Module.register_attribute(__CALLER__.module, :cache_on_configure, accumulate: true)
    Module.register_attribute(__CALLER__.module, :cache_pre_update, accumulate: true)
    Module.register_attribute(__CALLER__.module, :cache_on_update, accumulate: true)
    # Shorthands: `use ResourceCache, resource: ..., source: ...` behaves as
    # if the `resource`/`source` macros were invoked in the module body.
    resource = Macro.expand(Keyword.get(opts, :resource), __CALLER__)
    source = Macro.expand(Keyword.get(opts, :source), __CALLER__)
    source_opts = Macro.expand(Keyword.get(opts, :source_opts), __CALLER__)

    quote do
      @behaviour unquote(__MODULE__)
      @before_compile unquote(__MODULE__)
      require unquote(__MODULE__)
      require unquote(__MODULE__.CacheManager)

      import unquote(__MODULE__),
        only: [
          filter: 1,
          filter: 2,
          filter: 3,
          index: 1,
          index: 2,
          on_configure: 1,
          on_update: 1,
          optimize: 1,
          optimize: 2,
          optimize: 3,
          reject: 1,
          reject: 2,
          reject: 3,
          resource: 1,
          resource: 2,
          resource: 3,
          source: 1,
          source: 2,
          type: 1,
          type: 2
        ]

      unquote(if resource, do: Cache.resource(__CALLER__, resource, [], []))
      unquote(if source, do: Cache.source(__CALLER__, source, source_opts))
    end
  end

  # All accumulated DSL state is turned into generated code here.
  @spec __before_compile__(Macro.Env.t()) :: term
  defmacro __before_compile__(env), do: Cache.generate(env)

  @spec type(atom, Keyword.t()) :: term
  defmacro type(type, opts \\ []) do
    Cache.type(__CALLER__, type, opts)
  end

  @spec resource(module, Keyword.t(), term) :: term
  defmacro resource(resource, opts \\ [], convert \\ []) do
    {c, o} = opts_do(convert, opts)
    Cache.resource(__CALLER__, Macro.expand(resource, __CALLER__), c, o)
  end

  @spec source(module, Keyword.t()) :: term
  defmacro source(source, opts \\ []),
    do: Cache.source(__CALLER__, Macro.expand(source, __CALLER__), opts)

  @spec index(atom, Keyword.t()) :: term
  defmacro index(field, opts \\ []), do: Cache.index(__CALLER__, field, opts)

  @spec on_configure(ResourceCache.hook(), Keyword.t()) :: term
  defmacro on_configure(callback, opts \\ []), do: Cache.on_configure(__CALLER__, callback, opts)

  @spec on_update(ResourceCache.hook(), Keyword.t()) :: term
  defmacro on_update(callback, opts \\ []), do: Cache.on_update(__CALLER__, callback, opts)

  defmacro optimize(field \\ nil, opts \\ [], optimizer) do
    {optimize, o} = opts_do(optimizer, opts)
    Cache.optimize(__CALLER__, field, optimize, o)
  end

  @spec reject(atom | Keyword.t(), term) :: term
  defmacro reject(field_or_opts \\ [], rejecter)

  # reject/2 accepts either a field atom or an options list as first arg.
  defmacro reject(input, rejecter) do
    field = if is_atom(input), do: input
    {rejecter, opts} = if is_list(input), do: opts_do(rejecter, input), else: opts_do(rejecter)
    Cache.reject(__CALLER__, field, rejecter, opts)
  end

  @spec reject(atom, Keyword.t(), term) :: term
  defmacro reject(field, opts, do: rejecter), do: Cache.reject(__CALLER__, field, rejecter, opts)
  defmacro reject(field, opts, rejecter), do: Cache.reject(__CALLER__, field, rejecter, opts)

  @spec filter(atom | Keyword.t(), term) :: term
  defmacro filter(field_or_opts \\ [], filter)

  # filter/2 mirrors reject/2: field atom or options list first.
  defmacro filter(input, filter) do
    field = if is_atom(input), do: input
    {filter, opts} = if is_list(input), do: opts_do(filter, input), else: opts_do(filter)
    Cache.filter(__CALLER__, field, filter, opts)
  end

  @spec filter(atom, Keyword.t(), term) :: term
  defmacro filter(field, opts, do: filter), do: Cache.filter(__CALLER__, field, filter, opts)
  defmacro filter(field, opts, filter), do: Cache.filter(__CALLER__, field, filter, opts)

  # Normalizes the (block-or-opts, opts) argument pairs used by the DSL:
  # extracts a `:do` block when present and merges remaining options.
  @spec opts_do(term | Keyword.t(), Keyword.t()) :: {term, Keyword.t()}
  defp opts_do(block_or_opts, opts \\ [])

  defp opts_do(block_or_opts, opts) when is_list(block_or_opts) and is_list(opts) do
    case Keyword.pop(block_or_opts, :do) do
      {nil, o} -> o |> Keyword.merge(opts) |> Keyword.pop(:do)
      {block, o} -> {block, Keyword.merge(o, opts)}
    end
  end

  defp opts_do(block, opts) when is_list(opts), do: {block, opts}
  defp opts_do(block, opts) when is_list(block), do: {opts, block}
end
|
lib/resource_cache.ex
| 0.825167 | 0.651216 |
resource_cache.ex
|
starcoder
|
if Code.ensure_loaded?(:hackney) do
  defmodule Tesla.Adapter.Hackney do
    @moduledoc """
    Adapter for [hackney](https://github.com/benoitc/hackney).
    Remember to add `{:hackney, "~> 1.13"}` to dependencies (and `:hackney` to applications in `mix.exs`)
    Also, you need to recompile tesla after adding `:hackney` dependency:
    ```
    mix deps.clean tesla
    mix deps.compile tesla
    ```
    ## Example usage
    ```
    # set globally in config/config.exs
    config :tesla, :adapter, Tesla.Adapter.Hackney
    # set per module
    defmodule MyClient do
      use Tesla
      adapter Tesla.Adapter.Hackney
    end
    ```
    """
    @behaviour Tesla.Adapter

    alias Tesla.Multipart

    @impl Tesla.Adapter
    def call(env, opts) do
      # On success, merge status/headers/body back into the Tesla env.
      # A non-matching {:error, _} from request/2 falls through unchanged.
      with {:ok, status, headers, body} <- request(env, opts) do
        {:ok, %{env | status: status, headers: format_headers(headers), body: format_body(body)}}
      end
    end

    # Normalize header keys/values to strings with downcased names
    # (hackney may hand back non-binary keys, hence to_string/1).
    defp format_headers(headers) do
      for {key, value} <- headers do
        {String.downcase(to_string(key)), to_string(value)}
      end
    end

    # Body arrives either as iodata (flatten to one binary) or as a binary /
    # hackney response reference (passed through untouched).
    defp format_body(data) when is_list(data), do: IO.iodata_to_binary(data)
    defp format_body(data) when is_binary(data) or is_reference(data), do: data

    defp request(env, opts) do
      request(
        env.method,
        Tesla.build_url(env.url, env.query),
        env.headers,
        env.body,
        Tesla.Adapter.opts(env, opts)
      )
    end

    # Streaming bodies (a Stream or any enumerable-producing function)
    # are uploaded chunk by chunk via request_stream/5.
    defp request(method, url, headers, %Stream{} = body, opts),
      do: request_stream(method, url, headers, body, opts)

    defp request(method, url, headers, body, opts) when is_function(body),
      do: request_stream(method, url, headers, body, opts)

    # Multipart bodies contribute their own headers before being expanded.
    defp request(method, url, headers, %Multipart{} = mp, opts) do
      headers = headers ++ Multipart.headers(mp)
      body = Multipart.body(mp)
      request(method, url, headers, body, opts)
    end

    defp request(method, url, headers, body, opts) do
      # '' (empty charlist) stands in for a missing body, as hackney expects.
      handle(:hackney.request(method, url, headers, body || '', opts))
    end

    defp request_stream(method, url, headers, body, opts) do
      with {:ok, ref} <- :hackney.request(method, url, headers, :stream, opts) do
        # Push each chunk; :ok match crashes loudly on a failed send.
        for data <- body, do: :ok = :hackney.send_body(ref, data)
        handle(:hackney.start_response(ref))
      else
        e -> handle(e)
      end
    end

    # handle/1 normalizes hackney's return shapes to {:ok, status, headers, body}.
    defp handle({:error, _} = error), do: error
    defp handle({:ok, status, headers}), do: {:ok, status, headers, []}

    # Async response: status and headers arrive as mailbox messages.
    defp handle({:ok, ref}) when is_reference(ref) do
      handle_async_response({ref, %{status: nil, headers: nil}})
    end

    # Body held server-side behind a reference: read it eagerly.
    defp handle({:ok, status, headers, ref}) when is_reference(ref) do
      with {:ok, body} <- :hackney.body(ref) do
        {:ok, status, headers, body}
      end
    end

    defp handle({:ok, status, headers, body}), do: {:ok, status, headers, body}

    # Done once both status and headers have been collected.
    defp handle_async_response({ref, %{headers: headers, status: status}})
         when not (is_nil(headers) or is_nil(status)) do
      {:ok, status, headers, ref}
    end

    # Blocks until the :status and :headers messages for this ref arrive;
    # the pin (^ref) ignores messages belonging to other requests.
    defp handle_async_response({ref, output}) do
      receive do
        {:hackney_response, ^ref, {:status, status, _}} ->
          handle_async_response({ref, %{output | status: status}})

        {:hackney_response, ^ref, {:headers, headers}} ->
          handle_async_response({ref, %{output | headers: headers}})
      end
    end
  end
end
|
lib/tesla/adapter/hackney.ex
| 0.845911 | 0.844152 |
hackney.ex
|
starcoder
|
defmodule Mnesiac.Store do
  @moduledoc """
  This module defines a mnesiac store and contains overridable callbacks.
  """

  @doc """
  This function returns the store's configuration as a keyword list.
  For more information on the options supported here, see mnesia's documentation.
  ## Examples
  ```elixir
  iex> store_options()
  [attributes: [...], index: [:topic_id], disc_copies: [node()]]
  ```
  **Note**: Defining `:record_name` in `store_options()` will set the mnesia table name to the same.
  """
  @callback store_options() :: term

  @doc """
  This function is called by mnesiac either when it has no existing data to use or copy and will initialise a table
  ## Default Implementation
  ```elixir
  def init_store do
    :mnesia.create_table(Keyword.get(store_options(), :record_name, __MODULE__), store_options())
  end
  ```
  """
  @callback init_store() :: term

  @doc """
  This function is called by mnesiac when it joins a mnesia cluster and data for this store is found on the remote node in the cluster that is being connected to.
  ## Default Implementation
  ```elixir
  def copy_store do
    for type <- [:ram_copies, :disc_copies, :disc_only_copies] do
      value = Keyword.get(store_options(), type, [])
      if Enum.member?(value, node()) do
        :mnesia.add_table_copy(Keyword.get(store_options(), :record_name, __MODULE__), node(), type)
      end
    end
  end
  ```
  """
  @callback copy_store() :: term

  @doc ~S"""
  This function is called by mnesiac when it has detected data for a table on both the local node and the remote node of the cluster it is connecting to.
  ## Default Implementation
  ```elixir
  def resolve_conflict(cluster_node) do
    table_name = Keyword.get(store_options(), :record_name, __MODULE__)
    Logger.info(fn -> "[mnesiac:#{node()}] #{inspect(table_name)}: data found on both sides, copy aborted." end)
    :ok
  end
  ```
  **Note**: The default implementation for this function is to do nothing.
  """
  @callback resolve_conflict(node()) :: term

  @optional_callbacks copy_store: 0, init_store: 0, resolve_conflict: 1

  # Injects the default implementations documented on the callbacks above;
  # all of them can be overridden in the using module (defoverridable below).
  defmacro __using__(_) do
    quote do
      require Logger
      @behaviour Mnesiac.Store

      # Create the table using the store's own options; the table name
      # defaults to the using module unless :record_name is set.
      def init_store do
        :mnesia.create_table(Keyword.get(store_options(), :record_name, __MODULE__), store_options())
      end

      # Add a copy of the table on this node for every storage type that
      # lists node() in the store options.
      def copy_store do
        for type <- [:ram_copies, :disc_copies, :disc_only_copies] do
          value = Keyword.get(store_options(), type, [])

          if Enum.member?(value, node()) do
            :mnesia.add_table_copy(Keyword.get(store_options(), :record_name, __MODULE__), node(), type)
          end
        end
      end

      # Default conflict resolution: log and keep local data untouched.
      def resolve_conflict(cluster_node) do
        table_name = Keyword.get(store_options(), :record_name, __MODULE__)
        Logger.info(fn -> "[mnesiac:#{node()}] #{inspect(table_name)}: data found on both sides, copy aborted." end)
        :ok
      end

      defoverridable Mnesiac.Store
    end
  end
end
|
lib/mnesiac/store.ex
| 0.895891 | 0.881462 |
store.ex
|
starcoder
|
defmodule Resemblixir.Breakpoint do
  @moduledoc """
  Worker process that screenshots a scenario at a single breakpoint width,
  compares the capture against the stored reference image, and sends the
  resulting %Resemblixir.Breakpoint{} back to the owning process.

  NOTE(review): this module implements GenServer callbacks (init/1,
  handle_info/2) without `use GenServer`; GenServer.start_link/3 works as
  long as only these callbacks are exercised.
  """

  alias Resemblixir.{Scenario, Compare, Paths, Screenshot, MissingReferenceError}

  defstruct [:pid, :owner, :name, :width, :ref, :scenario, result: {:error, :not_finished}]

  @type result :: Compare.success | Compare.failure | {:error, :not_finished} | {:error, :timeout}
  @type t :: %__MODULE__{
          pid: pid,
          owner: pid,
          name: atom,
          width: integer,
          ref: String.t,
          scenario: Scenario.t,
          result: result
        }

  @spec run({name :: atom, width :: integer}, Scenario.t) :: __MODULE__.t
  @doc """
  Runs a single breakpoint asynchronously. The initial %Breakpoint{} struct will have {:error, :not_finished}.
  Sends a message to the calling process with the result when the task is finished.
  """
  def run({breakpoint_name, breakpoint_width}, %Scenario{url: url, folder: "/" <> _} = scenario)
      when is_atom(breakpoint_name) and is_integer(breakpoint_width) and is_binary(url) do
    args = %__MODULE__{
      name: breakpoint_name,
      width: breakpoint_width,
      scenario: scenario,
      ref: ref_image_path(scenario, breakpoint_name),
      owner: self()
    }

    {:ok, pid} = GenServer.start_link(__MODULE__, args, name: server_name(scenario, breakpoint_name))
    %{args | pid: pid}
  end

  @doc """
  Awaits a breakpoint's results. Use as an alternative to listening for the result message in a separate process.
  """
  def await(task, timeout \\ 5000)

  def await(%__MODULE__{pid: nil}, _timeout) do
    {:error, :no_pid}
  end

  def await(%__MODULE__{pid: pid, result: {:error, :not_finished}} = task, timeout) when is_pid(pid) do
    receive do
      %__MODULE__{result: {:ok, %Compare{}}} = result -> result
      %__MODULE__{result: {:error, %Compare{}}} = result -> result
      %__MODULE__{result: {:error, %MissingReferenceError{}}} = result -> result
    after
      timeout ->
        # Tell the worker we gave up so it can stop itself (see handle_info(:timeout, ...)).
        send pid, :timeout
        %{task | result: {:error, :timeout}}
    end
  end

  # Fix: awaiting a task that has already finished used to raise
  # FunctionClauseError; return it unchanged instead.
  def await(%__MODULE__{} = task, _timeout), do: task

  @spec server_name(Scenario.t, atom) :: atom
  @doc false
  def server_name(%Scenario{name: name}, breakpoint_name) when is_atom(breakpoint_name) do
    name
    |> Macro.camelize()
    |> String.to_atom()
    |> Module.concat(breakpoint_name)
  end

  # Fix: the original spec claimed Compare.result, but init/1 returns the
  # standard GenServer {:ok, state} tuple.
  @spec init(__MODULE__.t) :: {:ok, __MODULE__.t}
  def init(%__MODULE__{} = args) do
    args = %{args | pid: self()}
    # Kick off the comparison after init returns, keeping start_link fast.
    send self(), :start
    {:ok, args}
  end

  def handle_info(:start, %__MODULE__{} = state) do
    if File.exists?(state.ref) do
      :ok = start(state)
      {:noreply, state}
    else
      # No reference image: report a MissingReferenceError and stop.
      error = %MissingReferenceError{path: state.ref, breakpoint: state.name}
      state = %{state | result: {:error, error}}
      if state.owner, do: send state.owner, state
      {:stop, :normal, state}
    end
  end

  def handle_info({:result, {:ok, %Compare{} = result}}, %__MODULE__{} = state) do
    state = %{state | result: {:ok, result}}
    if state.owner, do: send state.owner, state
    {:stop, :normal, state}
  end

  def handle_info({:result, {:error, %Compare{} = result}}, %__MODULE__{} = state) do
    state = %{state | result: {:error, result}}
    if state.owner, do: send state.owner, state
    {:stop, :normal, state}
  end

  # Sent by await/2 when the caller timed out; stop quietly.
  def handle_info(:timeout, %__MODULE__{} = state) do
    state = %{state | result: {:error, :timeout}}
    {:stop, :normal, state}
  end

  # Forward anything unexpected to the owner rather than letting it pile up
  # in this process's mailbox.
  def handle_info(message, state) do
    send state.owner, message
    {:noreply, state}
  end

  # Takes the screenshot and triggers the comparison against the reference.
  # The :ok assertion in handle_info(:start, ...) relies on Compare.compare/4
  # returning :ok and delivering the result via a {:result, _} message.
  def start(%__MODULE__{name: name, width: width, ref: ref, scenario: %Scenario{} = scenario}) do
    scenario
    |> Screenshot.take({name, width})
    |> Compare.compare(scenario, name, ref)
  end

  def ref_image_path(%Scenario{name: scenario_name}, breakpoint_name)
      when is_binary(scenario_name) and is_atom(breakpoint_name) do
    scenario_name
    |> breakpoint_file_name(breakpoint_name)
    |> Paths.reference_file()
  end

  def test_image_path(%Scenario{name: scenario_name, folder: test_folder_name}, breakpoint_name)
      when is_binary(scenario_name) and is_atom(breakpoint_name) do
    Paths.test_file(test_folder_name, breakpoint_file_name(scenario_name, breakpoint_name))
  end

  # "My Scenario" + :mobile -> "my_scenario_mobile.png"
  defp breakpoint_file_name(scenario_name, breakpoint_name) do
    scenario_name
    |> String.replace(" ", "_")
    |> String.downcase()
    |> Kernel.<>("_#{breakpoint_name}.png")
  end
end
|
lib/resemblixir/breakpoint.ex
| 0.793266 | 0.415492 |
breakpoint.ex
|
starcoder
|
import Realm.Semigroupoid.Algebra
defprotocol Realm.Arrow do
  @moduledoc """
  Arrows abstract the idea of computations, potentially with a context.
  Arrows are in fact an abstraction above monads, and can be used both to
  express all other type classes in Realm. They also enable some nice
  flow-based reasoning about computation.
  For a nice illustrated explanation,
  see [Haskell/Understanding arrows](https://en.wikibooks.org/wiki/Haskell/Understanding_arrows)
  Arrows let you think diagrammatically, and is a powerful way of thinking
  about flow programming, concurrency, and more.
  ┌---> f --------------------------┐
  |                                 v
  input ---> split            unsplit ---> result
  |                                 ^
  |    ┌--- h ---┐                  |
  |    |         v                  |
  └---> g ---> split     unsplit ---┘
       |                  ^
       └--- i ---┘
  ## Type Class
  An instance of `Realm.Arrow` must also implement `Realm.Category`,
  and define `Realm.Arrow.arrowize/2`.
  Semigroupoid [compose/2, apply/2]
  ↓
  Category [identity/1]
  ↓
  Arrow [arrowize/2]
  """

  @doc """
  Lift a function into an arrow, much like how `of/2` does with data.
  Essentially a label for composing functions end-to-end, where instances
  may have their own special idea of what composition means. The simplest example
  is a regular function. Others are possible, such as Kleisli arrows.
  ## Examples
      iex> use Realm.Arrow
      ...> times_ten = arrowize(fn -> nil end, &(&1 * 10))
      ...> 5 |> Realm.pipe(times_ten)
      50
  """
  @spec arrowize(t(), fun()) :: t()
  def arrowize(sample, fun)
end
defmodule Realm.Arrow.Algebra do
alias Realm.{Arrow, Semigroupoid}
@doc """
Swap positions of elements in a tuple.
## Examples
iex> import Realm.Arrow.Algebra
...> swap({1, 2})
{2, 1}
"""
@spec swap({any(), any()}) :: {any(), any()}
def swap({x, y}), do: {y, x}
@doc """
Target the first element of a tuple.
## Examples
iex> import Realm.Arrow.Algebra
...> first(fn x -> x * 50 end).({1, 1})
{50, 1}
"""
# NOTE: `pipe/2` used below is provided by `import Realm.Semigroupoid.Algebra`
# at the top of this file.
@spec first(Arrow.t()) :: Arrow.t()
def first(arrow) do
Arrow.arrowize(arrow, fn {x, y} ->
{x |> pipe(arrow), y |> pipe(id_arrow(arrow))}
end)
end
@doc """
Target the second element of a tuple.
## Examples
iex> import Realm.Arrow.Algebra
...> second(fn x -> x * 50 end).({1, 1})
{1, 50}
"""
@spec second(Arrow.t()) :: Arrow.t()
def second(arrow) do
Arrow.arrowize(arrow, fn {x, y} ->
{x |> pipe(id_arrow(arrow)), y |> pipe(arrow)}
end)
end
@doc """
The identity function lifted into an arrow of the correct type.
## Examples
iex> import Realm.Arrow.Algebra
...> id_arrow(fn -> nil end).(99)
99
"""
@spec id_arrow(Arrow.t()) :: (any() -> Arrow.t())
def id_arrow(sample), do: Arrow.arrowize(sample, &Quark.id/1)
@doc """
Copy a single value into both positions of a 2-tuple.
This is useful is you want to run functions on the input separately.
## Examples
iex> import Realm.Arrow.Algebra
...> split(42)
{42, 42}
iex> import Realm.Arrow.Algebra
...> 5
...> |> split()
...> |> (second(fn x -> x - 2 end)
...> <~> first(fn y -> y * 10 end)
...> <~> second(&inspect/1)).()
{50, "3"}
iex> import Realm.Arrow.Algebra
...> import Realm.Semigroupoid.Algebra
...> 5
...> |> split()
...> |> pipe(second(fn x -> x - 2 end))
...> |> pipe(first(fn y -> y * 10 end))
...> |> pipe(second(&inspect/1))
{50, "3"}
"""
@spec split(any()) :: {any(), any()}
def split(x), do: {x, x}
@doc """
Merge two tuple values with a combining function.
## Examples
iex> import Realm.Arrow.Algebra
...> unsplit({1, 2}, &+/2)
3
"""
@spec unsplit({any(), any()}, (any(), any() -> any())) :: any()
def unsplit({x, y}, fun), do: fun.(x, y)
@doc """
Switch the associativity of a nested tuple. Helpful since many arrows act
on a subset of a tuple, and you may want to move portions in and out of that stream.
## Examples
iex> import Realm.Arrow.Algebra
...> reassociate({1, {2, 3}})
{{1, 2}, 3}
iex> import Realm.Arrow.Algebra
...> reassociate({{1, 2}, 3})
{1, {2, 3}}
"""
@spec reassociate({any(), {any(), any()}} | {{any(), any()}, any()}) ::
{{any(), any()}, any()} | {any(), {any(), any()}}
def reassociate({{a, b}, c}), do: {a, {b, c}}
def reassociate({a, {b, c}}), do: {{a, b}, c}
@doc """
Compose a function (left) with an arrow (right) to produce a new arrow.
## Examples
iex> import Realm.Arrow.Algebra
...> alias Realm.Arrow
...> f = precompose(
...> fn x -> x + 1 end,
...> Arrow.arrowize(fn _ -> nil end, fn y -> y * 10 end)
...> )
...> f.(42)
430
"""
@spec precompose(fun(), Arrow.t()) :: Arrow.t()
def precompose(fun, arrow), do: Semigroupoid.compose(Arrow.arrowize(arrow, fun), arrow)
@doc """
Compose an arrow (left) with a function (right) to produce a new arrow.
## Examples
iex> import Realm.Arrow.Algebra
...> alias Realm.Arrow
...> f = postcompose(
...> Arrow.arrowize(fn _ -> nil end, fn x -> x + 1 end),
...> fn y -> y * 10 end
...> )
...> f.(42)
430
"""
@spec postcompose(Arrow.t(), fun()) :: Arrow.t()
def postcompose(arrow, fun), do: Semigroupoid.compose(arrow, Arrow.arrowize(arrow, fun))
end
defimpl Realm.Arrow, for: Function do
  use Quark

  # A plain function becomes an arrow by currying; the sample arrow is ignored.
  def arrowize(_sample, fun) do
    curry(fun)
  end

  # Run the arrow on the first tuple element; the second passes through untouched.
  def first(arrow) do
    fn {target, unchanged} -> {arrow.(target), unchanged} end
  end
end
|
lib/realm/arrow.ex
| 0.840111 | 0.641998 |
arrow.ex
|
starcoder
|
defmodule ExVcf.Vcf.Info do
  @moduledoc """
  Constructors for `##INFO` meta-information header lines of a VCF file,
  plus shortcuts for commonly-used keys from the VCF specification.
  """

  alias ExVcf.Vcf.Info
  alias ExVcf.Vcf.HeaderLine

  @header_type "INFO"

  # The header key shared by every line built by this module.
  def type, do: @header_type

  # Reserved INFO keys listed in the VCF 4.x specification.
  @reserved_info_keys MapSet.new([
    "AA",
    "AC",
    "AF",
    "AN",
    "BQ",
    "CIGAR",
    "DB",
    "DP",
    "END",
    "H2",
    "H3",
    "MQ",
    "MQ0",
    "NS",
    "SB",
    "SOMATIC",
    "VALIDATED",
    "1000G"
  ])

  def reserved_info_keys, do: @reserved_info_keys

  @doc ~S"""
  Builds an INFO header line with `Type=String`.

  ## Examples
      iex> ExVcf.Vcf.Info.new_string("NS", 1, "Number of Samples With Data")
      %ExVcf.Vcf.HeaderLine{fields: [ID: "NS", Number: 1, Type: "String", "Description": "Number of Samples With Data"], key: "INFO", value: ""}
  """
  def new_string(id, number, description, fields \\ []) do
    HeaderLine.new(@header_type, [ID: id, Number: number, Type: HeaderLine.string, Description: description] ++ fields)
  end

  @doc "Builds an INFO header line with `Type=Integer`."
  def new_integer(id, number, description, fields \\ []) do
    HeaderLine.new(@header_type, [ID: id, Number: number, Type: HeaderLine.integer, Description: description] ++ fields)
  end

  @doc "Builds an INFO header line with `Type=Float`."
  def new_float(id, number, description, fields \\ []) do
    HeaderLine.new(@header_type, [ID: id, Number: number, Type: HeaderLine.float, Description: description] ++ fields)
  end

  @doc "Builds an INFO header line with `Type=Flag` (flags always have Number=0)."
  def new_flag(id, description, fields \\ []) do
    HeaderLine.new(@header_type, [ID: id, Number: 0, Type: HeaderLine.flag, Description: description] ++ fields)
  end

  @doc "Builds an INFO header line with `Type=Character`."
  def new_character(id, number, description, fields \\ []) do
    HeaderLine.new(@header_type, [ID: id, Number: number, Type: HeaderLine.character, Description: description] ++ fields)
  end

  def imprecise(), do: new_flag("IMPRECISE", "Imprecise structural variation")
  def novel(), do: new_flag("NOVEL", "Indicates a novel structural variation")
  def end_pos(), do: new_integer("END", 1, "End position of the variant described in this record")
  def allele_depth(), do: new_integer("AD", "R", "Allele depth")
  def allele_depth_forward(), do: new_integer("ADF", "R", "Allele depth forward")
  # BUG FIX: the reverse-strand key is "ADR" per the VCF spec; it previously
  # duplicated the forward key "ADF".
  def allele_depth_reverse(), do: new_integer("ADR", "R", "Allele depth reverse")
end
|
lib/vcf/info.ex
| 0.663342 | 0.421076 |
info.ex
|
starcoder
|
defmodule Construct.Type do
  @moduledoc """
  Type-coercion module, originally copied and modified from
  [Ecto.Type](https://github.com/elixir-ecto/ecto/blob/master/lib/ecto/type.ex)
  and behaviour to implement your own types.
  ## Defining custom types
      defmodule CustomType do
        @behaviour Construct.Type
        def cast(value) do
          {:ok, value}
        end
      end
  """

  @type t :: builtin | custom | list(builtin | custom)
  @type custom :: module | Construct.t
  @type builtin :: :integer | :float | :boolean | :string |
                   :binary | :pid | :reference | :decimal | :utc_datetime |
                   :naive_datetime | :date | :time | :any |
                   :array | {:array, t} | :map | {:map, t} | :struct
  @type cast_ret :: {:ok, term} | {:error, term} | :error

  @builtin ~w(
    integer float boolean string binary pid reference decimal
    utc_datetime naive_datetime date time any array map struct
  )a

  # Built-in parameterized type constructors, e.g. {:array, t} / {:map, t}.
  @builtinc ~w(array map)a

  @doc """
  Casts the given input to the custom type.
  """
  @callback cast(term) :: cast_ret

  ## Functions

  @doc """
  Returns all built-in type names.
  """
  def builtin do
    @builtin
  end

  @doc """
  Returns the built-in parameterized type constructors (`:array`, `:map`).
  """
  def builtinc do
    @builtinc
  end

  @doc """
  Checks if we have a primitive type.
  iex> primitive?(:string)
  true
  iex> primitive?(Another)
  false
  iex> primitive?({:array, :string})
  true
  iex> primitive?({:array, Another})
  true
  iex> primitive?([Another, {:array, :integer}])
  false
  """
  @spec primitive?(t) :: boolean
  def primitive?({type, _}) when type in @builtinc, do: true
  def primitive?(type) when type in @builtin, do: true
  def primitive?(_), do: false

  @doc """
  Casts a value to the given type.
  iex> cast(:any, "whatever")
  {:ok, "whatever"}
  iex> cast(:any, nil)
  {:ok, nil}
  iex> cast(:string, nil)
  :error
  iex> cast(:integer, 1)
  {:ok, 1}
  iex> cast(:integer, "1")
  {:ok, 1}
  iex> cast(:integer, "1.0")
  :error
  iex> cast(:float, 1.0)
  {:ok, 1.0}
  iex> cast(:float, 1)
  {:ok, 1.0}
  iex> cast(:float, "1")
  {:ok, 1.0}
  iex> cast(:float, "1.0")
  {:ok, 1.0}
  iex> cast(:float, "1-foo")
  :error
  iex> cast(:boolean, true)
  {:ok, true}
  iex> cast(:boolean, false)
  {:ok, false}
  iex> cast(:boolean, "1")
  {:ok, true}
  iex> cast(:boolean, "0")
  {:ok, false}
  iex> cast(:boolean, "whatever")
  :error
  iex> cast(:string, "beef")
  {:ok, "beef"}
  iex> cast(:binary, "beef")
  {:ok, "beef"}
  iex> cast(:decimal, Decimal.from_float(1.0))
  {:ok, Decimal.from_float(1.0)}
  iex> cast(:decimal, Decimal.new("1.0"))
  {:ok, Decimal.from_float(1.0)}
  iex> cast(:decimal, 1.0)
  {:ok, Decimal.from_float(1.0)}
  iex> cast(:decimal, "1.0")
  {:ok, Decimal.from_float(1.0)}
  iex> cast({:array, :integer}, [1, 2, 3])
  {:ok, [1, 2, 3]}
  iex> cast({:array, :integer}, ["1", "2", "3"])
  {:ok, [1, 2, 3]}
  iex> cast({:array, :string}, [1, 2, 3])
  :error
  iex> cast(:string, [1, 2, 3])
  :error
  """
  @spec cast(t, term, options) :: cast_ret | any
    when options: Keyword.t()
  def cast({:array, type}, term, opts) when is_list(term) do
    array(term, type, &cast/3, opts)
  end
  def cast({:map, type}, term, opts) when is_map(term) do
    map(Map.to_list(term), type, &cast/3, %{}, opts)
  end
  # Custom parameterized type: delegate to its castc/2 callback.
  def cast({typec, arg}, term, _opts) when typec not in @builtinc do
    typec.castc(term, arg)
  end
  def cast(type, term, opts) when is_atom(type) do
    cond do
      not primitive?(type) ->
        # Prefer the options-aware cast/2 callback when the module defines one.
        if Code.ensure_loaded?(type) && function_exported?(type, :cast, 2) do
          type.cast(term, opts)
        else
          type.cast(term)
        end
      true ->
        cast(type, term)
    end
  end
  def cast(type, term, _opts) do
    cast(type, term)
  end

  @doc """
  Behaves like `cast/3`, but without options provided to nested types.
  """
  @spec cast(t, term) :: cast_ret | any
  def cast(type, term)
  def cast(types, term) when is_map(types) do
    if is_map(term) or is_list(term) do
      Construct.Cast.make(types, term)
    else
      :error
    end
  end
  # A list of types acts as a cast pipeline; the first failure short-circuits.
  def cast(types, term) when is_list(types) do
    Enum.reduce(types, {:ok, term}, fn
      (type, {:ok, term}) -> cast(type, term)
      (_, ret) -> ret
    end)
  end
  def cast({:array, type}, term) when is_list(term) do
    array(term, type, &cast/3, [])
  end
  def cast({:map, type}, term) when is_map(term) do
    map(Map.to_list(term), type, &cast/3, %{}, [])
  end
  def cast({typec, arg}, term) when typec not in @builtinc do
    typec.castc(term, arg)
  end
  def cast(:float, term) when is_binary(term) do
    case Float.parse(term) do
      {float, ""} -> {:ok, float}
      _ -> :error
    end
  end
  def cast(:float, term) when is_integer(term), do: {:ok, :erlang.float(term)}
  def cast(:boolean, term) when term in ~w(true 1), do: {:ok, true}
  def cast(:boolean, term) when term in ~w(false 0), do: {:ok, false}
  def cast(:pid, term) when is_pid(term), do: {:ok, term}
  def cast(:reference, term) when is_reference(term), do: {:ok, term}
  # Decimal is called via apply/3 so this module compiles without the dependency.
  def cast(:decimal, term) when is_binary(term) do
    validate_decimal(apply(Decimal, :parse, [term]))
  end
  def cast(:decimal, term) when is_integer(term) do
    {:ok, apply(Decimal, :new, [term])}
  end
  def cast(:decimal, term) when is_float(term) do
    {:ok, apply(Decimal, :from_float, [term])}
  end
  def cast(:decimal, %{__struct__: Decimal} = term) do
    validate_decimal({:ok, term})
  end
  def cast(:date, term) do
    cast_date(term)
  end
  def cast(:time, term) do
    cast_time(term)
  end
  def cast(:naive_datetime, term) do
    cast_naive_datetime(term)
  end
  def cast(:utc_datetime, term) do
    cast_utc_datetime(term)
  end
  def cast(:integer, term) when is_binary(term) do
    case Integer.parse(term) do
      {int, ""} -> {:ok, int}
      _ -> :error
    end
  end
  def cast(type, term) do
    cond do
      not primitive?(type) ->
        type.cast(term)
      of_base_type?(type, term) ->
        {:ok, term}
      true ->
        :error
    end
  end

  ## Date

  defp cast_date(binary) when is_binary(binary) do
    case Date.from_iso8601(binary) do
      {:ok, _} = ok ->
        ok
      {:error, _} ->
        # Fall back: accept a full datetime string and keep only the date part.
        case NaiveDateTime.from_iso8601(binary) do
          {:ok, naive_datetime} -> {:ok, NaiveDateTime.to_date(naive_datetime)}
          {:error, _} -> :error
        end
    end
  end
  defp cast_date(%{"year" => empty, "month" => empty, "day" => empty}) when empty in ["", nil],
    do: {:ok, nil}
  defp cast_date(%{year: empty, month: empty, day: empty}) when empty in ["", nil],
    do: {:ok, nil}
  defp cast_date(%{"year" => year, "month" => month, "day" => day}),
    do: cast_date(to_i(year), to_i(month), to_i(day))
  defp cast_date(%{year: year, month: month, day: day}),
    do: cast_date(to_i(year), to_i(month), to_i(day))
  defp cast_date(_),
    do: :error
  defp cast_date(year, month, day) when is_integer(year) and is_integer(month) and is_integer(day) do
    case Date.new(year, month, day) do
      {:ok, _} = ok -> ok
      {:error, _} -> :error
    end
  end
  defp cast_date(_, _, _),
    do: :error

  ## Time

  defp cast_time(<<hour::2-bytes, ?:, minute::2-bytes>>),
    do: cast_time(to_i(hour), to_i(minute), 0, nil)
  defp cast_time(binary) when is_binary(binary) do
    case Time.from_iso8601(binary) do
      {:ok, _} = ok -> ok
      {:error, _} -> :error
    end
  end
  defp cast_time(%{"hour" => empty, "minute" => empty}) when empty in ["", nil],
    do: {:ok, nil}
  defp cast_time(%{hour: empty, minute: empty}) when empty in ["", nil],
    do: {:ok, nil}
  defp cast_time(%{"hour" => hour, "minute" => minute} = map),
    do: cast_time(to_i(hour), to_i(minute), to_i(Map.get(map, "second")), to_i(Map.get(map, "microsecond")))
  defp cast_time(%{hour: hour, minute: minute, second: second, microsecond: {microsecond, precision}}),
    do: cast_time(to_i(hour), to_i(minute), to_i(second), {to_i(microsecond), to_i(precision)})
  defp cast_time(%{hour: hour, minute: minute} = map),
    do: cast_time(to_i(hour), to_i(minute), to_i(Map.get(map, :second)), to_i(Map.get(map, :microsecond)))
  defp cast_time(_),
    do: :error
  defp cast_time(hour, minute, sec, usec) when is_integer(usec) do
    cast_time(hour, minute, sec, {usec, 6})
  end
  defp cast_time(hour, minute, sec, nil) do
    cast_time(hour, minute, sec, {0, 0})
  end
  defp cast_time(hour, minute, sec, {usec, precision})
       when is_integer(hour) and is_integer(minute) and
            (is_integer(sec) or is_nil(sec)) and is_integer(usec) and is_integer(precision) do
    case Time.new(hour, minute, sec || 0, {usec, precision}) do
      {:ok, _} = ok -> ok
      {:error, _} -> :error
    end
  end
  defp cast_time(_, _, _, _) do
    :error
  end

  ## Naive datetime

  defp cast_naive_datetime(nil) do
    :error
  end
  defp cast_naive_datetime(binary) when is_binary(binary) do
    case NaiveDateTime.from_iso8601(binary) do
      {:ok, _} = ok -> ok
      {:error, _} -> :error
    end
  end
  defp cast_naive_datetime(%{"year" => empty, "month" => empty, "day" => empty,
                             "hour" => empty, "minute" => empty}) when empty in ["", nil],
    do: {:ok, nil}
  defp cast_naive_datetime(%{year: empty, month: empty, day: empty,
                             hour: empty, minute: empty}) when empty in ["", nil],
    do: {:ok, nil}
  defp cast_naive_datetime(%{} = map) do
    with {:ok, date} <- cast_date(map),
         {:ok, time} <- cast_time(map) do
      NaiveDateTime.new(date, time)
    end
  end
  defp cast_naive_datetime(_) do
    :error
  end

  ## UTC datetime

  defp cast_utc_datetime(binary) when is_binary(binary) do
    case DateTime.from_iso8601(binary) do
      {:ok, datetime, _offset} -> {:ok, datetime}
      {:error, :missing_offset} ->
        # No offset in the string: interpret it as UTC.
        case NaiveDateTime.from_iso8601(binary) do
          {:ok, naive_datetime} -> {:ok, DateTime.from_naive!(naive_datetime, "Etc/UTC")}
          {:error, _} -> :error
        end
      {:error, _} -> :error
    end
  end
  defp cast_utc_datetime(%DateTime{time_zone: "Etc/UTC"} = datetime), do: {:ok, datetime}
  defp cast_utc_datetime(%DateTime{} = datetime) do
    # Normalize any other time zone to UTC via the unix epoch.
    case (datetime |> DateTime.to_unix() |> DateTime.from_unix()) do
      {:ok, _} = ok -> ok
      {:error, _} -> :error
    end
  end
  defp cast_utc_datetime(value) do
    case cast_naive_datetime(value) do
      {:ok, %NaiveDateTime{} = naive_datetime} ->
        {:ok, DateTime.from_naive!(naive_datetime, "Etc/UTC")}
      {:ok, _} = ok ->
        ok
      :error ->
        :error
    end
  end

  ## Helpers

  # Infinity and NaN are not valid cast results.
  defp validate_decimal({:ok, %{__struct__: Decimal, coef: coef}}) when coef in [:inf, :qNaN, :sNaN],
    do: :error
  defp validate_decimal(value),
    do: value

  # Checks if a value is of the given primitive type.
  defp of_base_type?(:any, _), do: true
  defp of_base_type?(:float, term), do: is_float(term)
  defp of_base_type?(:integer, term), do: is_integer(term)
  defp of_base_type?(:boolean, term), do: is_boolean(term)
  defp of_base_type?(:binary, term), do: is_binary(term)
  defp of_base_type?(:string, term), do: is_binary(term)
  defp of_base_type?(:map, term), do: is_map(term) and not Map.has_key?(term, :__struct__)
  defp of_base_type?(:struct, term), do: is_map(term) and Map.has_key?(term, :__struct__)
  defp of_base_type?(:decimal, value), do: match?(%{__struct__: Decimal}, value)
  defp of_base_type?(_, _), do: false

  defp array(term, type, fun, opts) do
    if Keyword.get(opts, :error_values) do
      array_acc_err(term, type, fun, {false, []}, opts)
    else
      array_acc(term, type, fun, [], opts)
    end
  end

  # Error-collecting variant: casts every element and records failures
  # alongside their index instead of stopping at the first one.
  defp array_acc_err([h|t], type, fun, {has_errors?, acc}, opts) do
    case fun.(type, h, opts) do
      {:ok, h} -> array_acc_err(t, type, fun, {has_errors?, [h|acc]}, opts)
      {:error, reason} -> array_acc_err(t, type, fun, {true, [{{:error, reason}, h}|acc]}, opts)
      :error -> array_acc_err(t, type, fun, {true, [{{:error, :error}, h}|acc]}, opts)
    end
  end
  defp array_acc_err([], _type, _fun, {false, acc}, _opts) do
    {:ok, Enum.reverse(acc)}
  end
  defp array_acc_err([], _type, _fun, {true, acc}, _opts) do
    errors =
      acc
      |> Enum.reverse()
      |> Enum.with_index()
      |> Enum.reduce([], fn
        ({{{:error, reason}, _}, index}, acc) when is_map(reason) -> [%{index: index, error: reason} | acc]
        ({{{:error, reason}, value}, index}, acc) -> [%{index: index, error: reason, value: value} | acc]
        ({_value, _index}, acc) -> acc
      end)
      |> Enum.reverse()
    {:error, errors}
  end

  defp array_acc([h|t], type, fun, acc, opts) do
    case fun.(type, h, opts) do
      {:ok, h} -> array_acc(t, type, fun, [h|acc], opts)
      {:error, reason} -> {:error, reason}
      :error -> :error
    end
  end
  defp array_acc([], _type, _fun, acc, _opts) do
    {:ok, Enum.reverse(acc)}
  end

  defp map(list, type, fun, acc, opts \\ [])
  defp map([{key, value} | t], type, fun, acc, opts) do
    case fun.(type, value, opts) do
      # BUG FIX: propagate `opts` on recursion. Previously the recursive call
      # omitted it, so every key after the first was cast with the default [].
      {:ok, value} -> map(t, type, fun, Map.put(acc, key, value), opts)
      {:error, reason} -> {:error, reason}
      :error -> :error
    end
  end
  defp map([], _type, _fun, acc, _opts) do
    {:ok, acc}
  end

  defp to_i(nil), do: nil
  defp to_i(int) when is_integer(int), do: int
  defp to_i(bin) when is_binary(bin) do
    case Integer.parse(bin) do
      {int, ""} -> int
      _ -> nil
    end
  end
end
|
lib/construct/type.ex
| 0.884133 | 0.477615 |
type.ex
|
starcoder
|
defmodule Solution do
# NOTE(review): ad-hoc primality check driven by stdin. The factorize/2
# recursion below is unusual (it walks n downward, special-casing
# divisibility by 2 and 3) -- comments describe what the code does, not a
# claimed mathematical property.
# Collects candidate factors of t, starting from n and working down to 1.
def factorize(1, _), do: [1]
def factorize(n ,t) do
# div(t, n) contributes the integer co-factor of n in t when non-zero.
num = case div(t, n) do
0 -> []
x -> [x]
end
cond do
rem(n, 2) == 0 ->
[n] ++ num ++ (div(n,2) |> factorize(t))
rem(n, 3) == 0 ->
[n] ++ num ++ (div(n,3) |> factorize(t))
true -> [n] ++ factorize(n-1, t)
end
end
# Prints "Prime" or "Not prime" for n based on the distinct values collected
# from factorize/2. Returns the :ok from IO.puts.
def is_prime(n) do
# half is a candidate co-factor when n is divisible by 2 or 3, else :nil.
half = cond do
rem(n, 2) == 0 -> div(n, 2)
rem(n, 3) == 0 -> div(n, 3)
true -> :nil
end
res = case is_integer(half) do
false -> [n, 1]
true -> [n] ++ [div(n, half)] ++ factorize(half, n)
end
# More than two distinct collected values is treated as composite.
case (res |> Enum.uniq |> length) > 2 do
true -> IO.puts "Not prime"
false -> IO.puts "Prime"
end
end
# Reads a test-case count from stdin, then one integer per line to check.
def run do
r = IO.gets("") |> String.trim |> String.to_integer
Enum.map(Range.new(1, r), fn(_) ->
IO.gets("") |> String.trim |> String.to_integer |> is_prime
end)
end
end
# Solution.run
defmodule BetterSolution do
  @moduledoc false

  # Collects every divisor of `n` between `lo` and `n_sqrt` (inclusive),
  # in ascending order. An empty result means no divisor exists in that
  # range, i.e. `n` is prime.
  def is_prime(n, n_sqrt, lo \\ 2)

  def is_prime(_n, n_sqrt, current) when current > n_sqrt, do: []

  def is_prime(n, n_sqrt, current) do
    rest = is_prime(n, n_sqrt, current + 1)

    if rem(n, current) == 0 do
      [current | rest]
    else
      rest
    end
  end

  # Prints "Prime"/"Not prime" for `n` using trial division up to ceil(sqrt(n)).
  def find_prime(1), do: IO.puts "Not prime"
  def find_prime(2), do: IO.puts "Prime"

  def find_prime(n) do
    limit = n |> :math.sqrt() |> :math.ceil() |> round()

    case is_prime(n, limit) do
      [] -> IO.puts "Prime"
      _divisors -> IO.puts "Not prime"
    end
  end

  # Reads a test-case count from stdin, then checks one integer per line.
  def run do
    count = IO.gets("") |> String.trim() |> String.to_integer()

    Enum.map(1..count, fn _ ->
      IO.gets("") |> String.trim() |> String.to_integer() |> find_prime()
    end)
  end
end
BetterSolution.run
|
math/is_prime.ex
| 0.536313 | 0.701764 |
is_prime.ex
|
starcoder
|
defmodule PersianCalendar do
  @moduledoc """
  convert shamsi/milady dates
  """

  # Days in each shamsi (solar hijri) month, Farvardin..Esfand.
  @shamsi_month_days [31, 31, 31, 31, 31, 31, 30, 30, 30, 30, 30, 29]

  # Difference between the two calendars' running day counts.
  @calendar_offset_days 226_899

  @doc """
  returns shamsi date from given milady date in format {year, month, day}
  """
  @spec from_milady({number, number, number}) :: {number, number, number}
  def from_milady({year, month, day}) do
    total_milady_days =
      (year - 1) * 365 + trunc((year - 1) / 4) + day +
        days_before_month(milady_month_days(year), month)

    shamsi_days = total_milady_days - @calendar_offset_days
    # subtract one day per shamsi leap year (one every 4 years)
    shamsi_days = shamsi_days - trunc(shamsi_days / (365 * 4))

    shamsi_year = trunc(shamsi_days / 365) + 1
    remaining_days = rem(shamsi_days, 365)
    shamsi_month = trunc(remaining_days / 30) + 1

    shamsi_day =
      rem(remaining_days - days_before_month(@shamsi_month_days, shamsi_month), 30)

    {shamsi_year, shamsi_month, shamsi_day}
  end

  @doc """
  returns milady date from given shamsi date in format {year, month, day}
  """
  @spec from_shamsi({number, number, number}) :: {number, number, number}
  def from_shamsi({year, month, day}) do
    total_shamsi_days =
      (year - 1) * 365 + trunc((year - 1) / 4) + day +
        days_before_month(@shamsi_month_days, month)

    milady_days = total_shamsi_days + @calendar_offset_days
    # subtract one day per milady leap year (one every 4 years)
    milady_days = milady_days - trunc(milady_days / (365 * 4))

    milady_year = trunc(milady_days / 365) + 1
    remaining_days = rem(milady_days, 365)
    milady_month = trunc(remaining_days / 30) + 1

    milady_day =
      rem(remaining_days - days_before_month(milady_month_days(milady_year), milady_month), 30)

    {milady_year, milady_month, milady_day}
  end

  @doc """
  returns local shamsi date in format {year, month, day}
  """
  @spec local_date :: {number, number, number}
  def local_date do
    {{year, month, day}, _time} = :calendar.local_time()
    from_milady({year, month, day})
  end

  # Milady month lengths for `year` (February has 29 days on leap years).
  defp milady_month_days(year) do
    feb_days = if rem(year, 4) == 0, do: 29, else: 28
    [31, feb_days, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
  end

  # Total days in the months strictly before `month`.
  defp days_before_month(month_days, month) do
    month_days |> Enum.take(month - 1) |> Enum.sum()
  end
end
|
lib/persian_calendar.ex
| 0.795142 | 0.523664 |
persian_calendar.ex
|
starcoder
|
defmodule Explorer.Shared do
  # A collection of **private** helpers shared in Explorer.
  @moduledoc false

  @doc """
  All supported dtypes.
  """
  def dtypes, do: ~w(float integer boolean string date datetime)a

  @doc """
  Gets the backend from a `Keyword.t()` or `nil`.
  """
  def backend_from_options!(opts) do
    case Keyword.fetch(opts, :backend) do
      :error ->
        nil

      {:ok, backend} when is_atom(backend) ->
        backend

      {:ok, other} ->
        raise ArgumentError,
              ":backend must be an atom, got: #{inspect(other)}"
    end
  end

  @doc """
  Gets the implementation of a dataframe or series.
  """
  def impl!(%{data: %struct{}}), do: struct

  def impl!([%{data: %first_struct{}} | _] = dfs) when is_list(dfs) do
    Enum.reduce(dfs, first_struct, fn %{data: %struct{}}, chosen ->
      pick_struct(chosen, struct)
    end)
  end

  def impl!(%{data: %struct1{}}, %{data: %struct2{}}), do: pick_struct(struct1, struct2)

  @doc """
  Applies a function with args using the implementation of a dataframe or series.
  """
  def apply_impl(df_or_series, fun, args \\ []) do
    apply(impl!(df_or_series), fun, [df_or_series | args])
  end

  @doc """
  Gets the implementation of a list of maybe dataframes or series.
  """
  def find_impl!(list) do
    Enum.reduce(list, fn
      %{data: %struct{}}, chosen -> pick_struct(struct, chosen)
      _other, chosen -> chosen
    end)
  end

  defp pick_struct(struct, struct), do: struct

  defp pick_struct(struct1, struct2) do
    raise "cannot invoke Explorer function because it relies on two incompatible implementations: " <>
            "#{inspect(struct1)} and #{inspect(struct2)}. You may need to call Explorer.backend_transfer/1 " <>
            "(or Explorer.backend_copy/1) on one or both of them to transfer them to a common implementation"
  end

  @doc """
  Gets the `dtype` of a list or raise error if not possible.
  """
  def check_types!(list) do
    inferred = Enum.reduce(list, nil, &merge_type/2)
    inferred || :float
  end

  # Folds one element's type into the accumulated dtype, raising on conflict.
  defp merge_type(value, current) do
    inferred = type(value, current) || current

    cond do
      inferred == :numeric and current in [:float, :integer] ->
        inferred

      inferred != current and current != nil ->
        raise ArgumentError,
              "the value #{inspect(value)} does not match the inferred series dtype #{inspect(current)}"

      true ->
        inferred
    end
  end

  # Infers an element's dtype given the dtype accumulated so far.
  # Clause order matters: the :numeric promotions must come first.
  defp type(value, :float) when is_integer(value), do: :numeric
  defp type(value, :integer) when is_float(value), do: :numeric
  defp type(value, :numeric) when is_number(value), do: :numeric
  defp type(value, _current) when is_integer(value), do: :integer
  defp type(value, _current) when is_float(value), do: :float
  defp type(value, _current) when is_boolean(value), do: :boolean
  defp type(value, _current) when is_binary(value), do: :string
  defp type(%Date{}, _current), do: :date
  defp type(%NaiveDateTime{}, _current), do: :datetime
  defp type(value, _current) when is_nil(value), do: nil
  defp type(value, _current), do: raise(ArgumentError, "unsupported datatype: #{inspect(value)}")

  @doc """
  Downcasts lists of mixed numeric types (float and int) to float.
  """
  def cast_numerics(list, :numeric) do
    casted =
      Enum.map(list, fn
        nil -> nil
        value -> value / 1
      end)

    {casted, :float}
  end

  def cast_numerics(list, type), do: {list, type}

  @doc """
  Helper for shared behaviour in inspect.
  """
  def to_string(value, _opts) when is_nil(value), do: "nil"
  def to_string(value, _opts) when is_binary(value), do: "\"#{value}\""
  def to_string(value, _opts), do: Kernel.to_string(value)
end
|
lib/explorer/shared.ex
| 0.880296 | 0.556098 |
shared.ex
|
starcoder
|
defmodule Sanbase.Influxdb.Measurement do
  @moduledoc ~S"""
  Module, defining the structure and common parts of a influxdb measurement
  """

  defstruct [:timestamp, :fields, :tags, :name]

  alias __MODULE__
  alias Sanbase.ExternalServices.Coinmarketcap
  alias Sanbase.Model.Project

  @doc ~s"""
  Converts the measurement to a format that the Influxdb and the Instream library
  understand.
  The timestamp should be either a DateTime struct or timestamp in nanoseconds.
  """
  def convert_measurement_for_import(nil), do: nil

  def convert_measurement_for_import(
        %Measurement{timestamp: timestamp, fields: fields, tags: tags, name: name}
      )
      when %{} != fields do
    point = %{
      measurement: name,
      fields: fields,
      # missing tags default to an empty list
      tags: tags || [],
      timestamp: format_timestamp(timestamp)
    }

    %{points: [point]}
  end

  @doc "Returns the measurement's timestamp in nanoseconds."
  def get_timestamp(%Measurement{timestamp: %DateTime{} = datetime}),
    do: DateTime.to_unix(datetime, :nanosecond)

  def get_timestamp(%Measurement{timestamp: ts}), do: ts

  @doc "Returns the measurement's timestamp as a DateTime."
  def get_datetime(%Measurement{timestamp: %DateTime{} = datetime}), do: datetime
  def get_datetime(%Measurement{timestamp: ts}), do: DateTime.from_unix!(ts, :nanosecond)

  @doc "Builds a measurement name (\"TICKER_slug\") from a project or ticker struct."
  def name_from(%Sanbase.Model.Project{ticker: ticker, slug: slug})
      when nil != ticker and nil != slug do
    "#{ticker}_#{slug}"
  end

  def name_from(%Coinmarketcap.Ticker{symbol: ticker, id: coinmarketcap_id})
      when nil != ticker and nil != coinmarketcap_id do
    "#{ticker}_#{coinmarketcap_id}"
  end

  def name_from(_), do: nil

  @doc "Builds a measurement name from a slug, or nil when no ticker is known."
  def name_from_slug(slug) when is_nil(slug), do: nil

  def name_from_slug(slug) do
    case Project.ticker_by_slug(slug) do
      ticker when is_binary(ticker) -> "#{ticker}_#{slug}"
      _ -> nil
    end
  end

  @doc ~s"""
  convert a list of slugs to measurement-slug map
  """
  def names_from_slugs(slugs) when is_list(slugs) do
    measurement_slug_map =
      slugs
      |> Project.tickers_by_slug_list()
      |> Map.new(fn {ticker, slug} -> {"#{ticker}_#{slug}", slug} end)

    {:ok, measurement_slug_map}
  end

  # Private functions

  defp format_timestamp(%DateTime{} = datetime), do: DateTime.to_unix(datetime, :nanosecond)
  defp format_timestamp(ts) when is_integer(ts), do: ts
end
|
lib/sanbase/influxdb/measurement.ex
| 0.765769 | 0.517693 |
measurement.ex
|
starcoder
|
defmodule Circuits.I2C do
@moduledoc """
`Circuits.I2C` lets you communicate with hardware devices using the I2C
protocol.
"""
alias Circuits.I2C.Nif
# Public API
@typedoc """
I2C device address
This is a "7-bit" address for the device. Some devices specify an "8-bit"
address in their documentation. You can tell if you have an "8-bit" address
if it's greater than 127 (0x7f) or if the documentation talks about different
read and write addresses. If you have an 8-bit address, divide it by 2.
"""
@type address() :: 0..127
@typedoc """
I2C bus
Call `open/1` to obtain an I2C bus reference and then pass it to the read
and write functions for interacting with devices.
"""
@type bus() :: reference()
@typedoc """
Function to report back whether a device is present
See `discover/2` for how a custom function can improve device detection when
the type of device being looked for is known.
"""
@type present?() :: (bus(), address() -> boolean())
@type opt() :: {:retries, non_neg_integer()}
@doc """
Open an I2C bus
I2C bus names depend on the platform. Names are of the form "i2c-n" where the
"n" is the bus number. The correct bus number can be found in the
documentation for the device or on a schematic. Another option is to call
`Circuits.I2C.bus_names/0` to list them for you.
I2c buses may be opened more than once. There is no need to share an I2C bus
reference between modules.
On success, this returns a reference to the I2C bus. Use the reference in
subsequent calls to read and write I2C devices
"""
@spec open(binary() | charlist()) :: {:ok, bus()} | {:error, term()}
def open(bus_name) do
  bus_name |> to_charlist() |> Nif.open()
end
@doc """
Initiate a read transaction to the I2C device at the specified `address`
Options:
* :retries - number of retries before failing (defaults to no retries)
"""
@spec read(bus(), address(), pos_integer(), [opt()]) :: {:ok, binary()} | {:error, term()}
def read(i2c_bus, address, bytes_to_read, opts \\ []) do
  attempts = Keyword.get(opts, :retries, 0)
  retry(fn -> Nif.read(i2c_bus, address, bytes_to_read) end, attempts)
end
@doc """
Initiate a read transaction and raise on error
"""
@spec read!(bus(), address(), pos_integer(), [opt()]) :: binary()
def read!(i2c_bus, address, bytes_to_read, opts \\ []) do
  attempts = Keyword.get(opts, :retries, 0)
  retry!(fn -> Nif.read(i2c_bus, address, bytes_to_read) end, attempts)
end
@doc """
Write `data` to the I2C device at `address`.
Options:
* :retries - number of retries before failing (defaults to no retries)
"""
@spec write(bus(), address(), iodata(), [opt()]) :: :ok | {:error, term()}
def write(i2c_bus, address, data, opts \\ []) do
  attempts = Keyword.get(opts, :retries, 0)
  payload = IO.iodata_to_binary(data)
  retry(fn -> Nif.write(i2c_bus, address, payload) end, attempts)
end
@doc """
Write `data` to the I2C device at `address` and raise on error
Options:
* :retries - number of retries before failing (defaults to no retries)
"""
@spec write!(bus(), address(), iodata(), [opt()]) :: :ok
def write!(i2c_bus, address, data, opts \\ []) do
  attempts = Keyword.get(opts, :retries, 0)
  payload = IO.iodata_to_binary(data)
  retry!(fn -> Nif.write(i2c_bus, address, payload) end, attempts)
end
@doc """
Write `data` to an I2C device and then immediately issue a read.
This function is useful for devices that want you to write the "register"
location to them first and then issue a read to get its contents. Many
devices operate this way and this function will issue the commands
back-to-back on the I2C bus. Some I2C devices actually require that the read
immediately follows the write. If the target supports this, the I2C
transaction will be issued that way. On the Raspberry Pi, this can be enabled
globally with `File.write!("/sys/module/i2c_bcm2708/parameters/combined", "1")`
Options:
* :retries - number of retries before failing (defaults to no retries)
"""
@spec write_read(bus(), address(), iodata(), pos_integer(), [opt()]) ::
        {:ok, binary()} | {:error, term()}
def write_read(i2c_bus, address, write_data, bytes_to_read, opts \\ []) do
  attempts = Keyword.get(opts, :retries, 0)
  payload = IO.iodata_to_binary(write_data)
  retry(fn -> Nif.write_read(i2c_bus, address, payload, bytes_to_read) end, attempts)
end
@doc """
Write `data` to an I2C device and then immediately issue a read. Raise on errors.
Options:
* :retries - number of retries before failing (defaults to no retries)
"""
@spec write_read!(bus(), address(), iodata(), pos_integer(), [opt()]) :: binary()
def write_read!(i2c_bus, address, write_data, bytes_to_read, opts \\ []) do
retries = Keyword.get(opts, :retries, 0)
data_as_binary = IO.iodata_to_binary(write_data)
retry!(fn -> Nif.write_read(i2c_bus, address, data_as_binary, bytes_to_read) end, retries)
end
@doc """
close the I2C bus
"""
@spec close(bus()) :: :ok
def close(i2c_bus) do
Nif.close(i2c_bus)
end
@doc """
Return a list of available I2C bus names. If nothing is returned, it's
possible that the kernel driver for that I2C bus is not enabled or the
kernel's device tree is not configured. On Raspbian, run `raspi-config` and
look in the advanced options.
```elixir
iex> Circuits.I2C.bus_names()
["i2c-1"]
```
"""
if Mix.env() != :test do
@spec bus_names() :: [binary()]
def bus_names() do
Path.wildcard("/dev/i2c-*")
|> Enum.map(fn p -> String.replace_prefix(p, "/dev/", "") end)
end
else
# Return a hardcoded set of I2C bus names for test purposes
def bus_names() do
["i2c-test-0", "i2c-test-1"]
end
end
@doc """
Scan the I2C bus for devices by performing a read at each device address and
returning a list of device addresses that respond.
WARNING: This is intended to be a debugging aid. Reading bytes from devices
can advance internal state machines and might cause them to get out of sync
with other code.
```elixir
iex> Circuits.I2C.detect_devices("i2c-1")
[4]
```
The return value is a list of device addresses that were detected on the
specified I2C bus. If you get back `'Hh'` or other letters, then IEx
converted the list to an Erlang string. Run `i v()` to get information about
the return value and look at the raw string representation for addresses.
If you already have a reference to an open device, then you may pass its
`reference` to `detect_devices/1` instead.
"""
@spec detect_devices(bus() | binary()) :: [address()] | {:error, term()}
def detect_devices(i2c_bus) when is_reference(i2c_bus) do
Enum.filter(0x03..0x77, &device_present?(i2c_bus, &1))
end
def detect_devices(bus_name) when is_binary(bus_name) do
case open(bus_name) do
{:ok, i2c_bus} ->
devices = detect_devices(i2c_bus)
close(i2c_bus)
devices
error ->
error
end
end
@doc """
Convenience method to scan all I2C buses for devices
This is only intended to be called from the IEx prompt. Programs should
use `detect_devices/1`.
"""
def detect_devices() do
buses = bus_names()
total_devices = Enum.reduce(buses, 0, &detect_and_print/2)
IO.puts("#{total_devices} devices detected on #{length(buses)} I2C buses")
:"do not show this result in output"
end
@doc """
Scan all I2C buses for one or more devices
This function takes a list of possible addresses and an optional detection
function. It only scans addresses in the possible addresses list to avoid
disturbing unrelated I2C devices.
If a detection function is not passed in, a default one that performs a
simple read and checks whether it succeeds is used. If the desired device has
an ID register or other means of identification, the optional function should
try to query that. If passing a custom function, be sure to return `false`
rather than raise if there are errors.
A list of bus name and address tuples is returned. The list may be empty.
See also `discover_one/2`.
"""
@spec discover([address()], present?()) :: [{binary(), address()}]
def discover(possible_addresses, present? \\ &device_present?/2) do
Enum.flat_map(bus_names(), &discover(&1, possible_addresses, present?))
end
@spec discover(binary(), [address()], present?()) :: [{binary(), address()}]
defp discover(bus_name, possible_addresses, present?) when is_binary(bus_name) do
case open(bus_name) do
{:ok, i2c_bus} ->
possible_addresses
|> Enum.filter(fn address -> present?.(i2c_bus, address) end)
|> Enum.map(&{bus_name, &1})
{:error, reason} ->
raise "I2C discovery error: Opening #{bus_name} failed with #{reason}"
end
end
@doc """
Scans all I2C buses for one specific device
This function and `discover_one!/2` are convenience functions for the use
case of helping a user find a specific device. They both call `discover/2` with
a list of possible I2C addresses and an optional function for checking whether
the device is present.
This function returns an `:ok` or `:error` tuple depending on whether one and
only one device was found. See `discover_one!/2` for the raising version.
"""
@spec discover_one([address()], present?()) ::
{:ok, {binary(), address()}} | {:error, :not_found | :multiple_possible_matches}
def discover_one(possible_addresses, present? \\ &device_present?/2) do
case discover(possible_addresses, present?) do
[actual_device] -> {:ok, actual_device}
[] -> {:error, :not_found}
_ -> {:error, :multiple_possible_matches}
end
end
@doc """
Same as `discover_one/2` but raises on error
"""
@spec discover_one!([address()], present?()) :: {binary(), address()}
def discover_one!(possible_addresses, present? \\ &device_present?/2) do
case discover_one(possible_addresses, present?) do
{:ok, actual_device} -> actual_device
{:error, reason} -> raise "I2C discovery error: #{inspect(reason)}"
end
end
# Print a human-readable device report for `bus_name` and return `count`
# plus the number of devices found (used as a running total by the caller).
defp detect_and_print(bus_name, count) do
  IO.puts("Devices on I2C bus \"#{bus_name}\":")
  found = detect_devices(bus_name)

  for address <- found do
    IO.puts(" * #{address} (0x#{Integer.to_string(address, 16)})")
  end

  IO.puts("")
  count + length(found)
end
@doc """
Return whether a device is present
This function performs a simplistic check for an I2C device on the specified
bus and address. It's not perfect, but works enough to be useful. Be warned
that it does perform an I2C read on the specified address and this may cause
some devices to actually do something.
"""
@spec device_present?(bus(), address()) :: boolean()
def device_present?(i2c, address) do
case read(i2c, address, 1) do
{:ok, _} -> true
_ -> false
end
end
@doc """
Return info about the low level I2C interface
This may be helpful when debugging I2C issues.
"""
@spec info() :: map()
defdelegate info(), to: Nif
# Run `fun` via retry/2, raising a descriptive RuntimeError if it still
# fails. `{:ok, result}` unwraps to `result`; a bare `:ok` passes through.
defp retry!(fun, times) do
  case retry(fun, times) do
    {:error, reason} ->
      raise "I2C failure: " <> format_reason(reason)

    :ok ->
      :ok

    {:ok, result} ->
      result
  end
end

# Render an error reason for the raise message. The previous
# `to_string(reason)` raised Protocol.UndefinedError for tuple/map reasons
# (no String.Chars impl), masking the real failure. Atoms and binaries keep
# the historical message format; everything else falls back to `inspect/1`
# (matching discover_one!/2's error formatting).
defp format_reason(reason) when is_binary(reason), do: reason
defp format_reason(reason) when is_atom(reason), do: to_string(reason)
defp format_reason(reason), do: inspect(reason)

# Invoke `fun`, retrying on `{:error, _}` up to `times` additional attempts.
# Any non-error result (including bare `:ok`) is returned immediately.
defp retry(fun, 0), do: fun.()

defp retry(fun, times) when times > 0 do
  case fun.() do
    {:error, _reason} -> retry(fun, times - 1)
    result -> result
  end
end
defmodule :circuits_i2c do
  @moduledoc """
  Provide an Erlang friendly interface to Circuits
  Example Erlang code: circuits_i2c:open("i2c-1")
  """

  # Thin delegation layer: naming the module with a bare atom gives Erlang
  # callers the conventional `circuits_i2c:fun(...)` call syntax. Each
  # defdelegate maps 1:1 onto the Circuits.I2C function of the same
  # name/arity, so behavior and return values are identical.
  defdelegate open(bus_name), to: Circuits.I2C
  defdelegate read(ref, address, count), to: Circuits.I2C
  defdelegate read(ref, address, count, opts), to: Circuits.I2C
  defdelegate write(ref, address, data), to: Circuits.I2C
  defdelegate write(ref, address, data, opts), to: Circuits.I2C
  defdelegate write_read(ref, address, write_data, read_count), to: Circuits.I2C
  defdelegate write_read(ref, address, write_data, read_count, opts), to: Circuits.I2C
  defdelegate close(ref), to: Circuits.I2C
end
end
|
lib/i2c.ex
| 0.873296 | 0.633609 |
i2c.ex
|
starcoder
|
defmodule ApiWeb.StopController do
  @moduledoc """
  API controller for GTFS stops.

  Serves `index` (filtered, paginated stop lists, including latitude/longitude
  radius search) and `show` (single stop lookup) backed by `State.Stop`, plus
  the Swagger documentation for both endpoints.
  """

  use ApiWeb.Web, :api_controller
  alias ApiWeb.LegacyStops
  alias State.Stop

  plug(ApiWeb.Plugs.ValidateDate)

  # Query-string filter keys accepted by index (string keys, via ~w(...)s).
  @filters ~w(id date direction_id latitude longitude radius route route_type location_type service)s
  # Pagination/sort option keys (atoms) extracted from the request params.
  @pagination_opts ~w(offset limit order_by distance)a
  # Relationships that may be side-loaded via `include` on index requests.
  @includes ~w(child_stops connecting_stops facilities parent_station recommended_transfers route)
  @show_includes ~w(child_stops connecting_stops facilities parent_station recommended_transfers)
  # Includes that are supported but intentionally omitted from the docs.
  @nodoc_includes ~w(recommended_transfers)

  def state_module, do: State.Stop.Cache

  swagger_path :index do
    get(path(__MODULE__, :index))

    description("""
    List stops.
    #{swagger_path_description("/data/{index}")}
    ### Nearby
    The `filter[latitude]` and `filter[longitude]` can be used together to find any stops near that latitude and \
    longitude. The distance is in degrees as if latitude and longitude were on a flat 2D plane and normal \
    Pythagorean distance was calculated. Over the region MBTA serves, `0.02` degrees is approximately `1` mile. How \
    close is considered nearby, is controlled by `filter[radius]`, which default to `0.01` degrees (approximately a \
    half mile).
    """)

    common_index_parameters(__MODULE__, :stop, :include_distance)

    include_parameters(@includes -- @nodoc_includes,
      description:
        "Note that `route` can only be included if `filter[route]` is present and has exactly one `/data/{index}/relationships/route/data/id`."
    )

    filter_param(:date,
      description:
        "Filter by date when stop is in use. Will be ignored unless filter[route] is present. If filter[service] is present, this filter will be ignored."
    )

    filter_param(:direction_id)

    parameter("filter[latitude]", :query, :string, """
    Latitude in degrees North in the [WGS-84](https://en.wikipedia.org/wiki/World_Geodetic_System#A_new_World_Geodetic_System:_WGS.C2.A084) \
    coordinate system to search `filter[radius]` degrees around with `filter[longitude]`.
    """)

    parameter("filter[longitude]", :query, :string, """
    Longitude in degrees East in the [WGS-84](https://en.wikipedia.org/wiki/World_Geodetic_System#Longitudes_on_WGS.C2.A084) \
    coordinate system to search `filter[radius]` degrees around with `filter[latitude]`.
    """)

    parameter("filter[radius]", :query, :number, """
    The distance is in degrees as if latitude and longitude were on a flat 2D plane and normal Pythagorean distance \
    was calculated. Over the region MBTA serves, `0.02` degrees is approximately `1` mile. Defaults to `0.01` \
    degrees (approximately a half mile).
    """)

    parameter("filter[id]", :query, :string, """
    Filter by `/data/{index}/id` (the stop ID). Multiple `/data/{index}/id` #{comma_separated_list()}.
    """)

    filter_param(:route_type)
    filter_param(:id, name: :route)

    parameter("filter[service]", :query, :string, """
    Filter by service_id for which stop is in use. Multiple service_ids #{comma_separated_list()}.
    """)

    parameter("filter[location_type]", :query, :string, """
    Filter by location_type https://github.com/mbta/gtfs-documentation/blob/master/reference/gtfs.md#stopstxt. Multiple location_type #{comma_separated_list()}.
    """)

    consumes("application/vnd.api+json")
    produces("application/vnd.api+json")
    response(200, "OK", Schema.ref(:Stops))
    response(400, "Bad Request", Schema.ref(:BadRequest))
    response(403, "Forbidden", Schema.ref(:Forbidden))
    response(429, "Too Many Requests", Schema.ref(:TooManyRequests))
  end

  # Builds the index response: validates the distance/ordering combination,
  # the requested includes, and the filter params, then converts the string
  # filters into typed values and queries State.Stop.
  def index_data(conn, params) do
    filter_opts = Params.filter_opts(params, @pagination_opts, conn)

    with true <- check_distance_filter?(filter_opts),
         :ok <- Params.validate_includes(params, @includes, conn),
         {:ok, filtered} <- Params.filter_params(params, @filters, conn) do
      filtered
      |> format_filters()
      |> expand_stops_filter(:ids, conn.assigns.api_version)
      |> Stop.filter_by()
      |> State.all(filter_opts)
    else
      false -> {:error, :distance_params}
      {:error, _, _} = error -> error
    end
  end

  # Ordering by distance is only valid when latitude and longitude filters
  # are both present; returns false to trigger the :distance_params error.
  defp check_distance_filter?(%{order_by: order_by} = filter_opts),
    do: check_distance_params(%{filter_opts | order_by: Enum.into(order_by, %{})})

  defp check_distance_filter?(_), do: true

  defp check_distance_params(%{order_by: %{distance: _}, latitude: _, longitude: _}), do: true
  defp check_distance_params(%{order_by: %{distance: _}}), do: false
  defp check_distance_params(_), do: true

  # Each do_format_filter/1 clause returns either a one-entry map (kept and
  # merged) or [] (silently dropped), so flat_map + Enum.into(%{}) yields a
  # map containing only the filters that parsed successfully.
  defp format_filters(filters) do
    filters
    |> Enum.flat_map(&do_format_filter/1)
    |> Enum.into(%{})
  end

  defp do_format_filter({"date", date_string}) do
    case Date.from_iso8601(date_string) do
      {:ok, date} ->
        %{date: date}

      _ ->
        []
    end
  end

  defp do_format_filter({"service", service_string}) do
    case Params.split_on_comma(service_string) do
      [] ->
        []

      service_ids ->
        %{services: service_ids}
    end
  end

  defp do_format_filter({"route", route_string}) do
    case Params.split_on_comma(route_string) do
      [] ->
        []

      route_ids ->
        %{routes: route_ids}
    end
  end

  defp do_format_filter({"route_type", type_string}) do
    # Keep only entries that parse fully as integers ({n, ""}); partial
    # parses like "1x" are discarded.
    route_type_ids =
      type_string
      |> Params.split_on_comma()
      |> Enum.flat_map(fn type_id_string ->
        case Integer.parse(type_id_string) do
          {type_id, ""} ->
            [type_id]

          _ ->
            []
        end
      end)

    if route_type_ids == [] do
      []
    else
      %{route_types: route_type_ids}
    end
  end

  defp do_format_filter({"id", stop_ids}) do
    %{ids: Params.split_on_comma(stop_ids)}
  end

  defp do_format_filter({"direction_id", direction_id}) do
    case Params.direction_id(%{"direction_id" => direction_id}) do
      nil ->
        []

      parsed_direction_id ->
        %{direction_id: parsed_direction_id}
    end
  end

  defp do_format_filter({"location_type", type_string}) do
    location_types =
      type_string
      |> Params.split_on_comma()
      |> Enum.flat_map(fn type_id_string ->
        case Integer.parse(type_id_string) do
          {type_id, ""} ->
            [type_id]

          _ ->
            []
        end
      end)

    if location_types == [] do
      []
    else
      %{location_types: location_types}
    end
  end

  defp do_format_filter({key, value})
       when key in ["radius", "longitude", "latitude"] do
    # to_existing_atom is safe here: key is restricted by the guard above.
    case Float.parse(value) do
      {parsed_value, ""} ->
        %{String.to_existing_atom(key) => parsed_value}

      _ ->
        []
    end
  end

  swagger_path :show do
    get(path(__MODULE__, :show))

    description("""
    Detail for a specific stop.
    #{swagger_path_description("/data")}
    """)

    parameter(:id, :path, :string, "Unique identifier for stop")
    common_show_parameters(:stop)
    include_parameters(@show_includes -- @nodoc_includes)

    consumes("application/vnd.api+json")
    produces("application/vnd.api+json")
    response(200, "OK", Schema.ref(:Stop))
    response(400, "Bad Request", Schema.ref(:BadRequest))
    response(403, "Forbidden", Schema.ref(:Forbidden))
    response(404, "Not Found", Schema.ref(:NotFound))
    response(406, "Not Acceptable", Schema.ref(:NotAcceptable))
    response(429, "Too Many Requests", Schema.ref(:TooManyRequests))
  end

  # Looks up one stop, first expanding the requested id through LegacyStops
  # (only_renames: true) so ids from older API versions still resolve.
  def show_data(conn, %{"id" => id} = params) do
    case Params.validate_includes(params, @show_includes, conn) do
      :ok ->
        [id]
        |> LegacyStops.expand(conn.assigns.api_version, only_renames: true)
        |> Enum.find_value(&Stop.by_id/1)

      {:error, _, _} = error ->
        error
    end
  end

  def swagger_definitions do
    import PhoenixSwagger.JsonApi, except: [page: 1]

    %{
      StopResource:
        resource do
          description(
            "Physical location where transit can pick-up or drop-off passengers. See https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#stopstxt for more details and https://github.com/mbta/gtfs-documentation/blob/master/reference/gtfs.md#stopstxt for specific extensions."
          )

          attributes do
            name(
              :string,
              """
              Name of a stop or station in the local and tourist vernacular. See \
              [GTFS `stops.txt` `stop_name](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#stopstxt)
              """,
              example: "Parker St @ Hagen Rd"
            )

            description(
              [:string, :null],
              """
              Description of the stop. See [GTFS `stops.txt` `stop_desc`](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#stopstxt).
              """,
              example: "Alewife - Red Line"
            )

            address(
              [:string, :null],
              """
              A street address for the station. See [MBTA extensions to GTFS](https://docs.google.com/document/d/1RoQQj3_-7FkUlzFP4RcK1GzqyHp4An2lTFtcmW0wrqw/view).
              """,
              example: "Alewife Brook Parkway and Cambridge Park Drive, Cambridge, MA 02140"
            )

            platform_code(
              [:string, :null],
              """
              A short code representing the platform/track (like a number or letter). See [GTFS `stops.txt` `platform_code`](https://developers.google.com/transit/gtfs/reference/gtfs-extensions#stopstxt_1).
              """,
              example: "5"
            )

            platform_name(
              [:string, :null],
              """
              A textual description of the platform or track. See [MBTA extensions to GTFS](https://docs.google.com/document/d/1RoQQj3_-7FkUlzFP4RcK1GzqyHp4An2lTFtcmW0wrqw/view).
              """,
              example: "Red Line"
            )

            # NOTE(review): the latitude example (-71.194994) and the
            # longitude example (42.316115) below appear to be swapped —
            # the MBTA region is near 42°N, -71°E. Confirm against the
            # published API docs before changing.
            latitude(
              :number,
              """
              Latitude of the stop or station. Degrees North, in the \
              [WGS-84](https://en.wikipedia.org/wiki/World_Geodetic_System#A_new_World_Geodetic_System:_WGS.C2.A084) \
              coordinate system. See \
              [GTFS `stops.txt` `stop_lat`](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#stopstxt).
              """,
              example: -71.194994
            )

            longitude(
              :number,
              """
              Longitude of the stop or station. Degrees East, in the \
              [WGS-84](https://en.wikipedia.org/wiki/World_Geodetic_System#Longitudes_on_WGS.C2.A084) coordinate \
              system. See
              [GTFS `stops.txt` `stop_lon`](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#stopstxt).
              """,
              example: 42.316115
            )

            wheelchair_boarding(
              %Schema{type: :integer, enum: [0, 1, 2]},
              """
              Whether there are any vehicles with wheelchair boarding or paths to stops that are \
              wheelchair acessible: 0, 1, 2.
              #{wheelchair_boarding("*")}
              """,
              example: 0
            )

            # NOTE(review): the enum below allows 0-2 but the table in the
            # description documents value 3 (Generic Node) — confirm whether
            # 3 should be included in the enum.
            location_type(%Schema{type: :integer, enum: [0, 1, 2]}, """
            The type of the stop.
            | Value | Type | Description |
            | - | - | - |
            | `0` | Stop | A location where passengers board or disembark from a transit vehicle. |
            | `1` | Station | A physical structure or area that contains one or more stops. |
            | `2` | Station Entrance/Exit | A location where passengers can enter or exit a station from the street. The stop entry must also specify a parent_station value referencing the stop ID of the parent station for the entrance. |
            | `3` | Generic Node | A location within a station, not matching any other location_type, which can be used to link together pathways defined in pathways.txt. |
            See also [GTFS `stops.txt` `location_type`](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#stopstxt).
            """)

            municipality(
              [:string, :null],
              "The municipality in which the stop is located.",
              example: "Cambridge"
            )

            on_street(
              [:string, :null],
              "The street on which the stop is located.",
              example: "Massachusetts Avenue"
            )

            at_street(
              [:string, :null],
              "The cross street at which the stop is located.",
              example: "Essex Street"
            )

            vehicle_type(
              [:integer, :null],
              """
              The type of transportation used at the stop. `vehicle_type` will be a valid routes.txt `route_type` value:
              #{route_type_description()}
              """,
              example: 3
            )
          end

          relationship(:parent_station)
        end,
      Stop: single(:StopResource),
      Stops: page(:StopResource)
    }
  end

  # Shared description body for the index/show swagger docs; parent_pointer
  # is the JSON:API pointer prefix ("/data" or "/data/{index}").
  defp swagger_path_description(parent_pointer) do
    """
    ## Accessibility
    #{wheelchair_boarding(parent_pointer)}
    ## Location
    ### World
    Use `#{parent_pointer}/attributes/latitude` and `#{parent_pointer}/attributes/longitude` to get the location of a \
    stop.
    ### Entrance
    The stop may be inside a station. If `#{parent_pointer}/relationships/parent_station/data/id` is present, you \
    should look up the parent station (`/stops/{parent_id}`) and use its location to give direction first to the \
    parent station and then route from there to the stop.
    """
  end

  defp wheelchair_boarding(parent_pointer) do
    """
    Wheelchair boarding (`#{parent_pointer}/attributes/wheelchair_boarding`) corresponds to \
    [GTFS wheelchair_boarding](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#stopstxt). The \
    MBTA handles parent station inheritance itself, so value can be treated simply:
    | Value | Meaning |
    |-------|-----------------------------------------------|
    | `0`   | No Information |
    | `1`   | Accessible (if trip is wheelchair accessible) |
    | `2`   | Inaccessible |
    """
  end

  def filters, do: @filters
end
|
apps/api_web/lib/api_web/controllers/stop_controller.ex
| 0.858259 | 0.449574 |
stop_controller.ex
|
starcoder
|
defmodule AMQP.Connection do
  @moduledoc """
  Functions to operate on Connections.
  """

  import AMQP.Core

  alias AMQP.Connection

  defstruct [:pid]
  @type t :: %Connection{pid: pid}

  @doc """
  Opens a new connection.

  Behaves like `open/2` but takes only either AMQP URI or options.

  ## Examples

      iex> options = [host: "localhost", port: 5672, virtual_host: "/", username: "guest", password: "guest"]
      iex> AMQP.Connection.open(options)
      {:ok, %AMQP.Connection{}}

      iex> AMQP.Connection.open("amqp://guest:guest@localhost")
      {:ok, %AMQP.Connection{}}
  """
  @spec open(keyword | String.t()) :: {:ok, t()} | {:error, atom()} | {:error, any()}
  def open(uri_or_options \\ []) when is_binary(uri_or_options) or is_list(uri_or_options) do
    open(uri_or_options, :undefined)
  end

  @doc """
  Opens a new Connection to an AMQP broker.

  The connections created by this module are supervised under amqp_client's supervision tree.
  Please note that connections do not get restarted automatically by the supervision tree in
  case of a failure. If you need robust connections and channels, use monitors on the returned
  connection PID.

  ## Options

  * `:username` - The name of a user registered with the broker (defaults to `"guest"`);
  * `:password` - The password of user (defaults to `"guest"`);
  * `:virtual_host` - The name of a virtual host in the broker (defaults to `"/"`);
  * `:host` - The hostname of the broker (defaults to `"localhost"`);
  * `:port` - The port the broker is listening on (defaults to `5672`);
  * `:channel_max` - The channel_max handshake parameter (defaults to `0`);
  * `:frame_max` - The frame_max handshake parameter (defaults to `0`);
  * `:heartbeat` - The heartbeat interval in seconds (defaults to `10`);
  * `:connection_timeout` - The connection timeout in milliseconds (defaults to `50000`);
  * `:ssl_options` - Enable SSL by setting the location to cert files (defaults to `:none`);
  * `:client_properties` - A list of extra client properties to be sent to the server (defaults to `[]`);
  * `:socket_options` - Extra socket options. These are appended to the default options. \
  See http://www.erlang.org/doc/man/inet.html#setopts-2 and http://www.erlang.org/doc/man/gen_tcp.html#connect-4 \
  for descriptions of the available options;
  * `:auth_mechanisms` - A list of authentication of SASL authentication mechanisms to use. \
  See https://www.rabbitmq.com/access-control.html#mechanisms and https://github.com/rabbitmq/rabbitmq-auth-mechanism-ssl \
  for descriptions of the available options;
  * `:name` - A human-readable string that will be displayed in the management UI. \
  Connection names do not have to be unique and cannot be used as connection identifiers \
  (defaults to `:undefined`).

  ## Examples

      iex> options = [host: "localhost", port: 5672, virtual_host: "/", username: "guest", password: "guest", name: "my-conn"]
      iex> AMQP.Connection.open(options)
      {:ok, %AMQP.Connection{}}

      iex> AMQP.Connection.open("amqp://guest:guest@localhost", port: 5673)
      {:ok, %AMQP.Connection{}}

  ## Enabling SSL

  To enable SSL, supply the following in the `ssl_options` field:

  * `:cacertfile` - Specifies the certificates of the root Certificate Authorities that we wish to implicitly trust;
  * `:certfile` - The client's own certificate in PEM format;
  * `:keyfile` - The client's private key in PEM format.

  Here is an example:

      iex> AMQP.Connection.open(
        port: 5671,
        ssl_options: [
          cacertfile: '/path/to/testca/cacert.pem',
          certfile: '/path/to/client/cert.pem',
          keyfile: '/path/to/client/key.pem',
          # only necessary with intermediate CAs
          # depth: 2,
          verify: :verify_peer,
          fail_if_no_peer_cert: true
        ]
      )

  ## Backward compatibility for connection name

  RabbitMQ supports user-specified connection names since version 3.6.2.

  Previously AMQP took a connection name as a separate parameter on `open/2` and `open/3` and it is still supported in this version.

      iex> options = [host: "localhost", port: 5672, virtual_host: "/", username: "guest", password: "guest"]
      iex> AMQP.Connection.open(options, :undefined)
      {:ok, %AMQP.Connection{}}

      iex> AMQP.Connection.open("amqp://guest:guest@localhost", "my-connection")
      {:ok, %AMQP.Connection{}}

      iex> AMQP.Connection.open("amqp://guest:guest@localhost", "my-connection", options)
      {:ok, %AMQP.Connection{}}

  However the connection name parameter is now deprecated and might not be supported in the future versions.
  You are recommended to pass it with `:name` option instead:

      iex> AMQP.Connection.open("amqp://guest:guest@localhost", name: "my-connection")
      {:ok, %AMQP.Connection{}}
  """
  @spec open(String.t() | keyword, keyword | String.t() | :undefined) ::
          {:ok, t()} | {:error, atom()} | {:error, any()}
  def open(uri, options)

  # Deprecated call shape: URI plus a bare connection name.
  def open(uri, name) when is_binary(uri) and (is_binary(name) or name == :undefined) do
    do_open(uri, name, _options = [])
  end

  # Options-only call shape; an explicit legacy `name` argument wins over a
  # `:name` key inside the options.
  def open(options, name) when is_list(options) and (is_binary(name) or name == :undefined) do
    {name_from_opts, options} = take_connection_name(options)
    name = if name == :undefined, do: name_from_opts, else: name

    options
    |> merge_options_to_default()
    |> do_open(name)
  end

  # URI plus options; the options may carry `:name` and override URI fields.
  def open(uri, options) when is_binary(uri) and is_list(options) do
    {name, options} = take_connection_name(options)
    do_open(uri, name, options)
  end

  @doc false
  @deprecated "Use :name in open/2 instead"
  @spec open(String.t(), String.t() | :undefined, keyword) ::
          {:ok, t()} | {:error, atom()} | {:error, any()}
  def open(uri, name, options) when is_binary(uri) and is_list(options) do
    do_open(uri, name, options)
  end

  # Parse the AMQP URI into amqp_params and layer the explicit options on top.
  defp do_open(uri, name, options) do
    case uri |> String.to_charlist() |> :amqp_uri.parse() do
      {:ok, amqp_params} -> amqp_params |> merge_options_to_amqp_params(options) |> do_open(name)
      error -> error
    end
  end

  defp do_open(amqp_params, name) do
    case :amqp_connection.start(amqp_params, name) do
      {:ok, pid} -> {:ok, %Connection{pid: pid}}
      error -> error
    end
  end

  # take name from options
  defp take_connection_name(options) do
    name = options[:name] || :undefined
    options = Keyword.delete(options, :name)
    {name, options}
  end

  @doc false
  @spec merge_options_to_amqp_params(tuple, keyword) :: tuple
  def merge_options_to_amqp_params(amqp_params, options) do
    options = normalize_ssl_options(options)
    params = amqp_params_network(amqp_params)

    amqp_params_network(
      username: keys_get(options, params, :username),
      password: keys_get(options, params, :password),
      virtual_host: keys_get(options, params, :virtual_host),
      host: keys_get(options, params, :host) |> to_charlist(),
      port: keys_get(options, params, :port) |> normalize_int_opt(),
      channel_max: keys_get(options, params, :channel_max) |> normalize_int_opt(),
      frame_max: keys_get(options, params, :frame_max) |> normalize_int_opt(),
      heartbeat: keys_get(options, params, :heartbeat) |> normalize_int_opt(),
      connection_timeout: keys_get(options, params, :connection_timeout) |> normalize_int_opt(),
      ssl_options: keys_get(options, params, :ssl_options),
      client_properties: keys_get(options, params, :client_properties),
      socket_options: keys_get(options, params, :socket_options),
      auth_mechanisms: keys_get(options, params, :auth_mechanisms)
    )
  end

  # Gets the value from k1. If empty, gets the value from k2.
  defp keys_get(k1, k2, key) do
    Keyword.get(k1, key, Keyword.get(k2, key))
  end

  # Build amqp_params from options, filling RabbitMQ's documented defaults.
  defp merge_options_to_default(options) do
    amqp_params_network(
      username: Keyword.get(options, :username, "guest"),
      # "guest" is the RabbitMQ default; the previous value here was a
      # "<PASSWORD>" redaction artifact that broke default authentication.
      password: Keyword.get(options, :password, "guest"),
      virtual_host: Keyword.get(options, :virtual_host, "/"),
      host: Keyword.get(options, :host, 'localhost') |> to_charlist(),
      port: Keyword.get(options, :port, :undefined) |> normalize_int_opt(),
      channel_max: Keyword.get(options, :channel_max, 0) |> normalize_int_opt(),
      frame_max: Keyword.get(options, :frame_max, 0) |> normalize_int_opt(),
      heartbeat: Keyword.get(options, :heartbeat, 10) |> normalize_int_opt(),
      connection_timeout: Keyword.get(options, :connection_timeout, 50000) |> normalize_int_opt(),
      ssl_options: Keyword.get(options, :ssl_options, :none),
      client_properties: Keyword.get(options, :client_properties, []),
      socket_options: Keyword.get(options, :socket_options, []),
      auth_mechanisms:
        Keyword.get(options, :auth_mechanisms, [
          &:amqp_auth_mechanisms.plain/3,
          &:amqp_auth_mechanisms.amqplain/3
        ])
    )
  end

  # If an integer value is configured as a string, cast it to an integer where applicable
  defp normalize_int_opt(value) when is_binary(value), do: String.to_integer(value)
  defp normalize_int_opt(value), do: value

  @doc """
  Closes an open Connection.
  """
  @spec close(t) :: :ok | {:error, any}
  def close(conn) do
    case :amqp_connection.close(conn.pid) do
      :ok -> :ok
      error -> {:error, error}
    end
  end

  # :ssl expects file paths as charlists; convert only the path-valued keys.
  defp normalize_ssl_options(options) when is_list(options) do
    for {k, v} <- options do
      if k in [:cacertfile, :certfile, :keyfile] do
        {k, to_charlist(v)}
      else
        {k, v}
      end
    end
  end

  defp normalize_ssl_options(options), do: options
end
|
lib/amqp/connection.ex
| 0.887186 | 0.428592 |
connection.ex
|
starcoder
|
defmodule RailwayIpc.Core.MessageFormat.BinaryProtobuf do
  @moduledoc """
  _This is an internal module, not part of the public API._
  Messages that use the `BinaryProtobuf` format have the following
  characteristics:
  * The payload is a struct that contains two attributes: `type` and
    `encoded_message`
  * The `type` attribute is the name of the Elixir module for the protobuf
    without the "Elixir" prefix and to use colon notation instead of dots.
  * The `encoded_message` attribute is the encoded protobuf which is then
    Base64 encoded to make it friendly to JSON conversion.
  * The entire payload is then converted to JSON.
  Note:
  I _think_ the reason for converting to colon notation is an artifact of
  wanting to be compatible with the Ruby version since Ruby classes use
  colon notation. -BN
  """
  alias RailwayIpc.DefaultMessage

  @doc """
  Encodes `protobuf` in message format. Returns the encoded protobuf and the
  message type as a string in colon format.
  Encodes the given `protobuf` by creating a JSON string with two attributes:
  * `type` -- the Protobuf module name as a string using colon notation
  * `encoded_message` -- the Base64 encoded Protobuf
  """
  # Each pipeline step threads an {:ok, payload, protobuf} accumulator and
  # short-circuits on {:error, reason}.
  def encode(protobuf) when is_map(protobuf) or is_atom(protobuf) do
    protobuf
    |> build_payload()
    |> encode_type()
    |> encode_message()
    |> encode_payload_as_json()
  end

  def encode(_), do: {:error, "Argument Error: Valid Protobuf required"}

  @doc """
  Decodes the given `message` into a Protobuf `struct`.
  """
  def decode(message) when not is_binary(message) do
    {:error, "Malformed JSON given. Must be a string. (#{inspect(message)})"}
  end

  # Decode pipeline mirrors encode: JSON -> resolve module -> decode bytes.
  def decode(message) do
    message
    |> decode_json()
    |> parse_type()
    |> check_that_module_is_defined()
    |> decode_protobuf()
  end

  # Seed the {:ok, payload, protobuf} accumulator for the encode pipeline.
  defp build_payload(protobuf), do: {:ok, %{}, protobuf}

  # Derive the wire `type` from the struct's module name ("A.B" -> "A::B").
  # A non-struct argument has no __struct__ key, raising KeyError, which is
  # translated into the standard argument error tuple.
  defp encode_type({:ok, payload, protobuf}) do
    type =
      protobuf.__struct__
      |> to_string
      |> replace_invalid_chars()

    {:ok, Map.put(payload, :type, type), protobuf}
  rescue
    KeyError ->
      {:error, "Argument Error: Valid Protobuf required"}
  end

  # Protobuf-encode the struct and Base64 it so it survives JSON transport.
  defp encode_message({:ok, payload, protobuf}) do
    encoded_message =
      protobuf
      |> protobuf.__struct__.encode()
      |> Base.encode64()

    {:ok, Map.put(payload, :encoded_message, encoded_message), protobuf}
  end

  defp encode_message({:error, _} = error), do: error

  # Final step: emit {:ok, json, type} so callers get the routing type too.
  defp encode_payload_as_json({:ok, payload, _}) do
    case Jason.encode(payload) do
      {:ok, json} -> {:ok, json, payload.type}
      {:error, error} -> {:error, error}
    end
  end

  defp encode_payload_as_json({:error, _} = error), do: error

  # Resolve the colon-notation `type` string into an Elixir module name.
  defp parse_type({:ok, %{type: type} = payload}) when is_binary(type) do
    {:ok, Map.put(payload, :module, type_to_module(type))}
  end

  defp parse_type({:ok, %{type: type}}) when not is_binary(type) do
    {:error, "Message `type` attribute must be a string"}
  end

  defp parse_type({:ok, _}), do: {:error, "Message is missing the `type` attribute"}
  defp parse_type({:error, _} = error), do: error

  # `module.__info__(:module)` forces the module to load; if it isn't
  # compiled in, fall back to DefaultMessage and tag the result
  # :unknown_message_type instead of failing the decode.
  defp check_that_module_is_defined({:ok, payload}) do
    %{module: module} = payload
    module.__info__(:module)
    {:ok, payload}
  rescue
    UndefinedFunctionError ->
      {:unknown_message_type, Map.put(payload, :module, DefaultMessage)}
  end

  defp check_that_module_is_defined({:error, _} = error), do: error

  defp decode_protobuf({:error, _} = error), do: error

  # `status` is :ok or :unknown_message_type and is passed through so the
  # caller can tell whether the concrete protobuf module was found.
  defp decode_protobuf({status, payload}) do
    %{module: module, encoded_message: encoded_message, type: type} = payload

    decoded_message =
      encoded_message
      |> Base.decode64!(ignore: :whitespace)
      |> module.decode

    {status, decoded_message, type}
  rescue
    # TODO: What's the specific error we should rescue here? Can't find
    # it in the protobuf docs
    _ -> {:error, "Cannot decode protobuf"}
  end

  # "A::B" -> A.B module atom (Module.concat on the colon-split segments).
  defp type_to_module(type) do
    type
    |> String.split("::")
    |> Module.concat()
  end

  # "Elixir.A.B" -> "A::B" (Ruby-compatible wire representation).
  defp replace_invalid_chars(module_name_string) do
    Regex.replace(~r/\AElixir\./, module_name_string, "")
    |> String.replace(".", "::")
  end

  # Keys are atomized here; the payload's :type/:encoded_message lookups
  # downstream rely on this.
  defp decode_json(json) do
    {:ok, Jason.decode!(json, keys: :atoms)}
  rescue
    Jason.DecodeError -> {:error, "Message is invalid JSON (#{json})"}
  end
end
|
lib/railway_ipc/core/message_format/binary_protobuf.ex
| 0.760917 | 0.480783 |
binary_protobuf.ex
|
starcoder
|
defprotocol Validix.Stage.Convert do
  # Conversion stage of the validation pipeline: convert `value` (currently
  # of `type`) for `field` into the target type given by the final argument.
  #
  # Returning `:parent` signals the caller to fall back to the parent
  # type's implementation; errors are reported as `{:error, term}`.
  @spec as(any, field :: term, type :: Type.key, value :: term, Type.key)
    :: {:ok, value :: term} | {:error, term} | :parent
  def as(_, field, type, value, args)
end
defimpl Validix.Stage.Convert, for: Validix.Type.Core do
  # Converts `value` to `to_type`, wrapping any raised exception in a
  # `Validix.Error` so callers receive `{:error, error}` instead of a crash.
  def as(_, field, type, value, to_type) do
    try do
      {:ok, convert(value, to_type)}
    rescue
      e ->
        error = %Validix.Error{
          message:
            "Converting #{inspect value} to #{inspect to_type} for #{inspect type} field #{inspect field} failed",
          reason: :bad_value,
          field: field,
          type: type,
          value: value,
          cause: e,
          # Fix: `System.stacktrace/0` is deprecated since Elixir 1.7 and no
          # longer returns the rescued trace on modern Erlang/OTP;
          # `__STACKTRACE__` is the supported form inside a rescue clause.
          # (Also removed the trailing comma, which is a syntax error in
          # struct literals.)
          stacktrace: __STACKTRACE__
        }

        {:error, error}
    end
  end

  # Integer conversions.
  defp convert(v, :float) when is_integer(v), do: v / 1
  defp convert(v, :string) when is_integer(v), do: v |> Integer.to_string
  defp convert(v, :atom) when is_integer(v), do: v |> convert(:string) |> convert(:atom)

  # Float conversions.
  defp convert(v, :integer) when is_float(v), do: v |> round
  defp convert(v, :string) when is_float(v), do: v |> Float.to_string
  defp convert(v, :atom) when is_float(v), do: v |> convert(:string) |> convert(:atom)

  # Binary (string) conversions. String.to_existing_atom/1 raises for unknown
  # atoms; the raise is captured by `as/5` above and reported as :bad_value.
  defp convert(v, :integer) when is_binary(v), do: v |> String.to_integer
  defp convert(v, :atom) when is_binary(v), do: v |> String.to_existing_atom
  defp convert(v, :boolean) when is_binary(v), do: v |> convert(:atom) |> convert(:boolean)

  # Atom conversions.
  defp convert(v, :integer) when is_atom(v), do: v |> convert(:string) |> convert(:integer)
  defp convert(v, :string) when is_atom(v), do: v |> Atom.to_string

  # Collection conversions (MapSet/struct clauses must precede the generic
  # map clauses, since structs and MapSets are maps too).
  defp convert(%MapSet{} = v, :list), do: v |> MapSet.to_list
  defp convert(%MapSet{} = v, :map), do: v |> convert(:list) |> Map.new(&{&1, true})
  defp convert(%_{} = v, :map), do: v |> Map.from_struct
  defp convert(v, :list) when is_map(v), do: v |> Map.to_list
  defp convert(v, :set) when is_map(v), do: v |> Map.keys |> MapSet.new
  defp convert(v, :map) when is_list(v), do: v |> Map.new
  defp convert(v, :set) when is_list(v), do: v |> MapSet.new
  defp convert(v, :tuple) when is_list(v), do: v |> List.to_tuple
  defp convert(v, :list) when is_tuple(v), do: v |> Tuple.to_list

  # Truthiness conversion: nil/false -> false, everything else -> true.
  defp convert(v, :boolean), do: !!v

  # Parameterized conversions: convert every element/pair to the inner type.
  defp convert(v, {:list_of, type}), do: v |> convert(:list) |> Enum.map(&convert(&1, type))
  defp convert(v, {:map_of, type}), do: v |> convert(:map) |> convert({:list_of, {:tuple, type}}) |> convert(:map)
  defp convert(v, {:set_of, type}), do: v |> convert({:list_of, type}) |> convert(:set)

  # Element-wise tuple conversion; only applies when sizes match (the guard
  # also fails for non-tuples, falling through to the no-op clause below).
  defp convert(v, {:tuple, types}) when tuple_size(v) == tuple_size(types) do
    v
    |> convert(:list)
    |> Enum.zip(Tuple.to_list(types))
    |> Enum.map(fn({v, t}) -> convert(v, t) end)
    |> convert(:tuple)
  end

  ## No known conversion possible
  defp convert(v, _), do: v
end
defimpl Validix.Stage.Convert, for: Any do
  # Fallback implementation: no conversion known here, delegate to the
  # parent type in the stage pipeline.
  def as(_, _, _, _, _), do: :parent
end
|
lib/validix/stage/convert.ex
| 0.676299 | 0.527499 |
convert.ex
|
starcoder
|
defmodule HyperEx.Abbreviation do
  @moduledoc false

  alias HyperEx.Util

  @doc """
  Expands an Emmet-like abbreviation to a tuple containing the tag name and
  it's attributes (can contain an id and a class list).

  ## Examples

      iex> HyperEx.Abbreviation.expand("div")
      {"div", []}

      iex> HyperEx.Abbreviation.expand("div#foo")
      {"div", [id: "foo"]}

      iex> HyperEx.Abbreviation.expand("div.foo")
      {"div", [class: "foo"]}

      iex> HyperEx.Abbreviation.expand("div.foo.bar")
      {"div", [class: "foo bar"]}

      iex> HyperEx.Abbreviation.expand("div#foo.bar")
      {"div", [id: "foo", class: "bar"]}

      iex> HyperEx.Abbreviation.expand("#foo")
      {"div", [id: "foo"]}

      iex> HyperEx.Abbreviation.expand("#.foo")
      {"div", [class: "foo"]}
  """
  def expand(abbreviation) do
    # Example trace for "div#foo.bar..baz":
    #   tokenize              -> ["div", "#", "foo", ".", ".", "bar", ".", "baz"]
    #   attach_separators     -> ["div", "#foo", ".bar", ".", ".baz"]
    #   drop_empty_selectors  -> ["div", "#foo", ".bar", ".baz"]
    #   split_off_tag         -> {"div", ["#foo", ".bar", ".baz"]}
    #   build_attributes      -> {"div", [id: "foo", class: "bar baz"]}
    abbreviation
    |> tokenize()
    |> attach_separators()
    |> drop_empty_selectors()
    |> split_off_tag()
    |> build_attributes()
  end

  # Splits on "#" and "." while keeping the separators as their own tokens.
  defp tokenize(abbreviation) do
    String.split(abbreviation, ~r{#|\.}, include_captures: true, trim: true)
  end

  # Re-attaches each separator to the token that follows it. The list is
  # built newest-first (prepending is cheap and the previous token is the
  # head), then reversed once at the end.
  defp attach_separators(tokens) do
    tokens
    |> Enum.reduce([], &attach_token/2)
    |> Enum.reverse()
  end

  defp attach_token("#", acc), do: ["#" | acc]
  defp attach_token(".", acc), do: ["." | acc]
  defp attach_token(token, []), do: [token]
  defp attach_token(token, [current | rest]), do: [current <> token | rest]

  # A bare "#" or "." never got a name attached; discard it.
  defp drop_empty_selectors(selectors) do
    Enum.filter(selectors, fn selector -> selector not in ["#", "."] end)
  end

  # Splits the tag from the selector list, defaulting the tag to "div" when
  # the abbreviation starts with an id/class selector (or is empty).
  defp split_off_tag([]), do: {"div", []}
  defp split_off_tag([("#" <> _) | _] = selectors), do: {"div", selectors}
  defp split_off_tag([("." <> _) | _] = selectors), do: {"div", selectors}
  defp split_off_tag([tag | selectors]), do: {tag, selectors}

  # Maps each selector to an attribute keyword list and merges them (classes
  # are concatenated by Util.merge_attrs/1).
  defp build_attributes({tag, selectors}) do
    attributes =
      selectors
      |> Enum.map(&to_attribute/1)
      |> Util.merge_attrs()

    {tag, attributes}
  end

  defp to_attribute("#" <> id), do: [id: id]
  defp to_attribute("." <> class), do: [class: class]
end
|
lib/hyper_ex/abbreviation.ex
| 0.843911 | 0.431764 |
abbreviation.ex
|
starcoder
|
defmodule Snitch.Data.Model.PaymentMethod do
  @moduledoc """
  PaymentMethod API and utilities.

  Snitch currently supports the following payment methods:

  ## Debit and Credit cards

  See `Snitch.Data.Model.CardPayment`. Such payments are backed by the
  "`snitch_card_payments`" table that references the `Card` used for payment.

  ## Check or Cash (and cash-on-delivery)

  There's no separate schema for such payments as they are completely expressed
  by the fields in `Snitch.Data.Model.Payment`.
  """
  use Snitch.Data.Model

  alias Snitch.Data.Schema.PaymentMethod
  # NOTE(review): `Codes` is not referenced anywhere in this module — confirm
  # whether the "ccd"/"chk" literals below should come from it instead.
  alias SnitchPayments.PaymentMethodCode, as: Codes

  @doc """
  Creates a `PaymentMethod` with the given `params`.
  """
  @spec create(map) :: {:ok, PaymentMethod.t()} | {:error, Ecto.Changeset.t()}
  def create(params) do
    QH.create(PaymentMethod, params, Repo)
  end

  @doc """
  Updates a `PaymentMethod` from `query_fields`; when `instance` is `nil`, the
  record to update is resolved by the query helper.
  """
  @spec update(map, PaymentMethod.t() | nil) ::
          {:ok, PaymentMethod.t()} | {:error, Ecto.Changeset.t()}
  def update(query_fields, instance \\ nil) do
    QH.update(PaymentMethod, query_fields, instance, Repo)
  end

  @doc """
  Deletes a PaymentMethod.
  """
  @spec delete(non_neg_integer | PaymentMethod.t()) ::
          {:ok, PaymentMethod.t()} | {:error, Ecto.Changeset.t()} | {:error, :not_found}
  def delete(id_or_instance) do
    QH.delete(PaymentMethod, id_or_instance, Repo)
  end

  @doc """
  Fetches a `PaymentMethod` by primary key or by a map of fields; `nil` when
  no record matches (see spec).
  """
  @spec get(map | non_neg_integer) :: PaymentMethod.t() | nil
  def get(query_fields_or_primary_key) do
    QH.get(PaymentMethod, query_fields_or_primary_key, Repo)
  end

  @doc """
  Fetches the payment method with code "ccd" — presumably the card method;
  confirm against `SnitchPayments.PaymentMethodCode`.
  """
  @spec get_card() :: PaymentMethod.t() | nil
  def get_card do
    get(%{code: "ccd"})
  end

  @doc """
  Fetches the payment method with code "chk" — presumably the check method;
  confirm against `SnitchPayments.PaymentMethodCode`.
  """
  @spec get_check() :: PaymentMethod.t() | nil
  def get_check do
    get(%{code: "chk"})
  end

  @doc """
  Returns all payment methods, active or not.
  """
  @spec get_all() :: [PaymentMethod.t()]
  def get_all, do: Repo.all(PaymentMethod)

  @doc """
  Returns all the active payment methods.
  """
  @spec get_active_payment_methods() :: [PaymentMethod.t()]
  def get_active_payment_methods do
    # `active?` is a schema field here, hence the explicit `== true`.
    query =
      from(
        payment_method in PaymentMethod,
        where: payment_method.active? == true
      )

    Repo.all(query)
  end
end
|
apps/snitch_core/lib/core/data/model/payment/payment_method.ex
| 0.87046 | 0.488405 |
payment_method.ex
|
starcoder
|
defmodule Couch.Test.Suite do
  @moduledoc """
  Common code to configure ExUnit runner.
  It replaces the usual invocation of `ExUnit.start()` in
  `test_helper.exs` related to integration tests with:
  ```
  Couch.Test.Suite.start()
  ```
  """

  @doc """
  This helper function can be used to create `suite.elixir`
  as
  ```
  tests =
    Couch.Test.Suite.list()
    |> Enum.sort()
    |> Couch.Test.Suite.group_by()
  IO.puts(Couch.Test.Suite.pretty_print(tests))
  ```
  """
  def list() do
    test_paths = Keyword.get(Mix.Project.config(), :test_paths, [])

    Enum.reduce(test_paths, [], fn directory, acc ->
      list(directory) ++ acc
    end)
  end

  @doc """
  This helper function can be used to create `suite.elixir`
  as
  ```
  tests =
    Couch.Test.Suite.list(["test/elixir/test"])
    |> Enum.sort()
    |> Couch.Test.Suite.group_by()
  IO.puts(Couch.Test.Suite.pretty_print(tests))
  ```
  """
  def list(directory) do
    ensure_exunit_started()

    Enum.reduce(test_files(directory), [], fn file_path, acc ->
      tests_in_file(file_path) ++ acc
    end)
  end

  @doc """
  This helper function is used in a snippet to create `suite.elixir`
  see list/1
  """
  def group_by(tests) do
    tests |> Enum.group_by(&module_name/1, &test_name/1)
  end

  @doc """
  This helper function is used in a snippet to create `suite.elixir`
  see list/1
  """
  def pretty_print(tests) do
    # Renders the grouped tests as Elixir map source text, modules sorted
    # and each test name quoted, suitable for writing to `suite.elixir`.
    tests = Enum.join(Enum.sort(Enum.map(tests, fn {module_name, test_names} ->
      test_names = test_names
      |> Enum.map(fn x -> ~s("#{x}") end) |> Enum.join(",\n    ")
      ~s(  "#{module_name}": [\n    #{test_names}\n  ])
    end)), ",\n")
    "%{\n#{tests}\n}"
  end

  # Configures and starts ExUnit, honoring `.elixir` suite files passed via
  # --include/--exclude (see from_file/1) plus the given extra excludes.
  def start(exclude \\ []) do
    # If build number detected assume we running on Jenkins
    # and skip certain tests that fail on jenkins.
    default_exclude =
      case System.get_env("BUILD_NUMBER") !== nil do
        true -> [:pending, :skip_on_jenkins]
        false -> [:pending]
      end

    current_exclude = Keyword.get(ExUnit.configuration(), :exclude, [])
    {ignores, current_exclude} = from_file(current_exclude)

    current_include = Keyword.get(ExUnit.configuration(), :include, [])
    {suite, current_include} = from_file(current_include)

    # Only restrict to specific test ids when a suite file was supplied.
    only_test_ids =
      case suite -- ignores do
        [] ->
          nil

        test_ids ->
          to_tests(test_ids)
      end

    ExUnit.configure(
      exclude: Enum.uniq(default_exclude ++ current_exclude ++ exclude),
      include: current_include,
      formatters: [JUnitFormatter, ExUnit.CLIFormatter],
      only_test_ids: only_test_ids
    )

    ExUnit.start()
  end

  # Helpers for start/0

  # Splits an include/exclude option list into `.elixir` suite file names
  # (atoms ending in ".elixir") and the remaining regular tags.
  defp split_files(opts) do
    {files, opts} =
      Enum.split_with(opts, fn x ->
        String.ends_with?(Atom.to_string(x), ".elixir")
      end)

    {Enum.map(files, &Atom.to_string/1), opts}
  end

  # Evaluates a suite file (a %{"Module" => ["test name", ...]} literal) into
  # a flat [{module, test_name}] list.
  defp read_from_file(file_name) do
    {map, _} = Code.eval_file(file_name)

    map
    |> Enum.reduce([], fn {module, tests}, acc ->
      Enum.map(tests, &{module, &1}) ++ acc
    end)
  end

  # Returns {entries_from_suite_file, remaining_opts}; at most one suite file
  # may be given per option list.
  defp from_file(opts) do
    case split_files(opts) do
      {[], opts} ->
        {[], opts}

      {[file_name], opts} ->
        {read_from_file(file_name), opts}

      {_, _} ->
        throw("Only one file is supported in --exclude or --include")
    end
  end

  # Converts {module_name, test_name} string pairs into the MapSet of
  # {module_atom, test_atom} ids ExUnit's :only_test_ids option expects.
  defp to_tests(ids) do
    MapSet.new(
      Enum.map(ids, fn {module_name, test_name} ->
        {String.to_atom("Elixir.#{module_name}"), String.to_atom("test #{test_name}")}
      end)
    )
  end

  # Helpers for list/0

  # Starts ExUnit (without autorun) at most once, caching the fact in the
  # process dictionary.
  # NOTE(review): `EXUNIT_STARTED` is an alias, so the key is the atom
  # :"Elixir.EXUNIT_STARTED" — it works, but looks accidental. Also note the
  # flag is only cached when this function itself started ExUnit.
  defp ensure_exunit_started() do
    if not Process.get(EXUNIT_STARTED, false) do
      started? =
        Application.started_applications()
        |> Enum.map(&Kernel.elem(&1, 0))
        |> Enum.member?(:ex_unit)

      if not started? do
        ExUnit.start(autorun: false)
        Process.put(EXUNIT_STARTED, true)
      end
    end
  end

  # All regular *_test.exs files directly under `directory`.
  defp test_files(directory) do
    files = Path.wildcard(Path.join(directory, "*_test.exs"))
    Enum.filter(files, &File.regular?/1)
  end

  # Compiles a test file and collects the ExUnit test structs of every test
  # module it defines, then unloads the file again.
  # NOTE(review): Code.compiler_options/1 mutates global compiler state and
  # is not restored afterwards.
  def tests_in_file(file_path) do
    ensure_exunit_started()
    Code.compiler_options(ignore_module_conflict: true)

    tests =
      Enum.reduce(require_file(file_path), [], fn {module_name, _}, acc ->
        if :erlang.function_exported(module_name, :__ex_unit__, 0) do
          module_name.__ex_unit__().tests ++ acc
        else
          acc
        end
      end)

    Code.unrequire_files([file_path])
    tests
  end

  # Requires a file while suppressing anything it prints to :standard_error.
  def require_file(file_path) do
    drop_stderr(fn ->
      Code.require_file(file_path)
    end)
  end

  # Temporarily re-registers :standard_error to a StringIO sink while `fun`
  # runs, restoring the original device afterwards.
  defp drop_stderr(fun) do
    {:ok, pid} = StringIO.open("")
    original_pid = Process.whereis(:standard_error)

    try do
      Process.unregister(:standard_error)
      Process.register(pid, :standard_error)
      fun.()
    after
      Process.unregister(:standard_error)
      Process.register(original_pid, :standard_error)
      StringIO.close(pid)
    end
  end

  # "test foo does bar" -> "foo does bar" (ExUnit prefixes test names).
  defp test_name(test) do
    String.replace_leading(Atom.to_string(test.name), "test ", "")
  end

  # Elixir.My.Module -> "My.Module"
  defp module_name(test) do
    test.module
    |> Atom.to_string()
    |> String.replace_leading("Elixir.", "")
  end
end
|
test/elixir/lib/suite.ex
| 0.767036 | 0.87289 |
suite.ex
|
starcoder
|
defmodule Interpreter.Diff do
  alias InterpreterTerms.SymbolMatch, as: Sym
  alias InterpreterTerms.WordMatch, as: Word

  @doc """
  Returns a similarity ratio between 0 and 1 for `a` and `b`, computed as
  matching terms over total compared terms (see `similarity_calc/2`).
  The denominator is always at least 1, so no division by zero can occur.
  """
  def similarity(a, b) do
    {matching, total} = similarity_calc(a, b)
    matching / total
  end

  @doc """
  Returns a similarity number. Comparing how similar the two objects
  are.
  We compare this by looking at the amount of terms in the query, and
  seeing how much they overlap.
  The matching is returned as a tuple containing the total amount of
  positive similarities as the first value, and the total amount of
  compared similarities as the second value.
  @return { positive_similaties, total_similarities }
  """
  def similarity_calc(%Sym{submatches: asub} = a, %Sym{submatches: bsub} = b) do
    if shallow_same?(a, b) do
      # The symbols themselves match: 1 out of 1.
      {self_positive_similarities, self_total_similarities} = {1, 1}

      # Normalize :none submatches to [] so both sides can be zipped.
      asub =
        if asub == :none do
          []
        else
          asub
        end

      bsub =
        if bsub == :none do
          []
        else
          bsub
        end

      # Fix: compute each O(n) count once — the original called
      # Enum.count/1 twice per list (once for max, once for min).
      asub_count = Enum.count(asub)
      bsub_count = Enum.count(bsub)
      longest_length = max(asub_count, bsub_count)
      shortest_length = min(asub_count, bsub_count)

      # Recursively compare the paired submatches (zip stops at the
      # shorter list).
      {sub_positive_similarities, sub_total_similarities} =
        [asub, bsub]
        |> Enum.zip()
        |> Enum.reduce({0, 0}, fn sub_elts, acc_similarities ->
          {asub_elt, bsub_elt} = sub_elts
          {positive_similarities, total_similarities} = acc_similarities
          {add_pos, add_total} = similarity_calc(asub_elt, bsub_elt)
          {positive_similarities + add_pos, total_similarities + add_total}
        end)

      # Submatches present on only one side count as misses.
      {missing_matches_positive_similarities, missing_matches_total_similarities} =
        {0, longest_length - shortest_length}

      {self_positive_similarities + sub_positive_similarities +
         missing_matches_positive_similarities,
       self_total_similarities + sub_total_similarities + missing_matches_total_similarities}
    else
      {0, 1}
    end
  end

  # Fallback for leaf terms (e.g. words): a term either matches or not.
  def similarity_calc(a, b) do
    if shallow_same?(a, b) do
      {1, 1}
    else
      {0, 1}
    end
  end

  # Leaf symbols (no submatches) must agree on symbol AND string.
  def shallow_same?(%Sym{symbol: a, submatches: :none, string: str_a}, %Sym{
        symbol: a,
        submatches: :none,
        string: str_b
      }) do
    str_a == str_b
  end

  # Non-leaf symbols agree when symbol and whitespace agree.
  def shallow_same?(%Sym{symbol: a, whitespace: whitespace}, %Sym{
        symbol: a,
        whitespace: whitespace
      }) do
    true
  end

  def shallow_same?(%Sym{symbol: a, whitespace: _whitespace_one}, %Sym{
        symbol: a,
        whitespace: _whitespace_two
      }) do
    # Symbols with different whitespace are different.
    # TODO: merge with the last clause? this will basically fall
    # through to there.
    false
  end

  def shallow_same?(%Word{word: word, whitespace: whitespace}, %Word{
        word: word,
        whitespace: whitespace
      }) do
    true
  end

  def shallow_same?(_, _) do
    false
  end
end
|
lib/interpreter/diff/diff.ex
| 0.650356 | 0.537223 |
diff.ex
|
starcoder
|
defmodule Module.Types.Helpers do
  # AST and enumeration helpers.
  @moduledoc false

  @doc """
  Guard function to check if an AST node is a variable:
  a `{name, meta, context}` triple with atom name and atom context.
  """
  defmacro is_var(expr) do
    quote do
      is_tuple(unquote(expr)) and
        tuple_size(unquote(expr)) == 3 and
        is_atom(elem(unquote(expr), 0)) and
        is_atom(elem(unquote(expr), 2))
    end
  end

  @doc """
  Returns unique identifier for the current assignment of the variable.
  """
  def var_name({_name, meta, _context}), do: Keyword.fetch!(meta, :version)

  @doc """
  Returns the AST metadata, or `[]` for nodes that carry none.
  """
  def get_meta({_, meta, _}), do: meta
  def get_meta(_other), do: []

  @doc """
  Push expression to stack.
  The expression stack is used to give the context where a type variable
  was refined when show a type conflict error.
  """
  def push_expr_stack(expr, stack), do: %{stack | last_expr: expr}

  @doc """
  Like `Enum.reduce/3` but only continues while `fun` returns `{:ok, acc}`
  and stops on `{:error, reason}`.
  """
  def reduce_ok([], acc, _fun), do: {:ok, acc}

  def reduce_ok([head | rest], acc, fun) do
    case fun.(head, acc) do
      {:ok, next_acc} -> reduce_ok(rest, next_acc, fun)
      {:error, _} = error -> error
    end
  end

  @doc """
  Like `Enum.unzip/1` but only continues while `fun` returns `{:ok, elem1, elem2}`
  and stops on `{:error, reason}`.
  """
  def unzip_ok(list), do: unzip_ok(list, [], [])

  defp unzip_ok([], firsts, seconds),
    do: {:ok, Enum.reverse(firsts), Enum.reverse(seconds)}

  defp unzip_ok([{:ok, first, second} | rest], firsts, seconds),
    do: unzip_ok(rest, [first | firsts], [second | seconds])

  defp unzip_ok([{:error, _} = error | _rest], _firsts, _seconds), do: error

  @doc """
  Like `Enum.map/2` but only continues while `fun` returns `{:ok, elem}`
  and stops on `{:error, reason}`.
  """
  def map_ok(list, fun) do
    # Accumulate mapped elements newest-first via reduce_ok, then reverse.
    result =
      reduce_ok(list, [], fn elem, acc ->
        case fun.(elem) do
          {:ok, mapped} -> {:ok, [mapped | acc]}
          {:error, _} = error -> error
        end
      end)

    case result do
      {:ok, reversed} -> {:ok, Enum.reverse(reversed)}
      {:error, _} = error -> error
    end
  end

  @doc """
  Like `Enum.each/2` but only continues while `fun` returns `:ok`
  and stops on `{:error, reason}`.
  """
  def each_ok([], _fun), do: :ok

  def each_ok([head | rest], fun) do
    case fun.(head) do
      :ok -> each_ok(rest, fun)
      {:error, _} = error -> error
    end
  end

  @doc """
  Like `Enum.map_reduce/3` but only continues while `fun` returns `{:ok, elem, acc}`
  and stops on `{:error, reason}`.
  """
  def map_reduce_ok(list, acc, fun), do: map_reduce_ok(list, [], acc, fun)

  defp map_reduce_ok([], mapped, acc, _fun), do: {:ok, Enum.reverse(mapped), acc}

  defp map_reduce_ok([head | rest], mapped, acc, fun) do
    case fun.(head, acc) do
      {:ok, elem, next_acc} -> map_reduce_ok(rest, [elem | mapped], next_acc, fun)
      {:error, _} = error -> error
    end
  end

  @doc """
  Like `map_reduce_ok/3` but `fun` returns `{:ok, elems, acc}` with a list of
  elements per step, and stops on `{:error, reason}`.
  """
  def flat_map_reduce_ok(list, acc, fun), do: flat_map_reduce_ok(list, [], acc, fun)

  # NOTE(review): reproduces the original reverse-after-concat, which also
  # reverses element order *within* each chunk — confirm this is intended
  # (callers may only ever produce singleton chunks).
  defp flat_map_reduce_ok([], chunks, acc, _fun),
    do: {:ok, Enum.reverse(Enum.concat(chunks)), acc}

  defp flat_map_reduce_ok([head | rest], chunks, acc, fun) do
    case fun.(head, acc) do
      {:ok, elems, next_acc} -> flat_map_reduce_ok(rest, [elems | chunks], next_acc, fun)
      {:error, _} = error -> error
    end
  end

  @doc """
  Given a list of `[{:ok, term()} | {:error, term()}]` it returns a list of
  errors `{:error, [term()]}` in case of at least one error or `{:ok, [term()]}`
  if there are no errors.
  """
  def oks_or_errors(results) do
    case Enum.split_with(results, &match?({:ok, _}, &1)) do
      {oks, []} -> {:ok, Enum.map(oks, fn {:ok, value} -> value end)}
      {_oks, errors} -> {:error, Enum.map(errors, fn {:error, reason} -> reason end)}
    end
  end

  # TODO: Remove this and let multiple when be treated as multiple clauses,
  # meaning they will be intersection types
  def guards_to_or([]), do: []

  def guards_to_or([first | rest]) do
    # Folds the guards into nested :erlang.orelse calls, first guard innermost.
    Enum.reduce(rest, first, fn guard, acc ->
      {{:., [], [:erlang, :orelse]}, [], [guard, acc]}
    end)
  end
end
|
lib/elixir/lib/module/types/helpers.ex
| 0.730963 | 0.556219 |
helpers.ex
|
starcoder
|
defmodule Theater.Storage do
  @moduledoc """
  Defines a persistence storage provider.
  Persistenced providers are responsible for keeping the state of Actors so
  that when they are cleaned out of memory they can be restored to a previously
  saved state.
  Implementations can be generic, designed to store any kind of state. Or they
  can be custom built, with each type of Actor stored in its own database
  table, for instance. You would just have to match on the different module
  types.
  A default storage implementation, `Theater.Storage.MnesiaDisk`, is provided
  with Theater, but it is **not recommended** for actual production use. Mnesia
  has significant issues with scaling, and only exists within the cluster
  itself, which can lead to problems when nodes are added or removed, and in a
  "split brain" scenario it completely defeats the purpose of the persistence
  storage. It is included only because Mnesia comes in the box with Erlang and
  it is sufficient to play with for understanding how Theater works. Please do
  not consider it anything more than a toy implementation.
  Modules that implement this behaviour must implement all three methods. There
  are no suitable defaults.
  """

  # Behaviour callbacks — get/2, put/3, delete/2. Actors are keyed by the
  # pair {module, id}; `state` is opaque to the storage layer.

  @doc """
  Invoked when a stored Actor state is needed.
  This call should find the state for the indicated type (module) and ID.
  Returning `{:ok, state}` indicates that the Actor's state was found and
  provides it.
  Returning `{:error, reason}` indicates that there was a problem trying to
  retrieve the Actor's state.
  Returning `:not_present` indicates that the Actor's state is not stored. This
  means that the Actor should be created anew with init().
  """
  @callback get(module :: atom, id :: any) ::
              {:ok, state :: any}
              | {:error, reason :: any}
              | :not_present

  @doc """
  Invoked to store an Actor's state.
  Returning `:ok` indicates that the Actor's state was successfully stored.
  Returning `{:error, reason}` indicates that there was an error storing the
  Actor's state. The caller should assume that the Actor's state was not
  persisted.
  """
  @callback put(module :: atom, id :: any, state :: any) ::
              :ok
              | {:error, reason :: any}

  @doc """
  Invoked to remove an Actor's state.
  Returning `:ok` indicates that the Actor's state was successfully removed.
  Returning `{:error, reason}` indicates that there was an error deleting the
  Actor's state. The caller may not assume that the Actor's state was removed.
  """
  @callback delete(module :: atom, id :: any) ::
              :ok
              | {:error, reason :: any}
end
|
lib/theater/storage.ex
| 0.845624 | 0.631594 |
storage.ex
|
starcoder
|
defmodule Athink do
  # RethinkDB-style query builder on top of Lexthink.
  # NOTE(review): this module targets a legacy (pre-1.0) Elixir release —
  # `defrecordp`, the `//` default-argument syntax and the in-branch
  # rebinding in `build/1` are not valid on modern Elixir. Documented as-is.
  alias Lexthink.AST, as: L

  # Query record: `terms` accumulates [function, args] steps that build/1
  # later folds into a Lexthink AST.
  defrecordp :query, __MODULE__, terms: []

  defmacro __using__(_opts) do
    quote do
      import unquote(__MODULE__), only: [r: 0, r: 1]
    end
  end

  # `r` with no argument expands to this module, enabling `r.table(...)`.
  defmacro r do
    quote do
      unquote(__MODULE__)
    end
  end

  # `r query` expands to running the query against the :azuki pool.
  defmacro r(query) do
    quote do
      unquote(query).run(:azuki)
    end
  end

  # MANIPULATING DATABASES
  @spec db_create(binary) :: :term.t
  def db_create(name) do
    new_term(db_create: [name])
  end

  @spec db_drop(binary) :: :term.t
  def db_drop(name) do
    new_term(db_drop: [name])
  end

  @spec db_list() :: :term.t
  def db_list() do
    new_term(db_list: [])
  end

  # MANIPULATING TABLES
  @spec table_create(binary) :: :query.t
  def table_create(name) do
    new_term(table_create: [name])
  end

  @spec table_create(binary, :query.t) :: :query.t
  def table_create(name, query() = old) do
    new_term(old, table_create: [name])
  end

  @spec table_drop(binary) :: :query.t
  def table_drop(name) do
    new_term(table_drop: [name])
  end

  @spec table_drop(binary, :query.t) :: :query.t
  def table_drop(name, query() = old) do
    new_term(old, table_drop: [name])
  end

  @spec table_list() :: :query.t
  def table_list do
    new_term(table_list: [])
  end

  @spec table_list(:query.t()) :: :query.t
  def table_list(query() = old) do
    new_term(old, table_list: [])
  end

  # SELECTING DATA
  @spec db(binary) :: :query.t
  def db(name) do
    new_term(db: [name])
  end

  @spec table(binary) :: :query.t
  def table(name) do
    new_term(table: [name])
  end

  @spec table(binary, :query.t()) :: :query.t
  def table(name, query() = old) do
    new_term(old, table: [name])
  end

  def get(key, query() = old) do
    new_term(old, get: [key])
  end

  @spec filter(Dict.t | [Dict.t] | fun, Keyword.t, :query.t) :: :query.t
  def filter(data, options // [], query() = old) do
    new_term(old, filter: [data, options])
  end

  # WRITING DATA
  @spec insert(Dict.t | [Dict.t], Keyword.t, :query.t) :: :term.t
  def insert(data, options // [], query() = old) do
    new_term(old, insert: [data, options])
  end

  @spec update(Dict.t | fun, Keyword.t, :query.t) :: :query.t
  def update(data, options // [], query() = old) do
    new_term(old, update: [data, options])
  end

  @spec replace(Dict.t | fun, Keyword.t, :query.t) :: :query.t
  def replace(data, options // [], query() = old) do
    new_term(old, replace: [data, options])
  end

  @spec delete(Keyword.t, :query.t) :: :query.t
  def delete(options // [], query() = old) do
    new_term(old, delete: [options])
  end

  # TRANSFORMATIONS
  @spec limit(number, :query.t) :: :query.t
  def limit(limit, query() = old) do
    new_term(old, term: [[type: :'LIMIT', args: [expr(limit)]]])
  end

  # CONTROL STRUCTURES
  @spec type_of(:query.t) :: :query.t
  def type_of(query() = old) do
    new_term(old, term: [[type: :'TYPEOF']])
  end

  @spec info(:query.t) :: :query.t
  def info(query() = old) do
    new_term(old, term: [[type: :'INFO']])
  end

  def expr(expr_arg, query() = old) do
    new_term(old, expr: [expr_arg])
  end

  def expr(expr_arg) do
    new_term(expr: [expr_arg])
  end

  # ACCESSING RQL
  # Builds the accumulated AST and executes it on the given pool.
  def run(pool, query() = old) do
    Lexthink.run(old.build, pool)
  end

  # Math and logic operators, generated below as 2-arity delegates to
  # Lexthink.AST (e.g. add(term, value) -> L.add(term, value)).
  @operators [
    :add, :sub, :mul, :div, :mod,
    :or_, :and_, :not_,
    :eq, :ne, :gt, :ge, :lt, :le
  ]
  Module.eval_quoted __MODULE__, Enum.map(@operators, fn(logic) ->
    quote do
      def unquote(logic)(term, value) do
        apply(L, unquote(logic), [term, value])
      end
    end
  end)

  # Utils
  # Folds the accumulated [func, args] steps into a Lexthink AST term; the
  # result of each step becomes the first argument of the next.
  # NOTE(review): relies on legacy Elixir scoping where rebinding mod/args
  # inside the `case` branches leaks out of the case.
  def build(query(terms: terms)) do
    Enum.reduce(terms, nil, fn([func, args], terms) ->
      case func do
        :term ->
          [mod, func] = [:term, :new]
          args = [expand_args(terms, args)]
        _ ->
          mod = L
          if terms != nil, do: args = [terms] ++ args
      end
      apply(mod, func, args)
    end)
  end

  # Prepares the keyword options for a raw :term step: builds any nested
  # queries found in :args and prepends the accumulated term, if any.
  defp expand_args(terms, opts) do
    opts = List.flatten(opts)
    args = Enum.map(Keyword.get(opts, :args, []), fn
      query() = reql -> reql.build
      arg -> arg
    end)
    if terms do
      args = [terms] ++ args
    end
    Keyword.put(opts, :args, args)
  end

  # Starts a fresh query with a single step.
  defp new_term([{func, args}]) do
    query(terms: [[func, args]])
  end

  # Appends a step to an existing query.
  defp new_term(query(terms: terms) = old, [{func, args}]) do
    query(old, terms: terms ++ [[func, args]])
  end
end
|
lib/athink.ex
| 0.532182 | 0.450662 |
athink.ex
|
starcoder
|
defmodule Phoenix.Template do
  @moduledoc """
  Templates are used by Phoenix on rendering.
  Since many views require rendering large contents, for example
  a whole HTML file, it is common to put those files in the file
  system into a particular directory, typically "web/templates".
  This module provides conveniences for reading all files from a
  particular directory and embeding them into a single module.
  Imagine you have a directory with templates:

      # templates/foo.html.eex
      Hello <%= @name %>

      # templates.ex
      defmodule Templates do
        use Phoenix.Template, root: "templates"
      end

  Now the template foo can be directly rendered with:

      Templates.render("foo.html", %{name: "<NAME>"})

  In practice though, developers rarely use `Phoenix.Template`
  directly. Instead they use `Phoenix.View` which wraps the template
  functionality and add some extra conveniences.

  ## Terminology

  Here is a quick introduction into Phoenix templates terms:

    * template name - is the name of the template as
      given by the user, without the template engine extension,
      for example: "users.html"

    * template path - is the complete path of the template
      in the filesystem, for example, "path/to/users.html.eex"

    * template root - the directory were templates are defined

    * template engine - a module that receives a template path
      and transforms its source code into Elixir quoted expressions.

  ## Custom Template Engines

  Phoenix supports custom template engines. Engines tell
  Phoenix how to convert a template path into quoted expressions.
  Please check `Phoenix.Template.Engine` for more information on
  the API required to be implemented by custom engines.
  Once a template engine is defined, you can tell Phoenix
  about it via the template engines option:

      config :phoenix, :template_engines,
        eex: Phoenix.Template.EExEngine,
        exs: Phoenix.Template.ExsEngine

  ## Format encoders

  Besides template engines, Phoenix has the concept of format encoders.
  Format encoders work per format and are responsible for encoding a
  given format to string once the view layer finishes processing.
  A format encoder must export a function called `encode!/1` which
  receives the rendering artifact and returns a string.
  New encoders can be added via the format encoder option:

      config :phoenix, :format_encoders,
        html: Phoenix.HTML.Engine,
        json: Poison
  """

  @type name :: binary
  @type path :: binary
  @type root :: binary

  alias Phoenix.Template

  # Built-in defaults, merged with (and overridable by) app config in
  # compiled_format_encoders/0 and compiled_engines/0 below.
  @encoders [html: Phoenix.HTML.Engine, json: Poison]
  @engines [eex: Phoenix.Template.EExEngine, exs: Phoenix.Template.ExsEngine]

  defmodule UndefinedError do
    @moduledoc """
    Exception raised when a template cannot be found.
    """
    defexception [:available, :template, :module, :root]

    def message(exception) do
      "Could not render #{inspect exception.template} for #{inspect exception.module}, "
      <> "please define a clause for render/2 or define a template at "
      <> "#{inspect Path.relative_to_cwd exception.root}. "
      <> available_templates(exception.available)
    end

    defp available_templates([]), do: "No templates were compiled for this module."
    defp available_templates(available) do
      "The following templates were compiled:\n\n"
      <> Enum.map_join(available, "\n", &"* #{&1}")
      <> "\n"
    end
  end

  @doc false
  defmacro __using__(options) do
    # NOTE(review): `Dict` is deprecated on modern Elixir (use Keyword);
    # kept as-is since this file targets an older Elixir/Phoenix.
    path = Dict.fetch! options, :root
    quote do
      @template_root Path.relative_to_cwd(unquote(path))
      @before_compile unquote(__MODULE__)

      @doc """
      Renders the given template locally.
      """
      def render(template, assigns \\ %{})
    end
  end

  @doc false
  defmacro __before_compile__(env) do
    root = Module.get_attribute(env.module, :template_root)

    # Compile every template under the root into a {name, quoted code} pair.
    pairs = for path <- find_all(root) do
      compile(path, root)
    end

    names = Enum.map(pairs, &elem(&1, 0))
    codes = Enum.map(pairs, &elem(&1, 1))

    # We are using line -1 because we don't want warnings coming from
    # render/2 to be reported in case the user has already defined a
    # catch all render/2 clause.
    quote line: -1 do
      unquote(codes)

      # Catch-all clause: raises listing the templates that were compiled.
      def render(template, _assign) do
        raise UndefinedError,
          available: unquote(names),
          template: template,
          root: @template_root,
          module: __MODULE__
      end

      @doc """
      Returns true whenever the list of templates change in the filesystem.
      """
      def __phoenix_recompile__?, do: unquote(hash(root)) != Template.hash(@template_root)
    end
  end

  @doc """
  Returns the format encoder for the given template name.
  """
  @spec format_encoder(name) :: module | nil
  def format_encoder(template_name) when is_binary(template_name) do
    Map.get(compiled_format_encoders, Path.extname(template_name))
  end

  # Merges @encoders with app config, drops disabled (falsy) entries, keys
  # the map by ".ext", and caches the result in the application environment.
  defp compiled_format_encoders do
    case Application.fetch_env(:phoenix, :compiled_format_encoders) do
      {:ok, encoders} ->
        encoders
      :error ->
        encoders =
          @encoders
          |> Keyword.merge(raw_config(:format_encoders))
          |> Enum.filter(fn {_, v} -> v end)
          |> Enum.into(%{}, fn {k, v} -> {".#{k}", v} end)
        Application.put_env(:phoenix, :compiled_format_encoders, encoders)
        encoders
    end
  end

  @doc """
  Returns a keyword list with all template engines
  extensions followed by their modules.
  """
  @spec engines() :: %{atom => module}
  def engines do
    compiled_engines()
  end

  # Same caching scheme as compiled_format_encoders/0, for template engines.
  defp compiled_engines do
    case Application.fetch_env(:phoenix, :compiled_template_engines) do
      {:ok, engines} ->
        engines
      :error ->
        engines =
          @engines
          |> Keyword.merge(raw_config(:template_engines))
          |> Enum.filter(fn {_, v} -> v end)
          |> Enum.into(%{})
        Application.put_env(:phoenix, :compiled_template_engines, engines)
        engines
    end
  end

  # Reads a :phoenix config key, raising when the application env is absent
  # (i.e. the :phoenix application has not been started).
  defp raw_config(name) do
    Application.get_env(:phoenix, name) ||
      raise "could not load #{name} configuration for Phoenix." <>
            " Was the :phoenix application started?"
  end

  @doc """
  Converts the template path into the template name.

  ## Examples

      iex> Phoenix.Template.template_path_to_name(
      ...>   "lib/templates/admin/users/show.html.eex",
      ...>   "lib/templates")
      "admin/users/show.html"
  """
  @spec template_path_to_name(path, root) :: name
  def template_path_to_name(path, root) do
    path
    |> Path.rootname()
    |> Path.relative_to(root)
  end

  @doc """
  Converts a module, without the suffix, to a template root.

  ## Examples

      iex> Phoenix.Template.module_to_template_root(MyApp.UserView, "View")
      "user"

      iex> Phoenix.Template.module_to_template_root(MyApp.Admin.User, "View")
      "admin/user"
  """
  def module_to_template_root(module, suffix) do
    module
    |> Phoenix.Naming.unsuffix(suffix)
    |> Module.split
    |> tl
    |> Enum.map(&Phoenix.Naming.underscore/1)
    |> Path.join
  end

  @doc """
  Returns all template paths in a given template root.
  """
  @spec find_all(root) :: [path]
  def find_all(root) do
    extensions = engines |> Map.keys() |> Enum.join(",")
    Path.wildcard("#{root}/*.{#{extensions}}")
  end

  @doc """
  Returns the hash of all template paths in the given root.
  Used by Phoenix to check if a given root path requires recompilation.
  """
  @spec hash(root) :: binary
  def hash(root) do
    find_all(root)
    |> Enum.sort
    |> :erlang.md5
  end

  # Compiles one template file into quoted code that defines a private
  # function (named after the template) plus a render/2 clause dispatching
  # to it.
  defp compile(path, root) do
    name = template_path_to_name(path, root)
    defp = String.to_atom(name)
    # NOTE(review): String.lstrip/2 is deprecated on modern Elixir
    # (String.trim_leading/2); kept for compatibility with this codebase.
    ext = Path.extname(path) |> String.lstrip(?.) |> String.to_atom
    engine = engines()[ext]
    quoted = engine.compile(path, name)

    {name, quote do
      @file unquote(path)
      @external_resource unquote(path)

      defp unquote(defp)(var!(assigns)) do
        _ = var!(assigns)
        unquote(quoted)
      end

      def render(unquote(name), assigns) do
        unquote(defp)(assigns)
      end
    end}
  end
end
|
lib/phoenix/template.ex
| 0.864268 | 0.491029 |
template.ex
|
starcoder
|
defmodule ExConfig.Source do
  @moduledoc """
  Interface for pluggable modules to get data from external sources.

  It is a very common case that a parameter value is dynamic and based on
  something from outside an application, like OS environment variables,
  file system objects, etc. When a parameter is read and its value
  matches the pattern `{module(), Keyword.t()}`, ExConfig tries to treat
  it as a `Source` behaviour implementation. For example:

      {ExConfig.Source.System, name: "ENV_NAME", default: "is not defined"}

  An implementation has to define a struct and a `handle/2` function.
  """

  # A source reference as it appears in config: `{SourceModule, options}`.
  @type value() :: {module(), Keyword.t()}

  @type handle_result() :: %ExConfig.Param{}
                         | {:ok, data :: any()}
                         | {:error, reason :: any()}

  # Implementations must be structs (i.e. call `defstruct`).
  @callback __struct__(any) :: any
  @callback handle(source :: struct,
                   param :: %ExConfig.Param{}) :: handle_result

  @doc """
  Returns every `{source, options}` occurrence of the given source module
  found (however deeply nested) in the application environment of all
  loaded applications. `filter` is a predicate over the options list.
  """
  @spec get_source_occurrences(module, (Keyword.t -> boolean)) :: [value]
  def get_source_occurrences(source, filter \\ fn _ -> true end)
      when is_atom(source)
      and is_function(filter, 1) do
    get_source_occurrences(source, filter, ExConfig.Utils.get_all_env())
  end

  @doc false
  @spec get_source_occurrences(module, function, Keyword.t) :: [value]
  def get_source_occurrences(source, filter, all_envs) do
    Enum.reduce(all_envs, [], fn ({_app, envs}, acc) ->
      deep_source_search(source, filter, envs, acc)
    end)
  end

  # Recursively walks lists and maps, collecting matching `{source, opts}`
  # tuples into `acc`. Containers are reduced element by element; tuples
  # fall through to the clauses below.
  @spec deep_source_search(module, function, any, list) :: [value]
  defp deep_source_search(source, filter, envs, acc)
       when is_list(envs) or is_map(envs) do
    Enum.reduce(envs, acc, &deep_source_search(source, filter, &1, &2))
  end

  # `source` occurs twice in the head, pinning the tuple's first element to
  # the module being searched for. Clause order matters: this must come
  # before the generic `{_key, value}` clause below.
  defp deep_source_search(source, filter, {source, options}, acc) when is_list(options) do
    if Keyword.keyword?(options) do
      if filter.(options) do
        [{source, options} | acc]
      else
        acc
      end
    else
      # A plain (non-keyword) list in second position: keep descending.
      deep_source_search(source, filter, options, acc)
    end
  end

  # Any other 2-tuple is treated as a key/value pair; search the value.
  defp deep_source_search(source, filter, {_key, value}, acc) do
    deep_source_search(source, filter, value, acc)
  end

  # Scalars and everything else contribute nothing.
  defp deep_source_search(_, _, _, acc), do: acc
end
|
lib/ex_config/source.ex
| 0.669205 | 0.402128 |
source.ex
|
starcoder
|
defmodule OMG.Eth.RootChain.AbiEventSelector do
  @moduledoc """
  Solidity event selectors that help us decode returned values from
  function calls.

  The function names here are meant to be used as inputs to the Event
  Fetcher; each describes the type of event it will retrieve.
  """

  @spec exit_started() :: ABI.FunctionSelector.t()
  def exit_started() do
    event("ExitStarted", ["owner", "exitId"], [true, false], <<221, 111, 117, 92>>, [
      :address,
      {:uint, 160}
    ])
  end

  @spec in_flight_exit_started() :: ABI.FunctionSelector.t()
  def in_flight_exit_started() do
    event("InFlightExitStarted", ["initiator", "txHash"], [true, true], <<213, 241, 254, 157>>, [
      :address,
      {:bytes, 32}
    ])
  end

  @spec in_flight_exit_deleted() :: ABI.FunctionSelector.t()
  def in_flight_exit_deleted() do
    event("InFlightExitDeleted", ["exitId"], [true], <<25, 145, 196, 195>>, [{:uint, 160}])
  end

  @spec in_flight_exit_challenged() :: ABI.FunctionSelector.t()
  def in_flight_exit_challenged() do
    event(
      "InFlightExitChallenged",
      ["challenger", "txHash", "challengeTxPosition"],
      [true, true, false],
      <<104, 116, 1, 150>>,
      [:address, {:bytes, 32}, {:uint, 256}]
    )
  end

  @spec deposit_created() :: ABI.FunctionSelector.t()
  def deposit_created() do
    event(
      "DepositCreated",
      ["depositor", "blknum", "token", "amount"],
      [true, true, true, false],
      <<24, 86, 145, 34>>,
      [:address, {:uint, 256}, :address, {:uint, 256}]
    )
  end

  @spec in_flight_exit_input_piggybacked() :: ABI.FunctionSelector.t()
  def in_flight_exit_input_piggybacked() do
    event(
      "InFlightExitInputPiggybacked",
      ["exitTarget", "txHash", "inputIndex"],
      [true, true, false],
      <<169, 60, 14, 155>>,
      [:address, {:bytes, 32}, {:uint, 16}]
    )
  end

  @spec in_flight_exit_output_piggybacked() :: ABI.FunctionSelector.t()
  def in_flight_exit_output_piggybacked() do
    event(
      "InFlightExitOutputPiggybacked",
      ["exitTarget", "txHash", "outputIndex"],
      [true, true, false],
      <<110, 205, 142, 121>>,
      [:address, {:bytes, 32}, {:uint, 16}]
    )
  end

  @spec block_submitted() :: ABI.FunctionSelector.t()
  def block_submitted() do
    event("BlockSubmitted", ["blockNumber"], [false], <<90, 151, 143, 71>>, [{:uint, 256}])
  end

  @spec exit_finalized() :: ABI.FunctionSelector.t()
  def exit_finalized() do
    event("ExitFinalized", ["exitId"], [true], <<10, 219, 41, 176>>, [{:uint, 160}])
  end

  @spec in_flight_exit_challenge_responded() :: ABI.FunctionSelector.t()
  def in_flight_exit_challenge_responded() do
    # The method id <<99, 124, 196, 167>> prints as "c|ħ".
    event(
      "InFlightExitChallengeResponded",
      ["challenger", "txHash", "challengeTxPosition"],
      [true, true, false],
      <<99, 124, 196, 167>>,
      [:address, {:bytes, 32}, {:uint, 256}]
    )
  end

  @spec exit_challenged() :: ABI.FunctionSelector.t()
  def exit_challenged() do
    event("ExitChallenged", ["utxoPos"], [true], <<93, 251, 165, 38>>, [{:uint, 256}])
  end

  @spec in_flight_exit_input_blocked() :: ABI.FunctionSelector.t()
  def in_flight_exit_input_blocked() do
    event(
      "InFlightExitInputBlocked",
      ["challenger", "txHash", "inputIndex"],
      [true, true, false],
      <<71, 148, 4, 88>>,
      [:address, {:bytes, 32}, {:uint, 16}]
    )
  end

  @spec in_flight_exit_output_blocked() :: ABI.FunctionSelector.t()
  def in_flight_exit_output_blocked() do
    event(
      "InFlightExitOutputBlocked",
      ["challenger", "txHash", "outputIndex"],
      [true, true, false],
      <<203, 232, 218, 210>>,
      [:address, {:bytes, 32}, {:uint, 16}]
    )
  end

  @spec in_flight_exit_input_withdrawn() :: ABI.FunctionSelector.t()
  def in_flight_exit_input_withdrawn() do
    event(
      "InFlightExitInputWithdrawn",
      ["exitId", "inputIndex"],
      [true, false],
      <<68, 70, 236, 17>>,
      [{:uint, 160}, {:uint, 16}]
    )
  end

  @spec in_flight_exit_output_withdrawn() :: ABI.FunctionSelector.t()
  def in_flight_exit_output_withdrawn() do
    event(
      "InFlightExitOutputWithdrawn",
      ["exitId", "outputIndex"],
      [true, false],
      <<162, 65, 198, 222>>,
      [{:uint, 160}, {:uint, 16}]
    )
  end

  # Shared constructor: every selector in this module is an event with no
  # return values, so only the varying fields are parameters.
  defp event(name, input_names, inputs_indexed, method_id, types) do
    %ABI.FunctionSelector{
      function: name,
      input_names: input_names,
      inputs_indexed: inputs_indexed,
      method_id: method_id,
      returns: [],
      type: :event,
      types: types
    }
  end
end
|
apps/omg_eth/lib/omg_eth/root_chain/abi_event_selector.ex
| 0.705684 | 0.472562 |
abi_event_selector.ex
|
starcoder
|
use Bitwise

defmodule D3 do
  @moduledoc """
  --- Day 3: Toboggan Trajectory ---
  With the toboggan login problems resolved, you set off toward the airport. While travel by toboggan might be easy, it's certainly not safe: there's very minimal steering and the area is covered in trees. You'll need to see which angles will take you near the fewest trees.
  Due to the local geology, trees in this area only grow on exact integer coordinates in a grid. You make a map (your puzzle input) of the open squares (.) and trees (#) you can see. For example:
  These aren't the only trees, though; due to something you read about once involving arboreal genetics and biome stability, the same pattern repeats to the right many times:
  The toboggan can only follow a few specific slopes (you opted for a cheaper model that prefers rational numbers); start by counting all the trees you would encounter for the slope right 3, down 1:
  From your starting position at the top-left, check the position that is right 3 and down 1. Then, check the position that is right 3 and down 1 from there, and so on until you go past the bottom of the map.
  The locations you'd check in the above example are marked here with O where there was an open square and X where there was a tree:
  Starting at the top-left corner of your map and following a slope of right 3 and down 1, how many trees would you encounter?
  --- Part Two ---
  Time to check the rest of the slopes - you need to minimize the probability of a sudden arboreal stop, after all.
  Determine the number of trees you would encounter if, for each of the following slopes, you start at the top-left corner and traverse the map all the way to the bottom:
  What do you get if you multiply together the number of trees encountered on each of the listed slopes?
  """

  @behaviour Day

  # True when bit `at` of the integer `bin` is set.
  defp bin_at(bin, at) do
    mask = 1 <<< at
    bin_at_result = mask == (bin &&& mask)
    bin_at_result
  end

  @impl true
  def solve(input) do
    input = input |> Utils.to_strings()
    lines = Enum.count(input)
    line_length = input |> List.first() |> String.length()
    total_length = lines * line_length
    joined = Enum.join(input, "")

    # Pack the whole grid into one big integer, '#' -> 1 and '.' -> 0.
    # Reversing first means the FIRST character of `joined` ends up as the
    # least-significant bit, so bit index i corresponds to character i.
    bin =
      joined
      |> to_charlist
      |> Enum.reverse()
      |> Enum.reduce(0, fn
        ?., acc -> acc <<< 1
        ?#, acc -> acc <<< 1 ||| 1
      end)

    # Counts trees hit on slope (right, down). For step i: the row offset is
    # i * line_length * down; the column wraps modulo line_length (the map
    # repeats to the right); the flat index wraps modulo the grid size.
    tree_count = fn right, down ->
      0..div(lines - 1, down)
      |> Enum.map(fn i ->
        index = rem(i * line_length * down + rem(i * right, line_length), total_length)
        bin_at(bin, index)
      end)
      |> Enum.count(& &1)
    end

    part_1 = tree_count.(3, 1)

    # Slope (3, 1) is already included via `part_1` as the starting
    # accumulator of the product below.
    part_2 =
      [
        [1, 1],
        [5, 1],
        [7, 1],
        [1, 2]
      ]
      |> Enum.map(fn [right, down] -> tree_count.(right, down) end)
      |> Enum.reduce(part_1, &(&1 * &2))

    {part_1, part_2}
  end
end
|
lib/days/03.ex
| 0.85987 | 0.784071 |
03.ex
|
starcoder
|
defmodule Muscat.AugmentedMatrix do
  @moduledoc """
  Builds augmented matrices and solves linear systems by Gauss-Jordan
  elimination (reduced row echelon form) using exact fraction arithmetic.
  """

  alias Muscat.Matrix
  alias Muscat.Fraction
  import Muscat.Fraction, only: [is_zero_fraction: 1]

  @type element :: Fraction.fraction_tuple() | integer()
  @type matrix :: nonempty_list(Matrix.Cell.t())

  @doc "Create augmented matrix by augmented matrix list"
  @spec new(augmented_matrix :: nonempty_list(nonempty_list(element()))) :: matrix()
  def new(augmented_matrix) do
    if all_list?(augmented_matrix) and valid_list?(augmented_matrix) do
      rows_count = length(augmented_matrix)
      cols_count = augmented_matrix |> List.first() |> length()

      # Cells are 1-indexed by (row, col); values are stored as fractions.
      for row <- Range.new(1, rows_count), col <- Range.new(1, cols_count) do
        value =
          augmented_matrix
          |> Enum.at(row - 1)
          |> Enum.at(col - 1)
          |> Fraction.new()

        %Matrix.Cell{row: row, col: col, value: value}
      end
    else
      raise ArgumentError, "The given parameter can not generate the augmented matrix."
    end
  end

  @doc "Create augmented matrix by coefficient matrix list and constant column list"
  @spec new(
          coefficient_matrix :: nonempty_list(nonempty_list(element())),
          constant_column :: nonempty_list(element())
        ) :: matrix()
  def new(coefficient_matrix, constant_column) do
    if length(coefficient_matrix) == length(constant_column) do
      coefficient_matrix
      |> Enum.zip(constant_column)
      |> Enum.map(fn {coefficients, constant} ->
        coefficients ++ [constant]
      end)
      |> new()
    else
      raise ArgumentError, "The given parameter can not generate the augmented matrix."
    end
  end

  # True when every element of the outer list is itself a list.
  defp all_list?(lists) do
    lists |> Enum.map(&is_list/1) |> Enum.all?(& &1)
  end

  # True when all rows are non-empty and have the same length.
  defp valid_list?(lists) do
    case lists |> Enum.map(&length/1) |> Enum.uniq() do
      [0] -> false
      [_length] -> true
      _ -> false
    end
  end

  @doc """
  Reduce an augmented matrix into `reduced row echelon form` and give the equation solution.

  ### Options

  - `:value_type` - The result value type, `:float`(default), `:fraction`.
  - `:precision` - If the `value_type` is `:float`, round the float.
  - `:default_value` - Value used for free variables (defaults to `:any` in
    the result; `1` when completing an underdetermined system).
  """
  @type solution :: list(Fraction.t() | float())
  @type rref_result ::
          {:ok, solution()}
          | {:error, :no_solution}
          | {:error, :infinite_solutions}
          | {:error, :approximate_solution}
  @spec rref(augmented_matrix :: matrix()) :: rref_result()
  @spec rref(augmented_matrix :: matrix(), opts :: keyword()) :: rref_result()
  def rref(matrix, opts \\ []) do
    # No `else`: a failing `valid_solution_exists/1` falls through and its
    # `{:error, reason}` is returned as-is.
    with upper_triangular_matrix <- upper_triangular_matrix(matrix),
         {:ok, solution_type} <- valid_solution_exists(upper_triangular_matrix) do
      solution =
        upper_triangular_matrix
        |> fit_single_solution_matrix(solution_type, opts)
        |> diagonal_matrix()
        |> identity_matrix()
        |> take_solution(opts)

      {:ok, solution}
    end
  end

  # Normalizes the triangular matrix into one with a unique solution:
  # drops all-zero rows, fills in defaults for free variables, sorts rows.
  defp fit_single_solution_matrix(matrix, solution_type, opts) do
    matrix
    |> remove_zero_rows()
    |> set_default_rows_if_needed(solution_type, opts)
    |> sort_rows()
  end

  defp remove_zero_rows(matrix) do
    matrix
    |> Enum.group_by(& &1.row)
    |> Enum.reject(fn {_row, cells} ->
      Enum.all?(cells, &Fraction.is_zero_fraction(&1.value))
    end)
    |> Enum.map(fn {_row, cells} -> cells end)
    |> List.flatten()
  end

  defp set_default_rows_if_needed(matrix, :single_solution, _), do: matrix

  # For an underdetermined system, pins each free variable to
  # `:default_value` by adding a `x_i = default` row.
  defp set_default_rows_if_needed(matrix, :infinite_solutions, opts) do
    default_value = Keyword.get(opts, :default_value, 1) |> Fraction.new()
    coefficient_cols = Matrix.col_count(matrix) - 1
    total_rows = Range.new(1, coefficient_cols) |> Enum.to_list()
    {new_matrix, exist_rows} = missing_main_diagonal_cell_idxs(matrix)

    new_cells =
      Enum.reduce(total_rows -- exist_rows, [], fn row_idx, acc ->
        coefficients =
          Range.new(1, coefficient_cols)
          |> Enum.map(fn
            ^row_idx -> %Matrix.Cell{row: row_idx, col: row_idx, value: Fraction.new(1)}
            col -> %Matrix.Cell{row: row_idx, col: col, value: Fraction.new(0)}
          end)

        constant = %Matrix.Cell{row: row_idx, col: coefficient_cols + 1, value: default_value}
        [constant | coefficients] ++ acc
      end)

    new_cells ++ new_matrix
  end

  # Renumbers each row to the column of its leading (first non-zero) entry
  # and sorts cells by (row, col).
  defp sort_rows(matrix) do
    grouped_rows = Enum.group_by(matrix, & &1.row)
    rows = Enum.map(grouped_rows, fn {_row, cells} -> first_non_zero_cell(cells) end)

    Enum.zip(grouped_rows, rows)
    |> Enum.map(fn {{_row, cells}, row} ->
      Enum.map(cells, &Map.put(&1, :row, row))
    end)
    |> List.flatten()
    |> Enum.sort_by(&{&1.row, &1.col})
  end

  # Moves rows whose pivot is off the main diagonal onto the pivot's row and
  # returns `{updated_matrix, occupied_row_indexes}`.
  defp missing_main_diagonal_cell_idxs(matrix) do
    matrix
    |> Enum.group_by(& &1.row)
    |> Enum.reduce({matrix, []}, fn {row, cells}, {new_matrix, missing_rows} ->
      case first_non_zero_cell(cells) do
        ^row ->
          {new_matrix, [row | missing_rows]}

        col ->
          new = Enum.map(cells, &Map.put(&1, :row, col))
          new_matrix = new_matrix |> Matrix.remove_row(cells) |> Matrix.add_row(new)
          {new_matrix, [col | missing_rows]}
      end
    end)
  end

  # Column index of the row's leading non-zero entry (nil if all zero).
  defp first_non_zero_cell(row_cells) do
    row_cells
    |> Enum.sort_by(& &1.col)
    |> Enum.reduce_while(0, fn %{col: col, value: value}, _ ->
      if is_zero_fraction(value) do
        {:cont, nil}
      else
        {:halt, col}
      end
    end)
  end

  # Extracts the constant column as the solution, honoring `:value_type`.
  defp take_solution(identity_matrix, opts) do
    default_value = Keyword.get(opts, :default_value, :any)
    value_type = Keyword.get(opts, :value_type, :float)

    identity_matrix
    |> get_constant_column()
    |> Enum.sort_by(& &1.row)
    |> Enum.map(fn
      %{value: :any} ->
        default_value

      %{value: fraction} ->
        case value_type do
          :float ->
            Fraction.to_float(fraction, opts)

          # Fix: the original clause `fraction -> fraction` shadowed the
          # outer binding and returned `value_type` itself (e.g. the atom
          # `:fraction`) instead of the fraction value.
          _ ->
            fraction
        end
    end)
  end

  # Gaussian forward elimination: zero out entries below each pivot.
  defp upper_triangular_matrix(matrix) do
    case Matrix.row_count(matrix) do
      1 ->
        matrix

      row_count ->
        Range.new(1, row_count - 1)
        |> Enum.reduce(matrix, fn row, matrix ->
          elementary_row_transform(matrix, row)
        end)
        |> replace_duplicated_by_zero()
    end
  end

  # For pivot `row`: subtract the pivot row (scaled) from every row below it.
  defp elementary_row_transform(matrix, row) do
    matrix = swap_rows_if_needed(matrix, row)

    case Matrix.get_cell(matrix, row, row) do
      %{value: value} when is_zero_fraction(value) ->
        matrix

      diagonal_cell ->
        base_row = Matrix.get_row(matrix, row)
        {other_cells, transform_cells} = Enum.split_with(matrix, &(&1.row <= row))

        cells =
          transform_cells
          |> Enum.group_by(& &1.row)
          |> Enum.map(fn {target_row, row_cells} ->
            case Matrix.get_cell(matrix, target_row, row) do
              %{value: value} when is_zero_fraction(value) ->
                row_cells

              %{value: target_value} ->
                coefficient = Fraction.divide(target_value, diagonal_cell.value)
                do_elementary_transform(coefficient, base_row, row_cells)
            end
          end)
          |> List.flatten()

        cells ++ other_cells
    end
  end

  # Duplicate rows carry no new information; replace all but the first
  # occurrence with zero rows (removed later by `remove_zero_rows/1`).
  defp replace_duplicated_by_zero(matrix) do
    matrix
    |> filter_duplicated_rows()
    |> Enum.reduce(matrix, fn row_cells, acc ->
      Matrix.update_row(acc, Enum.map(row_cells, &Map.put(&1, :value, Fraction.new(0))))
    end)
  end

  defp filter_duplicated_rows(matrix) do
    matrix
    |> Enum.group_by(& &1.row)
    |> Enum.sort_by(fn {row, _cells} -> row end)
    |> Enum.map(fn {_row, cells} -> Enum.sort_by(cells, & &1.col) end)
    |> do_filter_duplicated()
  end

  defp do_filter_duplicated(rows, acc \\ [])
  defp do_filter_duplicated([], acc), do: acc

  defp do_filter_duplicated([cells | others], acc) do
    {zero_cells, valid_cells} =
      Enum.split_with(others, fn other_cells ->
        cells
        |> Enum.zip(other_cells)
        |> Enum.all?(fn {%{value: a}, %{value: b}} -> Fraction.equal?(a, b) end)
      end)

    do_filter_duplicated(valid_cells, zero_cells ++ acc)
  end

  # row_cells := row_cells - coefficient * base_row (fraction arithmetic).
  defp do_elementary_transform(coefficient, base_row, row_cells) do
    row_cells
    |> Enum.sort_by(& &1.col)
    |> Enum.zip(base_row)
    |> Enum.map(fn {row_cell, base_cell} ->
      Matrix.update_cell(
        row_cell,
        &(&1 |> Fraction.minus(Fraction.multi(coefficient, base_cell.value)) |> Fraction.reduce())
      )
    end)
  end

  # Partial pivoting: swap in the row with the largest absolute value in the
  # pivot column (among rows at or below `row`) for numerical robustness.
  defp swap_rows_if_needed(matrix, row) do
    case matrix |> Enum.reject(&(&1.row < row)) |> Matrix.max_abs_row_in_col(row) do
      ^row ->
        matrix

      :no_data ->
        matrix

      max_row ->
        Matrix.swap_row(matrix, row, max_row)
    end
  end

  # Classifies the system via the ranks of the augmented and coefficient
  # matrices (Rouché-Capelli).
  defp valid_solution_exists(upper_triangular_matrix) do
    constant_column = get_constant_column(upper_triangular_matrix)
    coefficient_matrix = upper_triangular_matrix -- constant_column
    augmented_rank = rank(upper_triangular_matrix)
    coefficient_rank = rank(coefficient_matrix)
    element_count = element_number(coefficient_matrix)

    cond do
      augmented_rank == coefficient_rank and
          coefficient_rank == element_count ->
        {:ok, :single_solution}

      augmented_rank == coefficient_rank and
          coefficient_rank < element_count ->
        {:ok, :infinite_solutions}

      augmented_rank > element_count ->
        # Fix: was `:approximate_solutions`, which contradicted the
        # `rref_result` typespec (`:approximate_solution`).
        {:error, :approximate_solution}

      true ->
        {:error, :no_solution}
    end
  end

  # Rank = number of non-zero rows of the (triangularized) matrix.
  def rank(matrix) do
    matrix
    |> remove_zero_rows()
    |> Enum.map(& &1.row)
    |> Enum.uniq()
    |> length()
  end

  # Number of unknowns = number of coefficient columns.
  def element_number(coefficient_matrix) do
    coefficient_matrix
    |> Enum.group_by(& &1.col)
    |> Enum.to_list()
    |> length()
  end

  # Backward elimination: zero out entries above each pivot, bottom-up.
  defp diagonal_matrix(upper_triangular_matrix) do
    row_count = Matrix.row_count(upper_triangular_matrix)

    Range.new(row_count, 1)
    |> Enum.reduce(upper_triangular_matrix, fn row, matrix ->
      eliminate_element(matrix, row)
    end)
  end

  # Clears the pivot column above row `row`, updating the constant column
  # with the same row operations.
  defp eliminate_element(matrix, row) do
    base_cell = Matrix.get_cell(matrix, row, row)
    col_cells = Matrix.get_col(matrix, row)
    constant_column = get_constant_column(matrix)
    base_constant = Enum.find(constant_column, &(&1.row == row))

    {col_cells, constant_column} =
      col_cells
      |> Enum.zip(constant_column)
      |> Enum.reduce({[], []}, fn
        {^base_cell = col_cell, constant}, {col_cells, constant_column} ->
          {[col_cell | col_cells], [constant | constant_column]}

        {col_cell, constant}, {col_cells, constant_column}
        when is_zero_fraction(col_cell.value) ->
          {[col_cell | col_cells], [constant | constant_column]}

        {col_cell, constant}, {col_cells, constant_column} ->
          coefficient = Fraction.divide(col_cell.value, base_cell.value)
          col_cell = do_eliminate_element(col_cell, coefficient, base_cell)
          constant = do_eliminate_element(constant, coefficient, base_constant)
          {[col_cell | col_cells], [constant | constant_column]}
      end)

    matrix
    |> Matrix.update_col(col_cells)
    |> Matrix.update_col(constant_column)
  end

  defp do_eliminate_element(cell, coefficient, target_cell) do
    Matrix.update_cell(
      cell,
      &(&1 |> Fraction.minus(Fraction.multi(coefficient, target_cell.value)) |> Fraction.reduce())
    )
  end

  # Scales each row so its diagonal entry becomes 1.
  defp identity_matrix(diagonal_matrix) do
    diagonal_matrix
    |> Enum.group_by(& &1.row)
    |> Enum.reduce(diagonal_matrix, fn {row, row_cells}, matrix ->
      %{value: base_value} = Matrix.get_cell(row_cells, row, row)
      coefficient = Fraction.inverse(base_value)

      row_cells =
        Enum.map(row_cells, fn
          %{value: value} = cell when is_zero_fraction(value) ->
            cell

          cell ->
            Matrix.update_cell(cell, &(&1 |> Fraction.multi(coefficient) |> Fraction.reduce()))
        end)

      Matrix.update_row(matrix, row_cells)
    end)
  end

  # The last (highest-index) column of the augmented matrix.
  defp get_constant_column(matrix) do
    col =
      matrix
      |> Enum.map(& &1.col)
      |> Enum.max()

    Matrix.get_col(matrix, col)
  end
end
|
lib/muscat/augmented_matrix.ex
| 0.843879 | 0.547948 |
augmented_matrix.ex
|
starcoder
|
defmodule RlStudy.DP.ValueIterationPlanner do
  # Planner solving an MDP by value iteration: repeatedly sweeps all states,
  # replacing V(s) with the best expected action value, until the largest
  # change in a sweep (delta) drops below a threshold.
  alias RlStudy.DP.Planner
  alias RlStudy.MDP.Environment
  require Logger

  @type t :: %RlStudy.DP.ValueIterationPlanner{
          env: RlStudy.MDP.Environment.t(),
          log: [] | [String.t()]
        }
  defstruct Planner.planner_data()

  defimpl RlStudy.DP.Planner.Plan, for: RlStudy.DP.ValueIterationPlanner do
    # NOTE(review): the declared return type `float()` looks wrong — the
    # body returns whatever `Planner.dict_to_grid/2` yields; confirm
    # against the `Planner` module.
    @spec plan(RlStudy.DP.ValueIterationPlanner.t(), float(), float()) :: float()
    def plan(planner, gamma \\ 0.9, threshold \\ 0.0001) do
      init_planner = Planner.initialize(planner)
      Logger.info("planner: #{inspect(init_planner, pretty: true)}")

      # Initialize V(s) = 0 for every state of the environment.
      v =
        Environment.states(init_planner.env)
        |> Map.new(fn v -> {v, 0} end)

      Logger.debug("v: #{inspect(v, pretty: true)}")
      {:ok, updated_planner, updated_v} = calc(planner, gamma, threshold, v, 0)
      Planner.dict_to_grid(updated_planner, updated_v)
    end

    # One full sweep over the value table; recurses (resetting delta to 0
    # each sweep) until the sweep's max change stays below `threshold`.
    defp calc(planner, gamma, threshold, v, delta) do
      Logger.debug("v: #{inspect(v, pretty: true)}")
      Logger.debug("delta: #{inspect(delta, pretty: true)}")
      # Append a snapshot of the current value grid to the planner's log.
      planner_updated = %{planner | log: planner.log ++ [Planner.dict_to_grid(planner, v)]}

      %{v: v_updated, delta: delta_updated} =
        Enum.reduce(v, %{v: v, delta: delta}, fn {v_state, _v_reward}, acc ->
          if Environment.can_action_at(planner.env, v_state) do
            max_reward = max_reward(planner_updated, gamma, acc.v, v_state)
            Logger.debug("max_reward: #{inspect(max_reward, pretty: true)}")
            Logger.debug("acc.delta: #{inspect(acc.delta, pretty: true)}")
            Logger.debug("acc.v: #{inspect(acc.v, pretty: true)}")
            Logger.debug("v_state: #{inspect(v_state, pretty: true)}")
            # Track the largest absolute update seen in this sweep.
            delta_updating = Enum.max([acc.delta, Kernel.abs(max_reward - acc.v[v_state])])
            v_updating = Map.update(acc.v, v_state, max_reward, fn _value -> max_reward end)
            %{v: v_updating, delta: delta_updating}
          else
            # Non-actionable states (e.g. terminal/blocked) keep their value.
            %{v: acc.v, delta: acc.delta}
          end
        end)

      if delta_updated >= threshold do
        calc(planner_updated, gamma, threshold, v_updated, 0)
      else
        {:ok, planner_updated, v_updated}
      end
    end

    # Bellman backup: max over actions of
    # sum over transitions of prob * (reward + gamma * V(next_state)).
    defp max_reward(planner, gamma, v, state) do
      Environment.actions()
      |> Enum.map(fn action ->
        transitions = Planner.transitions_at(planner, state, action)

        # Note: `state` here rebinds the outer `state` to the transition's
        # next state within the reducer clause.
        Enum.reduce(transitions, 0, fn %{prob: prob, next_state: state, reward: reward}, r ->
          r + prob * (reward + gamma * v[state])
        end)
      end)
      |> Enum.max()
    end
  end
end
|
lib/dp/value_iteration_planner.ex
| 0.636353 | 0.640158 |
value_iteration_planner.ex
|
starcoder
|
defmodule Nectar.Query.Zone do
  @moduledoc """
  Query helpers for `Nectar.Zone`.

  A zone is either country-based or state-based (its `type` field holds
  `"Country"` or `"State"`); every function here dispatches on that type to
  pick the matching association (`:country_zone_members` /
  `:state_zone_members`) and zoneable schema (`Nectar.Country` /
  `Nectar.State`).
  """

  use Nectar.Query, model: Nectar.Zone
  import Ecto, only: [assoc: 2]

  @doc "Fetches the zoneable (country or state) record by id, raising if it is missing."
  def zoneable!(repo, %Nectar.Zone{type: "Country"} = _model, zoneable_id),
    do: repo.get!(Nectar.Country, zoneable_id)

  def zoneable!(repo, %Nectar.Zone{type: "State"} = _model, zoneable_id),
    do: repo.get!(Nectar.State, zoneable_id)

  @doc "Fetches a single zone member of `model` by its id."
  def member_with_id(repo, %Nectar.Zone{type: "Country"} = model, zone_member_id) do
    repo.one(from m in assoc(model, :country_zone_members),
      where: m.id == ^zone_member_id)
  end

  def member_with_id(repo, %Nectar.Zone{type: "State"} = model, zone_member_id) do
    repo.one(from m in assoc(model, :state_zone_members),
      where: m.id == ^zone_member_id)
  end

  @doc "Lists countries/states that are not yet members of `model`."
  def zoneable_candidates(repo, %Nectar.Zone{type: "Country"} = model) do
    existing_zoneable_ids = existing_zoneable_ids(repo, model)
    repo.all(from c in Nectar.Country, where: not c.id in ^existing_zoneable_ids)
  end

  def zoneable_candidates(repo, %Nectar.Zone{type: "State"} = model) do
    existing_zoneable_ids = existing_zoneable_ids(repo, model)
    repo.all(from s in Nectar.State, where: not s.id in ^existing_zoneable_ids)
  end

  @doc "Query for `{member_id, zoneable_name}` pairs of `model`'s members."
  def member_ids_and_names(%Nectar.Zone{type: "Country"} = model) do
    from v in assoc(model, :country_zone_members),
      join: c in Nectar.Country, on: c.id == v.zoneable_id,
      select: {v.id, c.name}
  end

  def member_ids_and_names(%Nectar.Zone{type: "State"} = model) do
    from v in assoc(model, :state_zone_members),
      join: c in Nectar.State, on: c.id == v.zoneable_id,
      select: {v.id, c.name}
  end

  @doc "Runs `member_ids_and_names/1` against `repo`."
  def member_ids_and_names(repo, model), do: repo.all(member_ids_and_names(model))

  @doc "Query for all zone members of `model`."
  def members(%Nectar.Zone{type: "Country"} = model),
    do: from v in assoc(model, :country_zone_members)

  def members(%Nectar.Zone{type: "State"} = model),
    do: from v in assoc(model, :state_zone_members)

  @doc "Runs `members/1` against `repo`."
  def members(repo, model), do: repo.all(members(model))

  # Query selecting the zoneable ids already attached to `model`.
  defp existing_zoneable_ids(%Nectar.Zone{type: "State"} = model),
    do: from cz in assoc(model, :state_zone_members), select: cz.zoneable_id

  defp existing_zoneable_ids(%Nectar.Zone{type: "Country"} = model),
    do: from cz in assoc(model, :country_zone_members), select: cz.zoneable_id

  defp existing_zoneable_ids(repo, model),
    do: repo.all(existing_zoneable_ids(model))
end
|
web/queries/zone.ex
| 0.558327 | 0.552721 |
zone.ex
|
starcoder
|
defmodule Geocalc.Calculator.Polygon do
  @moduledoc false

  alias Geocalc.Calculator

  require Integer

  @doc """
  Check if point is inside a polygon

  ## Example

      iex> import Geocalc.Calculator.Polygon
      iex> polygon = [[1, 2], [3, 4], [5, 2], [3, 0]]
      iex> point = [3, 2]
      iex> point_in_polygon?(polygon, point)
      true

  ## Example

      iex> import Geocalc.Calculator.Polygon
      iex> polygon = [[1, 2], [3, 4], [5, 2], [3, 0]]
      iex> point = [1.5, 3]
      iex> point_in_polygon?(polygon, point)
      false
  """
  def point_in_polygon?(polygon, point) do
    # Cheap bounding-box rejection before the ray-casting test.
    in_box? = point_in_bounding_box?(polygon, point)
    point_in_polygon?(in_box?, polygon, point)
  end

  def point_in_polygon?(false, _polygon, _point), do: false

  def point_in_polygon?(true, polygon, point) do
    # Ray casting: the point is inside when a horizontal ray from it crosses
    # the polygon's edges an odd number of times.
    crossings =
      polygon
      |> to_segments()
      |> Enum.reduce(0, fn segment, acc ->
        [adjusted_segment, adjusted_point] = add_epsilon(segment, point)
        acc + ray_intersects_segment(adjusted_segment, adjusted_point)
      end)

    Integer.is_odd(crossings)
  end

  # Pairs consecutive vertices (closing back to the first) and orients each
  # segment so its second endpoint has the larger-or-equal y.
  def to_segments([first_vertex | _] = polygon) do
    polygon
    |> Enum.chunk_every(2, 1, [first_vertex])
    |> Enum.map(&orient_segment/1)
  end

  def orient_segment([a = [_ax, ay], b = [_bx, by]]) when by >= ay, do: [a, b]
  def orient_segment([b, a]), do: [a, b]

  # Nudge the point's y when it sits exactly on an endpoint's y so the ray
  # never passes through a vertex (which would be counted twice).
  def add_epsilon(segment = [[_ax, ay], [_bx, by]], [px, py]) when py == ay or py == by do
    [segment, [px, py + 0.00000001]]
  end

  def add_epsilon(segment, point), do: [segment, point]

  # Point's y outside the segment's y-span: no crossing possible.
  def ray_intersects_segment([[_ax, ay], [_bx, by]], [_px, py]) when py < ay or py > by do
    0
  end

  # px >= max(ax, bx): the segment lies entirely to the left of the point.
  def ray_intersects_segment([[ax, _ay], [bx, _by]], [px, _py])
      when (ax >= bx and px >= ax) or (bx >= ax and px >= bx) do
    0
  end

  # px < min(ax, bx): the segment lies entirely to the right; one crossing.
  def ray_intersects_segment([[ax, _ay], [bx, _by]], [px, _py])
      when (ax <= bx and px < ax) or (bx <= ax and px < bx) do
    1
  end

  # Ambiguous case: compare the segment's slope with the slope from the
  # segment's lower endpoint to the point.
  def ray_intersects_segment([[ax, ay], [bx, by]], [px, py]) do
    m_red = m_red(ax, ay, bx, by)
    m_blue = m_blue(ax, ay, px, py)

    cond do
      m_blue == :infinity -> 1
      m_red == :infinity -> 0
      m_blue >= m_red -> 1
      true -> 0
    end
  end

  # Slope of the segment; :infinity when it is vertical.
  def m_red(ax, ay, bx, by) when ax != bx do
    (by - ay) / (bx - ax)
  end

  def m_red(_, _, _, _), do: :infinity

  # Slope from the segment's lower endpoint to the point; :infinity when
  # they share the same x.
  def m_blue(ax, ay, px, py) when ax != px do
    (py - ay) / (px - ax)
  end

  def m_blue(_, _, _, _), do: :infinity

  def point_in_bounding_box?(polygon, point) do
    box = Calculator.bounding_box_for_points(polygon)
    Calculator.contains_point?(box, point)
  end
end
|
lib/geocalc/calculator/polygon.ex
| 0.895128 | 0.742492 |
polygon.ex
|
starcoder
|
defmodule Function do
@moduledoc """
A set of functions for working with functions.
There are two types of captured functions: **external** and **local**.
External functions are functions residing in modules that are captured
with `&/1`, such as `&String.length/1`. Local functions are anonymous functions
defined with `fn/1` or with the capture operator `&/1` using `&1`, `&2`,
and so on as replacements.
"""
@type information ::
:arity
| :env
| :index
| :module
| :name
| :new_index
| :new_uniq
| :pid
| :type
| :uniq
@doc """
Captures the given function.
Inlined by the compiler.
## Examples
iex> Function.capture(String, :length, 1)
&String.length/1
"""
@since "1.7.0"
@spec capture(module, atom, arity) :: fun
def capture(module, function_name, arity) do
:erlang.make_fun(module, function_name, arity)
end
@doc """
Returns a keyword list with information about a function.
The returned keys (with the corresponding possible values) for
all types of functions (local and external) are the following:
* `:type` - `:local` (for anonymous functions) or `:external` (for
named functions).
* `:module` - an atom which is the module where the function is defined when
anonymous or the module which the function refers to when it's a named function.
* `:arity` - (integer) the number of arguments the function is to be called with.
* `:name` - (atom) the name of the function.
* `:env` - a list of the environment or free variables. For named
functions, the returned list is always empty.
When `fun` is an anonymous function (that is, the type is `:local`), the following
additional keys are returned:
* `:pid` - PID of the process that originally created the function.
* `:index` - (integer) an index into the module function table.
* `:new_index` - (integer) an index into the module function table.
* `:new_uniq` - (binary) a unique value for this function. It's
calculated from the compiled code for the entire module.
* `:uniq` - (integer) a unique value for this function. This integer is
calculated from the compiled code for the entire module.
**Note**: this function must be used only for debugging purposes.
Inlined by the compiler.
## Examples
iex> fun = fn x -> x end
iex> info = Function.info(fun)
iex> Keyword.get(info, :arity)
1
iex> Keyword.get(info, :type)
:local
iex> fun = &String.length/1
iex> info = Function.info(fun)
iex> Keyword.get(info, :type)
:external
iex> Keyword.get(info, :name)
:length
"""
@since "1.7.0"
@spec info(fun) :: [{information, term}]
def info(fun), do: :erlang.fun_info(fun)
@doc """
Returns a specific piece of information about the function, as a
two-element tuple in the shape of `{item, value}`.

For any function, `item` can be any of the atoms `:module`, `:name`,
`:arity`, `:env`, or `:type`.

For anonymous functions, `item` can additionally be any of the atoms
`:index`, `:new_index`, `:new_uniq`, `:uniq`, and `:pid`. For a named
function, the value of any of these items is always the atom `:undefined`.

For more information on each of the possible returned values, see `info/1`.

Inlined by the compiler.

## Examples

    iex> f = fn x -> x end
    iex> Function.info(f, :arity)
    {:arity, 1}
    iex> Function.info(f, :type)
    {:type, :local}

    iex> fun = &String.length/1
    iex> Function.info(fun, :name)
    {:name, :length}
    iex> Function.info(fun, :pid)
    {:pid, :undefined}

"""
@since "1.7.0"
@spec info(fun, item) :: {item, term} when item: information
def info(fun, item) do
  :erlang.fun_info(fun, item)
end
end
|
lib/elixir/lib/function.ex
| 0.887522 | 0.950732 |
function.ex
|
starcoder
|
defmodule Ibanity.Xs2a.FinancialInstitution do
  @moduledoc """
  [Financial institutions](https://documentation.ibanity.com/xs2a/api#financial-institution) API wrapper
  """

  use Ibanity.Resource

  defstruct id: nil,
            sandbox: true,
            name: nil,
            self_link: nil,
            bic: nil,
            logo_url: nil,
            max_requested_account_references: nil,
            min_requested_account_references: nil,
            primary_color: nil,
            secondary_color: nil,
            requires_credential_storage: nil,
            country: nil,
            future_dated_payments_allowed: nil,
            requires_customer_ip_address: nil,
            status: nil,
            bulk_payments_enabled: nil,
            payments_enabled: nil,
            periodic_payments_enabled: nil,
            bulk_payments_product_types: nil,
            payments_product_types: nil,
            periodic_payments_product_types: nil,
            authorization_models: nil,
            financial_institution_customer_reference_required: nil,
            shared_brand_reference: nil,
            shared_brand_name: nil,
            maintenance_from: nil,
            maintenance_to: nil,
            maintenance_type: nil

  @resource_type "financial_institution"
  @sandbox_api_schema_path ["sandbox", "financialInstitutions"]
  @find_api_schema_path ["xs2a", "financialInstitutions"]

  @doc """
  Lists all financial institutions in `sandbox` environment.

  See `list/1`
  """
  def list, do: list(%Request{})

  @doc """
  [Lists all financial institutions](https://documentation.ibanity.com/xs2a/api#list-financial-institutions).

  If the request has a valid [customer access token](https://documentation.ibanity.com/xs2a/api#customer-access-token) set,
  it will reach the `live` endpoint of the API and list financial institutions the customer linked to this token belongs to.
  If it's not set it will reach the `sandbox` endpoint.

  Returns `{:ok, collection}` where `collection` is a `Ibanity.Collection` where items are of type `Ibanity.Xs2a.FinancialInstitution`,
  otherwise it returns `{:error, reason}`.

  ## Example

      iex> FinancialInstitution.list
      {:ok, %Ibanity.Collection{items: [%Ibanity.FinancialInstitution{...}], ...}
  """
  def list(%Request{customer_access_token: nil} = request) do
    request
    |> Request.id(:id, "")
    |> Client.execute(:get, ["xs2a", "financialInstitutions"])
  end

  def list(%Request{} = request) do
    request
    |> Client.execute(:get, ["xs2a", "customer", "financialInstitutions"])
  end

  @doc """
  [Retrieves a financial institution](https://documentation.ibanity.com/xs2a/api#get-financial-institution).

  If the argument is a binary, it will create an empty request and use that value as the id.
  If it's a request it will use it _as-is_.

  If the request has a valid [customer access token](https://documentation.ibanity.com/xs2a/api#customer-access-token) set,
  it will reach the `live` endpoint of the API. If it's not set it will reach the `sandbox` endpoint.

  Returns `{:ok, institution}` if successful, `{:error, reason}` otherwise.

  ## Examples

      iex> Ibanity.FinancialInstitution.find("55c09df6-0bdd-46ef-8e66-e5297e0e8a7f")
      {:ok, %Ibanity.FinancialInstitution{id: "55c09df6-0bdd-46ef-8e66-e5297e0e8a7f", ...}}

      iex> token
      ...> |> Request.customer_access_token
      ...> |> Request.id(:id, "55c09df6-0bdd-46ef-8e66-e5297e0e8a7f")
      ...> |> FinancialInstitution.find
      {:ok, %Ibanity.FinancialInstitution{id: "55c09df6-0bdd-46ef-8e66-e5297e0e8a7f", ...}}
  """
  def find(id) when is_binary(id), do: find(%Request{resource_ids: [id: id]})

  def find(%Request{} = request) do
    request
    |> Client.execute(:get, @find_api_schema_path)
  end

  @doc """
  [Creates a new financial institution](https://documentation.ibanity.com/xs2a/api#create-financial-institution).

  Note: works only in `sandbox` environment

  Returns `{:ok, institution}` if successful, `{:error, reason}` otherwise.

  ## Example

      iex> [
      ...>   sandbox: true,
      ...>   name: "MetaBank"
      ...> ]
      ...> |> Request.attributes
      ...> |> FinancialInstitution.create
      {:ok, %Ibanity.FinancialInstitution{id: "4b52d43c-433d-41e0-96f2-c2e38a24b25e", ...}}
  """
  def create(%Request{} = request) do
    request
    |> Request.id(:id, "")
    |> Request.resource_type(@resource_type)
    |> Client.execute(:post, @sandbox_api_schema_path)
  end

  @doc """
  [Updates an existing financial institution](https://documentation.ibanity.com/xs2a/api#update-financial-institution).

  Note: works only in `sandbox` environment

  Returns `{:ok, institution}` if successful, `{:error, reason}` otherwise.

  ## Example

      iex> [
      ...>   sandbox: true,
      ...>   name: "metaBank"
      ...> ]
      ...> |> Request.attributes
      ...> |> Request.id(:id, "4b52d43c-433d-41e0-96f2-c2e38a24b25e")
      ...> |> FinancialInstitution.update
      {:ok, %Ibanity.FinancialInstitution{id: "4b52d43c-433d-41e0-96f2-c2e38a24b25e", ...}}
  """
  def update(%Request{} = request) do
    request
    |> Request.resource_type(@resource_type)
    |> Client.execute(:patch, @sandbox_api_schema_path)
  end

  @doc """
  [Deletes a financial institution](https://documentation.ibanity.com/xs2a/api#delete-financial-institution).

  If the argument is a binary, it will create an empty request and use that value as the id.
  If it's a request it will use it _as-is_.

  Note: works only in `sandbox` environment

  Returns `{:ok, institution}` if successful, `{:error, reason}` otherwise.

  ## Examples

      iex> Ibanity.FinancialInstitution.delete("55c09df6-0bdd-46ef-8e66-e5297e0e8a7f")
      {:ok, %Ibanity.FinancialInstitution{id: "55c09df6-0bdd-46ef-8e66-e5297e0e8a7f", ...}}

      iex> %Request{}
      ...> |> Request.id(:id, "55c09df6-0bdd-46ef-8e66-e5297e0e8a7f")
      ...> |> FinancialInstitution.delete
      {:ok, %Ibanity.FinancialInstitution{id: "55c09df6-0bdd-46ef-8e66-e5297e0e8a7f", ...}}
  """
  def delete(id) when is_binary(id), do: delete(%Request{resource_ids: [id: id]})

  def delete(%Request{} = request) do
    request
    |> Client.execute(:delete, @sandbox_api_schema_path)
  end

  # Maps JSON:API payload paths to struct keys for deserialization.
  # NOTE(review): several boolean-like attributes (e.g. `sandbox`) are mapped
  # as `:string` while others use `:boolean` — confirm against actual API
  # payloads before normalizing; left unchanged to preserve behavior.
  @doc false
  def key_mapping do
    [
      id: {~w(id), :string},
      sandbox: {~w(attributes sandbox), :string},
      name: {~w(attributes name), :string},
      self_link: {~w(links self), :string},
      bic: {~w(attributes bic), :string},
      logo_url: {~w(attributes logoUrl), :string},
      max_requested_account_references: {~w(attributes maxRequestedAccountReferences), :integer},
      min_requested_account_references: {~w(attributes minRequestedAccountReferences), :integer},
      primary_color: {~w(attributes primaryColor), :string},
      secondary_color: {~w(attributes secondaryColor), :string},
      requires_credential_storage: {~w(attributes requiresCredentialStorage), :boolean},
      country: {~w(attributes country), :string},
      future_dated_payments_allowed: {~w(attributes futureDatedPaymentsAllowed), :boolean},
      requires_customer_ip_address: {~w(attributes requiresCustomerIpAddress), :boolean},
      status: {~w(attributes status), :string},
      bulk_payments_enabled: {~w(attributes bulkPaymentsEnabled), :boolean},
      payments_enabled: {~w(attributes paymentsEnabled), :boolean},
      periodic_payments_enabled: {~w(attributes periodicPaymentsEnabled), :boolean},
      bulk_payments_product_types: {~w(attributes bulkPaymentsProductTypes), :string},
      payments_product_types: {~w(attributes paymentsProductTypes), :string},
      periodic_payments_product_types: {~w(attributes periodicPaymentsProductTypes), :string},
      authorization_models: {~w(attributes authorizationModels), :string},
      financial_institution_customer_reference_required: {~w(attributes financialInstitutionCustomerReferenceRequired), :boolean},
      shared_brand_reference: {~w(attributes sharedBrandReference), :string},
      shared_brand_name: {~w(attributes sharedBrandName), :string},
      maintenance_from: {~w(attributes maintenanceFrom), :datetime},
      maintenance_to: {~w(attributes maintenanceTo), :datetime},
      maintenance_type: {~w(attributes maintenanceType), :string}
    ]
  end
end
|
lib/ibanity/api/xs2a/financial_institution.ex
| 0.796609 | 0.405802 |
financial_institution.ex
|
starcoder
|
defmodule AWS.DynamoDBStreams do
  @moduledoc """
  Amazon DynamoDB

  Amazon DynamoDB Streams provides API actions for accessing streams and
  processing stream records.

  To learn more about application development with Streams, see [Capturing Table Activity with DynamoDB
  Streams](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Streams.html)
  in the Amazon DynamoDB Developer Guide.
  """

  alias AWS.Client
  alias AWS.Request

  # Static service descriptor consumed by `AWS.Request` when building,
  # signing, and dispatching calls for this service.
  def metadata do
    %AWS.ServiceMetadata{
      abbreviation: nil,
      api_version: "2012-08-10",
      content_type: "application/x-amz-json-1.0",
      credential_scope: nil,
      endpoint_prefix: "streams.dynamodb",
      global?: false,
      protocol: "json",
      service_id: "DynamoDB Streams",
      signature_version: "v4",
      signing_name: "dynamodb",
      target_prefix: "DynamoDBStreams_20120810"
    }
  end

  @doc """
  Returns information about a stream: its current status, Amazon Resource Name
  (ARN), the composition of its shards, and the corresponding DynamoDB table.

  You can call `DescribeStream` at a maximum rate of 10 times per second.

  Each shard in the stream has a `SequenceNumberRange` associated with it. If the
  `SequenceNumberRange` has a `StartingSequenceNumber` but no
  `EndingSequenceNumber`, then the shard is still open (able to receive more
  stream records). If both `StartingSequenceNumber` and `EndingSequenceNumber` are
  present, then that shard is closed and can no longer receive more data.
  """
  def describe_stream(%Client{} = client, input, options \\ []) do
    client
    |> Request.request_post(metadata(), "DescribeStream", input, options)
  end

  @doc """
  Retrieves the stream records from a given shard.

  The `ShardIterator` parameter specifies the position in the shard from which to
  start reading stream records sequentially. If there are no stream records
  available in the portion of the shard that the iterator points to, `GetRecords`
  returns an empty list. Note that it might take multiple calls to get to a
  portion of the shard that contains stream records.

  `GetRecords` can retrieve a maximum of 1 MB of data or 1000 stream records,
  whichever comes first.
  """
  def get_records(%Client{} = client, input, options \\ []) do
    client
    |> Request.request_post(metadata(), "GetRecords", input, options)
  end

  @doc """
  Returns a shard iterator.

  A shard iterator provides information about how to retrieve the stream records
  from within a shard. Use the shard iterator in a subsequent `GetRecords` request
  to read the stream records from the shard.

  A shard iterator expires 15 minutes after it is returned to the requester.
  """
  def get_shard_iterator(%Client{} = client, input, options \\ []) do
    client
    |> Request.request_post(metadata(), "GetShardIterator", input, options)
  end

  @doc """
  Returns an array of stream ARNs associated with the current account and
  endpoint.

  If the `TableName` parameter is present, then `ListStreams` will return only the
  streams ARNs for that table.

  You can call `ListStreams` at a maximum rate of 5 times per second.
  """
  def list_streams(%Client{} = client, input, options \\ []) do
    client
    |> Request.request_post(metadata(), "ListStreams", input, options)
  end
end
|
lib/aws/generated/dynamodb_streams.ex
| 0.865409 | 0.400251 |
dynamodb_streams.ex
|
starcoder
|
defmodule Multichain.Super do
  @moduledoc """
  Combines basic Multichain APIs to perform common tasks, such as creating
  addresses and sending assets using external keypairs.

  This collection also contains handy functions used by finance admins, such as
  issuing assets, reissuing assets, blocking an address, etc.
  """
  alias Multichain.Http

  @doc """
  This function will return a keypair which address is imported to the node as watch only address. This is used if you want to generate address which node cannot have control of it.

  Having this kind of address, only you as private key owner can spend the asset in it. The node doesn't store your private key and once you saw it it will never appear again. So ensure you record your private key somewhere secure.

  This kind of address transfers security responsibility to you instead of the node server. The security of the private key is handled externally.
  """
  def create_external_address do
    # 1. create keypair
    case Http.jsonrpccall("createkeypairs", [1]) do
      {:ok, result} ->
        [keypair] = result["result"]

        # 2. import the address into the node as watch-only
        case Http.jsonrpccall("importaddress", [keypair["address"], "", false]) do
          # 3. grant send/receive permission
          {:ok, _result2} ->
            case Http.jsonrpccall("grant", [keypair["address"], "send,receive"]) do
              {:ok, _result4} -> {:ok, keypair}
              other -> other
            end

          other ->
            other
        end

      other ->
        other
    end
  end

  @doc """
  This function will return a list of all addresses who have issue permission.

  These addresses can issue any new asset.
  """
  def who_can_issue() do
    case Http.jsonrpccall("listpermissions", ["issue"]) do
      {:ok, result} -> {:ok, result["result"]}
      error -> error
    end
  end

  @doc """
  This function will reissue another quantity of an existing asset, using the
  first address holding issue permission as both issuer and recipient.

  If you want to create a new type of asset, use `Multichain.api/2` and its
  respective parameters based on the Multichain API documentation.
  """
  def print_money(assetname, qty) do
    case who_can_issue() do
      # No address holds issue permission — cannot reissue.
      {:ok, []} ->
        {:error, "No issuer found"}

      {:ok, [first | _]} ->
        issuer = first["address"]

        case Http.jsonrpccall("issuemorefrom", [issuer, issuer, assetname, qty, 0]) do
          {:ok, result} -> {:ok, result}
          error -> error
        end

      error ->
        error
    end
  end

  @doc """
  This function is used to transfer asset from an external address, owning the private key.

  If the key is handled externally, you can transfer asset from one address to another using this method. If it is an internal address, use `transfer/4` instead.
  """
  def transfer(assetcode, from, to, qty, privkey) do
    # Build raw tx -> sign locally with the external private key -> broadcast.
    case Http.jsonrpccall("createrawsendfrom", [from, %{to => %{assetcode => qty}}]) do
      {:ok, %{"error" => nil, "id" => nil, "result" => result}} ->
        case Http.jsonrpccall("signrawtransaction", [result, [], [privkey]]) do
          {:ok, %{"error" => nil, "id" => nil, "result" => %{"complete" => true, "hex" => hex}}} ->
            case Http.jsonrpccall("sendrawtransaction", [hex]) do
              {:ok, %{"error" => nil, "id" => nil, "result" => trxid}} -> {:ok, trxid}
              other -> other
            end

          other ->
            other
        end

      other ->
        other
    end
  end

  @doc """
  This function is used to transfer asset from an internal address.

  If the address exists in `list_internal_address/0` then we can use this function to transfer asset.
  """
  def transfer(assetcode, from, to, qty) do
    case Http.jsonrpccall("sendassetfrom", [from, to, assetcode, qty]) do
      {:ok, result} -> {:ok, result["result"]}
      error -> error
    end
  end

  @doc """
  This function will return all addresses managed and owned by this node.

  If you want to see all addresses watched by this node but owned externally, use `list_external_address/0`
  """
  def list_internal_address() do
    case Multichain.api("getaddresses", [true]) do
      {:ok, result} ->
        hasil = result["result"] |> Enum.filter(fn x -> x["ismine"] == true end)
        {:ok, %{"count" => length(hasil), "result" => hasil}}

      other ->
        other
    end
  end

  @doc """
  This function will return all addresses watched by this node but not belonging to the node's wallet, which means we cannot transfer any asset unless we know the private key.

  If you want to see all addresses which can be used without a private key, use `list_internal_address/0`
  """
  def list_external_address() do
    case Multichain.api("getaddresses", [true]) do
      {:ok, result} ->
        hasil = result["result"] |> Enum.filter(fn x -> x["ismine"] == false end)
        {:ok, %{"count" => length(hasil), "result" => hasil}}

      other ->
        other
    end
  end

  @doc """
  This function generates an internal address which can be used without a private key. This will also give send and receive permission to the address.

  If you want to create an address without permission you can use `Multichain.api("getnewaddress", [])`
  """
  def create_internal_address() do
    case Http.jsonrpccall("getnewaddress", []) do
      {:ok, result} ->
        case grant_send_receive(result["result"]) do
          {:ok, _} -> {:ok, result["result"]}
          other -> other
        end

      error ->
        error
    end
  end

  @doc """
  This function tops up any asset from the primary Node's wallet — also the address which usually has issue permission.
  """
  def topup(address, assetcode, qty) do
    case Http.jsonrpccall("sendasset", [address, assetcode, qty]) do
      {:ok, result} -> {:ok, result["result"]}
      error -> error
    end
  end

  @doc """
  This function is used to revoke send permission of a particular address.
  """
  def block(address) do
    case Http.jsonrpccall("revoke", [address, "send"]) do
      {:ok, result} -> {:ok, result["result"]}
      error -> error
    end
  end

  @doc """
  This function is used to give send permission to an address which previously has been blocked.
  """
  def unblock(address) do
    case Http.jsonrpccall("grant", [address, "send"]) do
      {:ok, result} -> {:ok, result["result"]}
      error -> error
    end
  end

  @doc """
  This is a helper to grant send and receive permission to a particular address.
  """
  def grant_send_receive(address) do
    case Http.jsonrpccall("grant", [address, "send,receive"]) do
      {:ok, result} -> {:ok, result["result"]}
      error -> error
    end
  end

  @doc """
  This is a helper function to check whether an address is blocked (lacks send
  permission). Returns `true` when the address has NO send permission.

  You can unblock (give send permission) by using `unblock/1`
  """
  def blockstatus(address) do
    case Multichain.api("listpermissions", ["*", address]) do
      {:ok, result} -> find_send_permission(result["result"])
      error -> error
    end
  end

  @doc """
  Publishes a key/value item to a stream from the given address, signing the
  transaction with an externally held private key.

  The value is hex-encoded (`Base.encode16/1`) before publishing, as required by
  the `createrawsendfrom` stream-item payload.
  """
  def publish_stream(addr, streamname, key, value, privkey) do
    case Multichain.api("createrawsendfrom", [
           addr,
           %{},
           [%{"for" => streamname, "key" => key, "data" => Base.encode16(value)}]
         ]) do
      {:ok, %{"error" => _error, "id" => _id, "result" => result}} ->
        case Multichain.api("signrawtransaction", [result, [], [privkey], nil]) do
          {:ok, %{"error" => nil, "id" => nil, "result" => %{"complete" => true, "hex" => hex}}} ->
            Multichain.api("sendrawtransaction", [hex])

          other ->
            other
        end

      other ->
        other
    end
  end

  # Returns the raw stream item map, or the error tuple on failure.
  def get_stream_data!(stream, txid) do
    case Multichain.api("getstreamitem", [stream, txid]) do
      {:ok, %{"error" => nil, "id" => nil, "result" => result}} -> result
      other -> other
    end
  end

  # Returns the decoded stream item payload, :error when decoding fails,
  # or the error tuple on API failure.
  # NOTE(review): the node returns hex data in lowercase, hence `case: :lower`;
  # data published via other tools in uppercase hex would fail to decode — verify.
  def get_stream_data(stream, txid) do
    case Multichain.api("getstreamitem", [stream, txid]) do
      {:ok, %{"error" => nil, "id" => nil, "result" => result}} ->
        case Base.decode16(result["data"], case: :lower) do
          {:ok, string} -> string
          _ -> :error
        end

      other ->
        other
    end
  end

  # ------------------------------------------------Private Area ----------------------------------

  # Returns true when the permission list contains NO "send" entry,
  # i.e. the address is blocked.
  defp find_send_permission(list) do
    case Enum.filter(list, fn x -> x["type"] == "send" end) do
      [] -> true
      _ -> false
    end
  end
end
|
lib/multichainsuper.ex
| 0.747247 | 0.414958 |
multichainsuper.ex
|
starcoder
|
defmodule DiscordBot.Gateway.Heartbeat do
  @moduledoc """
  Handles the heartbeat protocol for a single websocket.

  Utilizes a `DiscordBot.Broker`, to which a `DiscordBot.Gateway.Connection`
  is actively posting events in order to schedule and provide
  heartbeat messages over the websocket.

  By default, Discord requires that a heartbeat be sent over each
  connection at a specified interval. In addition, Discord may request that
  an additional heartbeat be sent at any time, out-of-band of the normal
  schedule, to be used for ping tracking. Discord will also acknowledge
  scheduled heartbeats with an ACK event over the websocket.

  This GenServer provides a scheduling mechanism for heartbeat messages,
  as well as a provider for out-of-band heartbeats. In addition, it tracks
  the acknowledgements for these heartbeats, and is the primary mechanism
  for determining if a `DiscordBot.Gateway.Connection` is zombied or failed.
  If this occurs, the connection will be restarted automatically.
  """

  use GenServer
  require Logger

  alias DiscordBot.Broker
  alias DiscordBot.Broker.Event

  defmodule State do
    @enforce_keys [:status, :broker]
    @moduledoc false

    # Internal provider state:
    # - :status    — :waiting (idle) or :running (actively heartbeating)
    # - :target    — pid receiving :heartbeat messages (the connection)
    # - :target_ref — monitor ref on :target, so we can go idle if it dies
    # - :sender    — timer ref from Process.send_after for the next beat
    # - :acked     — whether the last heartbeat was acknowledged
    defstruct [
      :status,
      :target,
      :target_ref,
      :interval,
      :sender,
      :broker,
      :acked,
      :last_ack_time,
      :last_heartbeat_time,
      :ping
    ]

    @type status :: atom
    @type target :: pid
    @type target_ref :: reference
    @type interval :: number
    @type sender :: pid
    @type broker :: pid
    @type acked :: boolean
    @type last_ack_time :: DateTime.t()
    @type last_heartbeat_time :: DateTime.t()
    @type ping :: integer
    @type t :: %__MODULE__{
            status: status,
            target: target,
            target_ref: target_ref,
            interval: interval,
            sender: sender,
            broker: broker,
            acked: acked,
            last_ack_time: last_ack_time,
            last_heartbeat_time: last_heartbeat_time,
            ping: ping
          }
  end

  @doc """
  Starts the heartbeat provider.

  Options (required):
  - `:broker` - a `DiscordBot.Broker` process to listen to for events.
  """
  def start_link(opts) do
    # NOTE(review): defaults to the atom Elixir.Broker (a registered name),
    # not DiscordBot.Broker — confirm this default is intentional.
    broker = Keyword.get(opts, :broker, Elixir.Broker)

    state = %State{
      status: :waiting,
      target: nil,
      target_ref: nil,
      interval: nil,
      sender: nil,
      broker: broker,
      acked: false,
      last_ack_time: nil,
      last_heartbeat_time: nil,
      ping: nil
    }

    GenServer.start_link(__MODULE__, state, opts)
  end

  @doc """
  Gets the current status of the heartbeat `provider`.

  Returns either `:waiting:`, if the provider is inactive,
  or `:running:`, if the provider is actively providing heartbeats.
  """
  @spec status?(pid) :: :running | :waiting
  def status?(provider) do
    GenServer.call(provider, {:status})
  end

  @doc """
  Returns the process that the provider is working for,
  or `nil` if there is none.
  """
  @spec target?(pid) :: pid | nil
  def target?(provider) do
    GenServer.call(provider, {:target})
  end

  @doc """
  Returns the interval of the current scheduled heartbeat,
  or `nil` if there is none.
  """
  @spec interval?(pid) :: integer | nil
  def interval?(provider) do
    GenServer.call(provider, {:interval})
  end

  @doc """
  Returns whether the most recently sent heartbeat has been acknowledged.
  """
  @spec acknowledged?(pid) :: bool
  def acknowledged?(provider) do
    GenServer.call(provider, {:acknowledged})
  end

  @doc """
  Returns the time of the most recent heartbeat acknowledgement.
  """
  @spec last_ack_time?(pid) :: DateTime.t() | nil
  def last_ack_time?(provider) do
    GenServer.call(provider, {:last_ack_time})
  end

  @doc """
  Returns the time of the most recent heartbeat.
  """
  @spec last_heartbeat_time?(pid) :: DateTime.t() | nil
  def last_heartbeat_time?(provider) do
    GenServer.call(provider, {:last_heartbeat_time})
  end

  @doc """
  Gets the most recently measured ping value, or `nil` if no such value exists.
  """
  @spec ping?(pid) :: integer | nil
  def ping?(provider) do
    GenServer.call(provider, {:ping})
  end

  @doc """
  Schedules the provider to send a heartbeat message
  every `interval` milliseconds.
  """
  @spec schedule(pid, integer) :: :ok | {:overwrote, pid}
  def schedule(provider, interval) do
    GenServer.call(provider, {:schedule, interval})
  end

  @doc """
  Schedules the provider to send a heartbeat message
  every `interval` milliseconds, to the process `pid`.
  """
  @spec schedule(pid, integer, pid) :: :ok | {:overwrote, pid}
  def schedule(provider, interval, pid) do
    GenServer.call(provider, {:schedule, interval, pid})
  end

  @doc """
  Acknowledges the most recent heartbeat.
  """
  @spec acknowledge(pid) :: :ok
  def acknowledge(provider) do
    GenServer.call(provider, :acknowledge)
  end

  ## Handlers

  def init(state) do
    # Subscribe to the gateway events that drive the heartbeat protocol:
    # :hello starts the schedule, :heartbeat requests an out-of-band beat,
    # :heartbeat_ack records acknowledgement.
    Broker.subscribe(state.broker, :hello)
    Broker.subscribe(state.broker, :heartbeat)
    Broker.subscribe(state.broker, :heartbeat_ack)
    {:ok, state}
  end

  # Simple state accessors for the client API above.
  def handle_call({:status}, _from, state) do
    {:reply, state.status, state}
  end

  def handle_call({:target}, _from, state) do
    {:reply, state.target, state}
  end

  def handle_call({:interval}, _from, state) do
    {:reply, state.interval, state}
  end

  def handle_call({:acknowledged}, _from, state) do
    {:reply, state.acked, state}
  end

  def handle_call({:last_ack_time}, _from, state) do
    {:reply, state.last_ack_time, state}
  end

  def handle_call({:last_heartbeat_time}, _from, state) do
    {:reply, state.last_heartbeat_time, state}
  end

  def handle_call({:ping}, _from, state) do
    {:reply, state.ping, state}
  end

  # Scheduling: when idle, start beating for the caller (or given pid);
  # when already running, the old target is replaced and reported back.
  def handle_call({:schedule, interval}, {from, _ref}, %State{status: :waiting} = state) do
    new_state = start_heartbeat(state, from, interval)
    {:reply, :ok, new_state}
  end

  def handle_call({:schedule, interval}, {from, _ref}, %State{status: :running} = state) do
    new_state = start_heartbeat(state, from, interval)
    {:reply, {:overwrote, state.target}, new_state}
  end

  def handle_call({:schedule, interval, pid}, _from, %State{status: :waiting} = state) do
    new_state = start_heartbeat(state, pid, interval)
    {:reply, :ok, new_state}
  end

  def handle_call({:schedule, interval, pid}, _from, %State{status: :running} = state) do
    new_state = start_heartbeat(state, pid, interval)
    {:reply, {:overwrote, state.target}, new_state}
  end

  def handle_call(:acknowledge, _from, state) do
    {:reply, :ok, acknowledge_internal(state)}
  end

  # Discord's Hello event carries the heartbeat interval; start the schedule.
  def handle_info(%Event{publisher: pid, message: message, topic: :hello}, state) do
    interval = message.heartbeat_interval
    new_state = start_heartbeat(state, pid, interval)
    {:noreply, new_state}
  end

  # Discord asked for an immediate (out-of-band) heartbeat.
  def handle_info(%Event{publisher: pid, topic: :heartbeat}, state) do
    if state.status == :running and pid == state.target do
      Logger.info("Discord requested a heartbeat to be sent out-of-band. Responding...")
      send(pid, :heartbeat)
    end

    {:noreply, state}
  end

  def handle_info(%Event{topic: :heartbeat_ack}, state) do
    {:noreply, acknowledge_internal(state)}
  end

  # The monitored target died — drop the schedule and go idle.
  def handle_info({:DOWN, _ref, :process, _object, _reason}, state) do
    new_state = go_idle(state)
    {:noreply, new_state}
  end

  # Timer tick: if the previous beat was acked, send the next one and
  # re-arm the timer; otherwise the connection is zombied — close it.
  def handle_info(:heartbeat, state) do
    cond do
      state.target == nil ->
        {:noreply, state}

      state.acked ->
        send(state.target, :heartbeat)
        sender = Process.send_after(self(), :heartbeat, state.interval)

        new_state = %{
          state
          | sender: sender,
            acked: false,
            last_heartbeat_time: DateTime.utc_now()
        }

        {:noreply, new_state}

      true ->
        Logger.error(
          "Discord did not acknowledge a heartbeat for an entire cycle. Closing the affected connection and reestablishing."
        )

        # 4000-range close code signals the connection to reconnect.
        send(state.target, {:disconnect, 4_000})
        {:noreply, go_idle(state)}
    end
  end

  # Begins (or restarts) heartbeating `pid` every `interval` ms.
  # Monitors the target so we can go idle if it exits; `acked: true`
  # allows the first scheduled beat to fire.
  defp start_heartbeat(state, pid, interval) do
    idle_state = go_idle(state)
    ref = Process.monitor(pid)
    sender = Process.send_after(self(), :heartbeat, interval)

    %{
      idle_state
      | status: :running,
        target: pid,
        interval: interval,
        target_ref: ref,
        sender: sender,
        acked: true,
        last_ack_time: nil,
        last_heartbeat_time: nil,
        ping: nil
    }
  end

  # Resets the provider back to the :waiting state, clearing all
  # schedule-related fields.
  defp go_idle(state) do
    %{
      state
      | status: :waiting,
        target: nil,
        interval: nil,
        target_ref: nil,
        sender: nil,
        last_ack_time: nil,
        last_heartbeat_time: nil,
        ping: nil
    }
  end

  # Records an acknowledgement and measures ping as ack-time minus the
  # last heartbeat send time.
  # NOTE(review): if an ack arrives before any heartbeat was sent,
  # `state.last_heartbeat_time` is nil and DateTime.diff/3 would raise —
  # confirm whether the broker can deliver :heartbeat_ack in that window.
  defp acknowledge_internal(state) do
    utc_now = DateTime.utc_now()

    %{
      state
      | acked: true,
        last_ack_time: utc_now,
        ping: DateTime.diff(utc_now, state.last_heartbeat_time, :millisecond)
    }
  end
end
|
apps/discordbot/lib/discordbot/gateway/heartbeat.ex
| 0.917423 | 0.402774 |
heartbeat.ex
|
starcoder
|
defmodule Elasticsearch.Index.Bulk do
  @moduledoc """
  Functions for creating bulk indexing requests.
  """

  alias Elasticsearch.{
    DataStream,
    Document
  }

  require Logger

  @doc """
  Encodes a given variable into an Elasticsearch bulk request. The variable
  must implement `Elasticsearch.Document`.

  ## Examples

      iex> Bulk.encode(%Post{id: "my-id"}, "my-index")
      {:ok, \"\"\"
      {"create":{"_type":"post","_index":"my-index","_id":"my-id"}}
      {"title":null,"author":null}
      \"\"\"}

      iex> Bulk.encode(123, "my-index")
      {:error,
       %Protocol.UndefinedError{description: "",
        protocol: Elasticsearch.Document, value: 123}}
  """
  @spec encode(struct, String.t()) ::
          {:ok, String.t()}
          | {:error, Error.t()}
  def encode(struct, index) do
    {:ok, encode!(struct, index)}
  rescue
    exception ->
      {:error, exception}
  end

  @doc """
  Same as `encode/1`, but returns the request and raises errors.

  ## Example

      iex> Bulk.encode!(%Post{id: "my-id"}, "my-index")
      \"\"\"
      {"create":{"_type":"post","_index":"my-index","_id":"my-id"}}
      {"title":null,"author":null}
      \"\"\"

      iex> Bulk.encode!(123, "my-index")
      ** (Protocol.UndefinedError) protocol Elasticsearch.Document not implemented for 123. This protocol is implemented for: Post
  """
  def encode!(struct, index) do
    header = header("create", index, struct)

    document =
      struct
      |> Document.encode()
      |> Poison.encode!()

    # Bulk API format: action/metadata line, then the source document,
    # each newline-terminated.
    "#{header}\n#{document}\n"
  end

  @doc """
  Uploads all the data from the list of `sources` to the given index.

  Data for each `source` will be fetched using the configured `:store`.

  Returns `:ok` when every page uploads cleanly, or `{:error, errors}` with
  the accumulated per-document/per-request errors.
  """
  @spec upload(String.t(), Elasticsearch.Store.t(), list) :: :ok | {:error, [map]}
  def upload(index_name, store, sources, errors \\ [])
  def upload(_index_name, _store, [], []), do: :ok
  def upload(_index_name, _store, [], errors), do: {:error, errors}

  def upload(index_name, store, [source | tail] = _sources, errors) do
    errors =
      source
      |> DataStream.stream(store)
      |> Stream.map(&encode!(&1, index_name))
      |> Stream.chunk_every(config()[:bulk_page_size])
      |> Stream.map(&Elasticsearch.put("/#{index_name}/_bulk", Enum.join(&1)))
      |> Enum.reduce(errors, &collect_errors/2)

    # BUGFIX: the recursive call previously dropped `store`
    # (`upload(index_name, tail, errors)`), which re-entered the /4 head via
    # the /3 default with `tail` bound as the store — pass all four args.
    upload(index_name, store, tail, errors)
  end

  # Accumulates per-item "create" errors from a bulk response that reported
  # `"errors" => true`.
  defp collect_errors({:ok, %{"errors" => true} = response}, errors) do
    new_errors =
      response["items"]
      |> Enum.filter(&(&1["create"]["error"] != nil))
      |> Enum.map(& &1["create"])
      |> Enum.map(&Elasticsearch.Exception.exception(response: &1))

    new_errors ++ errors
  end

  # A failed HTTP request contributes its error directly.
  defp collect_errors({:error, error}, errors) do
    [error | errors]
  end

  # Successful responses without item errors contribute nothing.
  defp collect_errors(_response, errors) do
    errors
  end

  # Builds the JSON action/metadata line for one bulk operation.
  defp header(type, index, struct) do
    attrs = %{
      "_index" => index,
      "_type" => Document.type(struct),
      "_id" => Document.id(struct)
    }

    header =
      %{}
      |> Map.put(type, attrs)
      |> put_parent(type, struct)

    Poison.encode!(header)
  end

  # Adds the `_parent` routing field when the document declares a parent.
  defp put_parent(header, type, struct) do
    parent = Document.parent(struct)

    if parent do
      put_in(header[type]["_parent"], parent)
    else
      header
    end
  end

  defp config do
    Application.get_all_env(:elasticsearch)
  end
end
|
lib/elasticsearch/indexing/bulk.ex
| 0.86785 | 0.41944 |
bulk.ex
|
starcoder
|
defmodule AWS.OpsWorks do
@moduledoc """
AWS OpsWorks
Welcome to the *AWS OpsWorks Stacks API Reference*.
This guide provides descriptions, syntax, and usage examples for AWS OpsWorks
Stacks actions and data types, including common parameters and error codes.
AWS OpsWorks Stacks is an application management service that provides an
integrated experience for overseeing the complete application lifecycle. For
information about this product, go to the [AWS OpsWorks](http://aws.amazon.com/opsworks/) details page.
## SDKs and CLI
The most common way to use the AWS OpsWorks Stacks API is by using the AWS
Command Line Interface (CLI) or by using one of the AWS SDKs to implement
applications in your preferred language. For more information, see:
* [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html)
* [AWS SDK for Java](https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/opsworks/AWSOpsWorksClient.html)
* [AWS SDK for .NET](https://docs.aws.amazon.com/sdkfornet/latest/apidocs/html/N_Amazon_OpsWorks.htm)
* [AWS SDK for PHP 2](https://docs.aws.amazon.com/aws-sdk-php-2/latest/class-Aws.OpsWorks.OpsWorksClient.html)
* [AWS SDK for Ruby](http://docs.aws.amazon.com/sdkforruby/api/) * [AWS SDK for
Node.js](http://aws.amazon.com/documentation/sdkforjavascript/)
* [AWS SDK for Python(Boto)](http://docs.pythonboto.org/en/latest/ref/opsworks.html)
## Endpoints
AWS OpsWorks Stacks supports the following endpoints, all HTTPS. You must
connect to one of the following endpoints. Stacks can only be accessed or
managed within the endpoint in which they are created.
* opsworks.us-east-1.amazonaws.com
* opsworks.us-east-2.amazonaws.com
* opsworks.us-west-1.amazonaws.com
* opsworks.us-west-2.amazonaws.com
* opsworks.ca-central-1.amazonaws.com (API only; not available in
the AWS console)
* opsworks.eu-west-1.amazonaws.com
* opsworks.eu-west-2.amazonaws.com
* opsworks.eu-west-3.amazonaws.com
* opsworks.eu-central-1.amazonaws.com
* opsworks.ap-northeast-1.amazonaws.com
* opsworks.ap-northeast-2.amazonaws.com
* opsworks.ap-south-1.amazonaws.com
* opsworks.ap-southeast-1.amazonaws.com
* opsworks.ap-southeast-2.amazonaws.com
* opsworks.sa-east-1.amazonaws.com
## Chef Versions
When you call `CreateStack`, `CloneStack`, or `UpdateStack` we recommend you use
the `ConfigurationManager` parameter to specify the Chef version. The
recommended and default value for Linux stacks is currently 12. Windows stacks
use Chef 12.2. For more information, see [Chef Versions](https://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-chef11.html).
You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We recommend
migrating your existing Linux stacks to Chef 12 as soon as possible.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2013-02-18",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "opsworks",
global?: false,
protocol: "json",
service_id: "OpsWorks",
signature_version: "v4",
signing_name: "opsworks",
target_prefix: "OpsWorks_20130218"
}
end
@doc """
Assign a registered instance to a layer.
* You can assign registered on-premises instances to any layer type.
* You can assign registered Amazon EC2 instances only to custom
layers.
* You cannot use this action with instances that were created with
AWS OpsWorks Stacks.
**Required Permissions**: To use this action, an AWS Identity and Access
Management (IAM) user must have a Manage permissions level for the stack or an
attached policy that explicitly grants permissions. For more information on user
permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def assign_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AssignInstance", input, options)
end
@doc """
Assigns one of the stack's registered Amazon EBS volumes to a specified
instance.
The volume must first be registered with the stack by calling `RegisterVolume`.
After you register the volume, you must call `UpdateVolume` to specify a mount
point before calling `AssignVolume`. For more information, see [Resource Management](https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).
**Required Permissions**: To use this action, an IAM user must have a Manage
permissions level for the stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def assign_volume(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AssignVolume", input, options)
end
@doc """
Associates one of the stack's registered Elastic IP addresses with a specified
instance.
The address must first be registered with the stack by calling
`RegisterElasticIp`. For more information, see [Resource Management](https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).
**Required Permissions**: To use this action, an IAM user must have a Manage
permissions level for the stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def associate_elastic_ip(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AssociateElasticIp", input, options)
end
@doc """
Attaches an Elastic Load Balancing load balancer to a specified layer.
AWS OpsWorks Stacks does not support Application Load Balancer. You can only use
Classic Load Balancer with AWS OpsWorks Stacks. For more information, see
[Elastic Load Balancing](https://docs.aws.amazon.com/opsworks/latest/userguide/layers-elb.html).
You must create the Elastic Load Balancing instance separately, by using the
Elastic Load Balancing console, API, or CLI. For more information, see [ Elastic Load Balancing Developer
Guide](https://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/Welcome.html).
**Required Permissions**: To use this action, an IAM user must have a Manage
permissions level for the stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def attach_elastic_load_balancer(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AttachElasticLoadBalancer", input, options)
end
@doc """
Creates a clone of a specified stack.
For more information, see [Clone a Stack](https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-cloning.html).
By default, all parameters are set to the values used by the parent stack.
**Required Permissions**: To use this action, an IAM user must have an attached
policy that explicitly grants permissions. For more information about user
permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def clone_stack(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CloneStack", input, options)
end
@doc """
Creates an app for a specified stack.
For more information, see [Creating Apps](https://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html).
**Required Permissions**: To use this action, an IAM user must have a Manage
permissions level for the stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def create_app(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateApp", input, options)
end
@doc """
Runs deployment or stack commands.
For more information, see [Deploying Apps](https://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-deploying.html)
and [Run Stack Commands](https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-commands.html).
**Required Permissions**: To use this action, an IAM user must have a Deploy or
Manage permissions level for the stack, or an attached policy that explicitly
grants permissions. For more information on user permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def create_deployment(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateDeployment", input, options)
end
@doc """
Creates an instance in a specified stack.
For more information, see [Adding an Instance to a Layer](https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html).
**Required Permissions**: To use this action, an IAM user must have a Manage
permissions level for the stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def create_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateInstance", input, options)
end
@doc """
Creates a layer.
For more information, see [How to Create a Layer](https://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-create.html).
You should use **CreateLayer** for noncustom layer types such as PHP App Server
only if the stack does not have an existing layer of that type. A stack can have
at most one instance of each noncustom layer; if you attempt to create a second
instance, **CreateLayer** fails. A stack can have an arbitrary number of custom
layers, so you can call **CreateLayer** as many times as you like for that layer
type.
**Required Permissions**: To use this action, an IAM user must have a Manage
permissions level for the stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def create_layer(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateLayer", input, options)
end
@doc """
Creates a new stack.
For more information, see [Create a New Stack](https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-edit.html).
**Required Permissions**: To use this action, an IAM user must have an attached
policy that explicitly grants permissions. For more information about user
permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def create_stack(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateStack", input, options)
end
@doc """
Creates a new user profile.
**Required Permissions**: To use this action, an IAM user must have an attached
policy that explicitly grants permissions. For more information about user
permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def create_user_profile(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateUserProfile", input, options)
end
@doc """
Deletes a specified app.
**Required Permissions**: To use this action, an IAM user must have a Manage
permissions level for the stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def delete_app(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteApp", input, options)
end
@doc """
Deletes a specified instance, which terminates the associated Amazon EC2
instance.
You must stop an instance before you can delete it.
For more information, see [Deleting Instances](https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-delete.html).
**Required Permissions**: To use this action, an IAM user must have a Manage
permissions level for the stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def delete_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteInstance", input, options)
end
@doc """
Deletes a specified layer.
You must first stop and then delete all associated instances or unassign
registered instances. For more information, see [How to Delete a Layer](https://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-delete.html).
**Required Permissions**: To use this action, an IAM user must have a Manage
permissions level for the stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def delete_layer(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteLayer", input, options)
end
@doc """
Deletes a specified stack.
You must first delete all instances, layers, and apps or deregister registered
instances. For more information, see [Shut Down a Stack](https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-shutting.html).
**Required Permissions**: To use this action, an IAM user must have a Manage
permissions level for the stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def delete_stack(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteStack", input, options)
end
@doc """
Deletes a user profile.
**Required Permissions**: To use this action, an IAM user must have an attached
policy that explicitly grants permissions. For more information about user
permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def delete_user_profile(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteUserProfile", input, options)
end
@doc """
Deregisters a specified Amazon ECS cluster from a stack.
For more information, see [ Resource Management](https://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-ecscluster.html#workinglayers-ecscluster-delete).
**Required Permissions**: To use this action, an IAM user must have a Manage
permissions level for the stack or an attached policy that explicitly grants
permissions. For more information on user permissions, see
[https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def deregister_ecs_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeregisterEcsCluster", input, options)
end
@doc """
Deregisters a specified Elastic IP address.
The address can then be registered by another stack. For more information, see
[Resource Management](https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).
**Required Permissions**: To use this action, an IAM user must have a Manage
permissions level for the stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def deregister_elastic_ip(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeregisterElasticIp", input, options)
end
@doc """
Deregister a registered Amazon EC2 or on-premises instance.
This action removes the instance from the stack and returns it to your control.
This action cannot be used with instances that were created with AWS OpsWorks
Stacks.
**Required Permissions**: To use this action, an IAM user must have a Manage
permissions level for the stack or an attached policy that explicitly grants
permissions. For more information on user permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def deregister_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeregisterInstance", input, options)
end
@doc """
Deregisters an Amazon RDS instance.
**Required Permissions**: To use this action, an IAM user must have a Manage
permissions level for the stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def deregister_rds_db_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeregisterRdsDbInstance", input, options)
end
@doc """
Deregisters an Amazon EBS volume.
The volume can then be registered by another stack. For more information, see
[Resource Management](https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).
**Required Permissions**: To use this action, an IAM user must have a Manage
permissions level for the stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def deregister_volume(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeregisterVolume", input, options)
end
@doc """
Describes the available AWS OpsWorks Stacks agent versions.
You must specify a stack ID or a configuration manager. `DescribeAgentVersions`
returns a list of available agent versions for the specified stack or
configuration manager.
"""
def describe_agent_versions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAgentVersions", input, options)
end
@doc """
Requests a description of a specified set of apps.
This call accepts only one resource-identifying parameter.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information about user permissions, see
[Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_apps(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeApps", input, options)
end
@doc """
Describes the results of specified commands.
This call accepts only one resource-identifying parameter.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information about user permissions, see
[Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_commands(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeCommands", input, options)
end
@doc """
Requests a description of a specified set of deployments.
This call accepts only one resource-identifying parameter.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information about user permissions, see
[Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_deployments(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDeployments", input, options)
end
@doc """
Describes Amazon ECS clusters that are registered with a stack.
If you specify only a stack ID, you can use the `MaxResults` and `NextToken`
parameters to paginate the response. However, AWS OpsWorks Stacks currently
supports only one cluster per layer, so the result set has a maximum of one
element.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack or an attached policy that
explicitly grants permission. For more information about user permissions, see
[Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
This call accepts only one resource-identifying parameter.
"""
def describe_ecs_clusters(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEcsClusters", input, options)
end
@doc """
Describes [Elastic IP addresses](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html).
This call accepts only one resource-identifying parameter.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information about user permissions, see
[Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_elastic_ips(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeElasticIps", input, options)
end
@doc """
Describes a stack's Elastic Load Balancing instances.
This call accepts only one resource-identifying parameter.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information about user permissions, see
[Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_elastic_load_balancers(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeElasticLoadBalancers", input, options)
end
@doc """
Requests a description of a set of instances.
This call accepts only one resource-identifying parameter.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information about user permissions, see
[Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_instances(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeInstances", input, options)
end
@doc """
Requests a description of one or more layers in a specified stack.
This call accepts only one resource-identifying parameter.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information about user permissions, see
[Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_layers(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLayers", input, options)
end
@doc """
Describes load-based auto scaling configurations for specified layers.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information about user permissions, see
[Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_load_based_auto_scaling(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLoadBasedAutoScaling", input, options)
end
@doc """
Describes a user's SSH information.
**Required Permissions**: To use this action, an IAM user must have
self-management enabled or an attached policy that explicitly grants
permissions. For more information about user permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_my_user_profile(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeMyUserProfile", input, options)
end
@doc """
Describes the operating systems that are supported by AWS OpsWorks Stacks.
"""
def describe_operating_systems(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeOperatingSystems", input, options)
end
@doc """
Describes the permissions for a specified stack.
**Required Permissions**: To use this action, an IAM user must have a Manage
permissions level for the stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_permissions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribePermissions", input, options)
end
@doc """
Describe an instance's RAID arrays.
This call accepts only one resource-identifying parameter.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information about user permissions, see
[Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_raid_arrays(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeRaidArrays", input, options)
end
@doc """
Describes Amazon RDS instances.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information about user permissions, see
[Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
This call accepts only one resource-identifying parameter.
"""
def describe_rds_db_instances(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeRdsDbInstances", input, options)
end
@doc """
Describes AWS OpsWorks Stacks service errors.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information about user permissions, see
[Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
This call accepts only one resource-identifying parameter.
"""
def describe_service_errors(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeServiceErrors", input, options)
end
@doc """
Requests a description of a stack's provisioning parameters.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack or an attached policy that
explicitly grants permissions. For more information about user permissions, see
[Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_stack_provisioning_parameters(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DescribeStackProvisioningParameters",
input,
options
)
end
@doc """
Describes the number of layers and apps in a specified stack, and the number of
instances in each state, such as `running_setup` or `online`.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information about user permissions, see
[Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_stack_summary(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeStackSummary", input, options)
end
@doc """
Requests a description of one or more stacks.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information about user permissions, see
[Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_stacks(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeStacks", input, options)
end
@doc """
Describes time-based auto scaling configurations for specified instances.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information about user permissions, see
[Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_time_based_auto_scaling(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeTimeBasedAutoScaling", input, options)
end
@doc """
Describe specified users.
**Required Permissions**: To use this action, an IAM user must have an attached
policy that explicitly grants permissions. For more information about user
permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_user_profiles(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeUserProfiles", input, options)
end
@doc """
Describes an instance's Amazon EBS volumes.
This call accepts only one resource-identifying parameter.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information about user permissions, see
[Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_volumes(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeVolumes", input, options)
end
@doc """
Detaches a specified Elastic Load Balancing instance from its layer.
**Required Permissions**: To use this action, an IAM user must have a Manage
permissions level for the stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def detach_elastic_load_balancer(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DetachElasticLoadBalancer", input, options)
end
@doc """
Disassociates an Elastic IP address from its instance.
The address remains registered with the stack. For more information, see
[Resource Management](https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).
**Required Permissions**: To use this action, an IAM user must have a Manage
permissions level for the stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see [Managing User Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def disassociate_elastic_ip(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisassociateElasticIp", input, options)
end
@doc """
Gets a generated host name for the specified layer, based on the current host
name theme.

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def get_hostname_suggestion(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "GetHostnameSuggestion", input, options)

@doc """
Grants RDP access to a Windows instance for a specified time period.

This action can be used only with Windows stacks.
"""
def grant_access(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "GrantAccess", input, options)

@doc """
Returns a list of tags that are applied to the specified stack or layer.
"""
def list_tags(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "ListTags", input, options)

@doc """
Reboots a specified instance.

For more information, see [Starting, Stopping, and Rebooting
Instances](https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html).

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def reboot_instance(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "RebootInstance", input, options)

@doc """
Registers a specified Amazon ECS cluster with a stack.

You can register only one cluster with a stack, and a cluster can be
registered with only one stack. For more information, see [Resource
Management](https://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-ecscluster.html).

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def register_ecs_cluster(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "RegisterEcsCluster", input, options)
@doc """
Registers an Elastic IP address with a specified stack.

An address can be registered with only one stack at a time. If the address is
already registered, you must first deregister it by calling
`DeregisterElasticIp`. For more information, see [Resource
Management](https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def register_elastic_ip(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "RegisterElasticIp", input, options)

@doc """
Registers instances that were created outside of AWS OpsWorks Stacks with a
specified stack.

Using this action directly is not recommended: complete registration involves
both installing the AWS OpsWorks Stacks agent on the instance and registering
the instance with the stack, and `RegisterInstance` handles only the second
step. Use the AWS CLI `register` command instead, which performs the entire
operation. See [Registering an Instance with an AWS OpsWorks Stacks
Stack](https://docs.aws.amazon.com/opsworks/latest/userguide/registered-instances-register.html).

Registered instances have the same requirements as instances created by the
`CreateInstance` API — a supported Linux-based operating system and a
supported instance type. See [Preparing the
Instance](https://docs.aws.amazon.com/opsworks/latest/userguide/registered-instances-register-registering-preparer.html).

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def register_instance(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "RegisterInstance", input, options)

@doc """
Registers an Amazon RDS instance with a stack.

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def register_rds_db_instance(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "RegisterRdsDbInstance", input, options)

@doc """
Registers an Amazon EBS volume with a specified stack.

A volume can be registered with only one stack at a time. If the volume is
already registered, you must first deregister it by calling
`DeregisterVolume`. For more information, see [Resource
Management](https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def register_volume(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "RegisterVolume", input, options)
@doc """
Specifies the load-based auto scaling configuration for a specified layer.

For more information, see [Managing Load with Time-based and Load-based
Instances](https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-autoscaling.html).

To use load-based auto scaling, you must create a set of load-based auto
scaling instances. Load-based auto scaling operates only on the instances
from that set, so ensure you have created enough instances to handle the
maximum anticipated load.

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def set_load_based_auto_scaling(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "SetLoadBasedAutoScaling", input, options)

@doc """
Specifies a user's permissions.

For more information, see [Security and
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/workingsecurity.html).

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def set_permission(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "SetPermission", input, options)

@doc """
Specifies the time-based auto scaling configuration for a specified instance.

For more information, see [Managing Load with Time-based and Load-based
Instances](https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-autoscaling.html).

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def set_time_based_auto_scaling(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "SetTimeBasedAutoScaling", input, options)

@doc """
Starts a specified instance.

For more information, see [Starting, Stopping, and Rebooting
Instances](https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html).

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def start_instance(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "StartInstance", input, options)

@doc """
Starts a stack's instances.

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def start_stack(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "StartStack", input, options)
@doc """
Stops a specified instance.

When you stop a standard instance, the data disappears and must be
reinstalled when you restart the instance. You can stop an Amazon EBS-backed
instance without losing data. For more information, see [Starting, Stopping,
and Rebooting
Instances](https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html).

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def stop_instance(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "StopInstance", input, options)

@doc """
Stops a specified stack.

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def stop_stack(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "StopStack", input, options)

@doc """
Applies cost-allocation tags to a specified stack or layer in AWS OpsWorks
Stacks.

For more information about how tagging works, see
[Tags](https://docs.aws.amazon.com/opsworks/latest/userguide/tagging.html) in
the AWS OpsWorks User Guide.
"""
def tag_resource(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "TagResource", input, options)

@doc """
Unassigns a registered instance from all layers that are using the instance.

The instance remains in the stack as an unassigned instance, and can be
assigned to another layer as needed. You cannot use this action with
instances that were created with AWS OpsWorks Stacks.

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def unassign_instance(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "UnassignInstance", input, options)

@doc """
Unassigns an assigned Amazon EBS volume.

The volume remains registered with the stack. For more information, see
[Resource
Management](https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def unassign_volume(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "UnassignVolume", input, options)

@doc """
Removes tags from a specified stack or layer.
"""
def untag_resource(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "UntagResource", input, options)
@doc """
Updates a specified app.

**Required Permissions**: a Deploy or Manage permissions level for the stack,
or an attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def update_app(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "UpdateApp", input, options)

@doc """
Updates a registered Elastic IP address's name.

For more information, see [Resource
Management](https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def update_elastic_ip(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "UpdateElasticIp", input, options)

@doc """
Updates a specified instance.

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def update_instance(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "UpdateInstance", input, options)

@doc """
Updates a specified layer.

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def update_layer(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "UpdateLayer", input, options)

@doc """
Updates a user's SSH public key.

**Required Permissions**: self-management enabled, or an attached policy that
explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def update_my_user_profile(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "UpdateMyUserProfile", input, options)

@doc """
Updates an Amazon RDS instance.

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def update_rds_db_instance(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "UpdateRdsDbInstance", input, options)

@doc """
Updates a specified stack.

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def update_stack(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "UpdateStack", input, options)

@doc """
Updates a specified user profile.

**Required Permissions**: an attached policy that explicitly grants
permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def update_user_profile(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "UpdateUserProfile", input, options)

@doc """
Updates an Amazon EBS volume's name or mount point.

For more information, see [Resource
Management](https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).

**Required Permissions**: a Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. See [Managing User
Permissions](https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def update_volume(%Client{} = client, input, options \\ []),
  do: Request.request_post(client, metadata(), "UpdateVolume", input, options)
end
|
lib/aws/generated/ops_works.ex
| 0.878419 | 0.58261 |
ops_works.ex
|
starcoder
|
defmodule Poison do
  # The module documentation is sourced from the project README at compile
  # time; @external_resource marks it so changes trigger recompilation.
  readme_path = Path.expand(Path.join([__DIR__, "..", "README.md"]))
  @external_resource readme_path
  @moduledoc String.trim(File.read!(readme_path))

  alias Poison.{Decode, DecodeError, Decoder}
  alias Poison.{EncodeError, Encoder}
  alias Poison.{ParseError, Parser}

  @doc """
  Encodes a value to JSON, returning `{:ok, json}` or `{:error, exception}`.

      iex> Poison.encode([1, 2, 3])
      {:ok, "[1,2,3]"}
  """
  @spec encode(Encoder.t(), Encoder.options()) ::
          {:ok, iodata}
          | {:error, Exception.t()}
  def encode(value, options \\ %{}) do
    {:ok, encode!(value, options)}
  rescue
    exception in [EncodeError] ->
      {:error, exception}
  end

  @doc """
  Encodes a value to JSON, raising an exception on error.

      iex> Poison.encode!([1, 2, 3])
      "[1,2,3]"
  """
  @spec encode!(Encoder.t(), Encoder.options()) :: iodata | no_return
  def encode!(value, options \\ %{})

  # Keyword-list options are normalized to a map before encoding.
  def encode!(value, options) when is_list(options) do
    encode!(value, Map.new(options))
  end

  def encode!(value, options) do
    encoded = Encoder.encode(value, options)
    # With iodata: true the raw iodata is returned; otherwise flatten to a binary.
    if options[:iodata], do: encoded, else: IO.iodata_to_binary(encoded)
  end

  @doc """
  Decodes JSON to a value, returning `{:ok, value}` or `{:error, exception}`.

      iex> Poison.decode("[1,2,3]")
      {:ok, [1, 2, 3]}
  """
  @spec decode(iodata) ::
          {:ok, Parser.t()}
          | {:error, Exception.t()}
  @spec decode(iodata, Decoder.options()) ::
          {:ok, Parser.t()}
          | {:error, Exception.t()}
  def decode(iodata, options \\ %{}) do
    {:ok, decode!(iodata, options)}
  rescue
    exception in [ParseError, DecodeError] ->
      {:error, exception}
  end

  @doc """
  Decodes JSON to a value, raising an exception on error.

      iex> Poison.decode!("[1,2,3]")
      [1, 2, 3]
  """
  @spec decode!(iodata) :: Parser.t() | no_return
  def decode!(value) do
    Parser.parse!(value, %{})
  end

  @spec decode!(iodata, Decoder.options()) :: Decoder.t() | no_return
  # Keyword-list options are normalized to a map before decoding.
  def decode!(value, options) when is_list(options) do
    decode!(value, Map.new(options))
  end

  # When :as is given, parsed data is transformed and decoded into the target.
  def decode!(value, %{as: as} = options) when as != nil do
    parsed = Parser.parse!(value, options)
    transformed = Decode.transform(parsed, options)
    Decoder.decode(transformed, options)
  end

  def decode!(value, options) do
    Parser.parse!(value, options)
  end
end
|
lib/poison.ex
| 0.838779 | 0.436322 |
poison.ex
|
starcoder
|
defmodule Meeseeks.Select do
  @moduledoc false

  # Walks a queryable (a Document or a Result) node-by-node, matching nodes
  # against selectors and collecting matches into the context's accumulator.

  alias Meeseeks.{Accumulator, Context, Document, Error, Result, Selector}

  # Context map keys, resolved at compile time from the Context module.
  @return? Context.return_key()
  @matches Context.matches_key()
  @nodes Context.nodes_key()

  @type queryable :: Document.t() | Result.t()
  @type selectors :: Selector.t() | [Selector.t()]

  # All

  # Like all/3, but wraps the outcome in an ok/error tuple; an empty result
  # list becomes {:error, %Error{}}.
  @spec fetch_all(queryable, selectors, Context.t()) :: {:ok, [Result.t()]} | {:error, Error.t()}
  def fetch_all(queryable, selectors, context) do
    case all(queryable, selectors, context) do
      [] -> {:error, Error.new(:select, :no_match)}
      results -> {:ok, results}
    end
  end

  # Selects every matching node, accumulating with Accumulator.All.
  @spec all(queryable, selectors, Context.t()) :: [Result.t()]
  def all(queryable, selectors, context) do
    context = Context.add_accumulator(context, %Accumulator.All{})
    select(queryable, selectors, context)
  end

  # One

  # Like one/3, but wraps the outcome in an ok/error tuple; nil becomes
  # {:error, %Error{}}.
  @spec fetch_one(queryable, selectors, Context.t()) :: {:ok, Result.t()} | {:error, Error.t()}
  def fetch_one(queryable, selectors, context) do
    case one(queryable, selectors, context) do
      nil -> {:error, Error.new(:select, :no_match)}
      result -> {:ok, result}
    end
  end

  # Selects the first matching node, accumulating with Accumulator.One.
  @spec one(queryable, selectors, Context.t()) :: Result.t() | nil
  def one(queryable, selectors, context) do
    context = Context.add_accumulator(context, %Accumulator.One{})
    select(queryable, selectors, context)
  end

  # Select

  @spec select(queryable, selectors, Context.t()) :: any
  def select(queryable, selectors, context)

  # Guard against a common mistake: passing a raw selector string instead of
  # compiled selectors (via the `css` or `xpath` macro).
  def select(_queryable, string, _context) when is_binary(string) do
    raise Error.new(:select, :invalid_selectors, %{
      description:
        "Expected selectors, received a string- did you mean to wrap the string in the `css` or `xpath` macro?",
      string: string
    })
  end

  def select(queryable, selectors, context) do
    context =
      context
      |> Context.prepare_for_selection()
      |> Context.ensure_accumulator!()
    walk(queryable, selectors, context)
  end

  # Walk

  # For a whole Document, walk every node in the document.
  defp walk(%Document{} = document, selectors, context) do
    document
    |> Document.get_nodes()
    |> walk_nodes(document, selectors, context)
    |> Context.return_accumulator()
  end

  # For a Result, walk only the result's node and its descendants.
  defp walk(%Result{id: id, document: document}, selectors, context) do
    ids = [id | Document.descendants(document, id)]
    document
    |> Document.get_nodes(ids)
    |> walk_nodes(document, selectors, context)
    |> Context.return_accumulator()
  end

  # Walk Nodes

  # The context's return flag short-circuits the walk (e.g. once
  # Accumulator.One is satisfied).
  defp walk_nodes(_, _, _, %{@return? => true} = context) do
    context
  end

  # Nodes exhausted but matches are pending: filter them before finishing.
  defp walk_nodes([], document, _, %{@matches => matches} = context) when map_size(matches) > 0 do
    filter_and_walk(matches, document, context)
  end

  defp walk_nodes([], _, _, context) do
    context
  end

  # Selectors exhausted but matches are pending: same filtering step.
  defp walk_nodes(_, document, [], %{@matches => matches} = context) when map_size(matches) > 0 do
    filter_and_walk(matches, document, context)
  end

  defp walk_nodes(_, _, [], context) do
    context
  end

  # Main case: run every selector over every node, then recurse with empty
  # lists so any collected matches get filtered above.
  defp walk_nodes(nodes, document, selectors, context) do
    context =
      Enum.reduce(nodes, Context.clear_matches(context), fn node, context ->
        walk_node(node, document, selectors, context)
      end)
    walk_nodes([], document, [], context)
  end

  # Walk Node

  defp walk_node(_, _, _, %{@return? => true} = context) do
    context
  end

  defp walk_node(nil, _, _, context) do
    context
  end

  defp walk_node(_, _, [], context) do
    context
  end

  # A list of selectors is applied one at a time, threading the context.
  defp walk_node(node, document, [selector | selectors], context) do
    context = walk_node(node, document, selector, context)
    walk_node(node, document, selectors, context)
  end

  # Element selectors are only ever matched against element nodes; the
  # Selector.match/4 callback may return a bare boolean or {bool, context}.
  defp walk_node(%Document.Element{} = node, document, %Selector.Element{} = selector, context) do
    case Selector.match(selector, node, document, context) do
      false -> context
      {false, context} -> context
      true -> handle_match(node, document, selector, context)
      {true, context} -> handle_match(node, document, selector, context)
    end
  end

  # Non-element node with an element selector: never a match.
  defp walk_node(_node, _document, %Selector.Element{} = _selector, context) do
    context
  end

  defp walk_node(node, document, selector, context) do
    case Selector.match(selector, node, document, context) do
      false -> context
      {false, context} -> context
      true -> handle_match(node, document, selector, context)
      {true, context} -> handle_match(node, document, selector, context)
    end
  end

  # Handle Match

  defp handle_match(node, document, selector, context) do
    combinator = Selector.combinator(selector)
    filters = Selector.filters(selector)
    case {combinator, filters} do
      # Add to accumulator if there is no combinator and no filters
      {nil, nil} ->
        Context.add_to_accumulator(context, document, node.id)
      # Add to accumulator if there is no combinator and empty filters
      {nil, []} ->
        Context.add_to_accumulator(context, document, node.id)
      # Add to @matches so all matching nodes can be filtered before to
      # continuing
      _ ->
        Context.add_to_matches(context, selector, node)
    end
  end

  # Filter and Walk

  defp filter_and_walk(matching, document, context) do
    # For each set of nodes matching a selector
    Enum.reduce(matching, context, fn {selector, nodes}, context ->
      filters = Selector.filters(selector)
      # Matches were prepended during the walk; restore document order.
      nodes = Enum.reverse(nodes)
      # Filter the nodes based on the selector's filters
      {nodes, context} = filter_nodes(filters, nodes, document, context)
      walk_filtered(nodes, document, selector, context)
    end)
  end

  defp walk_filtered(nodes, document, selector, context) do
    # For each remaining node either
    Enum.reduce(nodes, context, fn node, context ->
      case Selector.combinator(selector) do
        # Add the node to the accumulator if there is no combinator
        nil ->
          Context.add_to_accumulator(context, document, node.id)
        # Or walk the combinator
        combinator ->
          walk_combinator(combinator, node, document, context)
      end
    end)
  end

  # No filters: pass all nodes through unchanged.
  defp filter_nodes(nil, nodes, _, context) do
    {nodes, context}
  end

  defp filter_nodes([], nodes, _, context) do
    {nodes, context}
  end

  # Apply each filter in turn; each pass builds its result reversed, so
  # re-reverse after every filter to preserve node order.
  defp filter_nodes(filters, nodes, document, context) when is_list(filters) do
    Enum.reduce(filters, {nodes, context}, fn filter, {nodes, context} ->
      filter_nodes(filter, nodes, document, context)
      |> reverse_filtered_nodes()
    end)
  end

  # Single filter: keep only nodes the filter matches. The full node list is
  # stashed in the context under @nodes so positional filters can see it.
  defp filter_nodes(filter, nodes, document, context) do
    context = Map.put(context, @nodes, nodes)
    Enum.reduce(nodes, {[], context}, fn node, {nodes, context} ->
      case Selector.match(filter, node, document, context) do
        false -> {nodes, context}
        {false, context} -> {nodes, context}
        true -> {[node | nodes], context}
        {true, context} -> {[node | nodes], context}
      end
    end)
  end

  defp reverse_filtered_nodes({nodes, context}) do
    {Enum.reverse(nodes), context}
  end

  # Walk Combinator

  # Advance via the combinator (e.g. child, descendant, sibling) and keep
  # walking with the combinator's own selector; next/3 may yield nothing, one
  # node, or a list of nodes.
  defp walk_combinator(combinator, node, document, context) do
    case Selector.Combinator.next(combinator, node, document) do
      nil ->
        context
      nodes when is_list(nodes) ->
        selector = Selector.Combinator.selector(combinator)
        walk_nodes(nodes, document, selector, context)
      node ->
        selector = Selector.Combinator.selector(combinator)
        walk_nodes([node], document, selector, context)
    end
  end
end
|
lib/meeseeks/select.ex
| 0.840095 | 0.46217 |
select.ex
|
starcoder
|
defmodule AWS.Logs do
@moduledoc """
You can use Amazon CloudWatch Logs to monitor, store, and access your log
files from EC2 instances, Amazon CloudTrail, or other sources. You can then
retrieve the associated log data from CloudWatch Logs using the Amazon
CloudWatch console, the CloudWatch Logs commands in the AWS CLI, the
CloudWatch Logs API, or the CloudWatch Logs SDK.
You can use CloudWatch Logs to:
<ul> <li> **Monitor Logs from Amazon EC2 Instances in Real-time**: You can
use CloudWatch Logs to monitor applications and systems using log data. For
example, CloudWatch Logs can track the number of errors that occur in your
application logs and send you a notification whenever the rate of errors
exceeds a threshold you specify. CloudWatch Logs uses your log data for
monitoring; so, no code changes are required. For example, you can monitor
application logs for specific literal terms (such as
"NullReferenceException") or count the number of occurrences of a literal
term at a particular position in log data (such as "404" status codes in an
Apache access log). When the term you are searching for is found,
CloudWatch Logs reports the data to a Amazon CloudWatch metric that you
specify.
</li> <li> **Monitor Amazon CloudTrail Logged Events**: You can create
alarms in Amazon CloudWatch and receive notifications of particular API
activity as captured by CloudTrail and use the notification to perform
troubleshooting.
</li> <li> **Archive Log Data**: You can use CloudWatch Logs to store your
log data in highly durable storage. You can change the log retention
setting so that any log events older than this setting are automatically
deleted. The CloudWatch Logs agent makes it easy to quickly send both
rotated and non-rotated log data off of a host and into the log service.
You can then access the raw log data when you need it.
</li> </ul>
"""
@doc """
Cancels the specified export task.

The task must be in the `PENDING` or `RUNNING` state.
"""
def cancel_export_task(client, input, options \\ []),
  do: request(client, "CancelExportTask", input, options)

@doc """
Creates an export task, which allows you to efficiently export data from a
log group to an Amazon S3 bucket.

This is an asynchronous call. If all the required information is provided,
the operation initiates an export task and responds with the task's ID; use
`DescribeExportTasks` to get its status afterwards. Each account can have
only one active (`RUNNING` or `PENDING`) export task at a time; cancel with
`CancelExportTask`.

You can export logs from multiple log groups or multiple time ranges to the
same S3 bucket. To separate log data per export task, specify a prefix to be
used as the Amazon S3 key prefix for all exported objects.
"""
def create_export_task(client, input, options \\ []),
  do: request(client, "CreateExportTask", input, options)

@doc """
Creates a log group with the specified name.

You can create up to 5000 log groups per account. Log group names must be
unique within a region for an AWS account, can be between 1 and 512
characters long, and consist of the characters a-z, A-Z, 0-9, '_'
(underscore), '-' (hyphen), '/' (forward slash), and '.' (period).
"""
def create_log_group(client, input, options \\ []),
  do: request(client, "CreateLogGroup", input, options)
@doc """
Creates a log stream for the specified log group.

There is no limit on the number of log streams that you can create for a log
group. Log stream names must be unique within the log group, can be between
1 and 512 characters long, and must not contain ':' (colon) or '*'
(asterisk).
"""
def create_log_stream(client, input, options \\ []),
  do: request(client, "CreateLogStream", input, options)

@doc """
Deletes the specified destination, and eventually disables all the
subscription filters that publish to it.

This operation does not delete the physical resource encapsulated by the
destination.
"""
def delete_destination(client, input, options \\ []),
  do: request(client, "DeleteDestination", input, options)

@doc """
Deletes the specified log group and permanently deletes all the archived log
events associated with the log group.
"""
def delete_log_group(client, input, options \\ []),
  do: request(client, "DeleteLogGroup", input, options)

@doc """
Deletes the specified log stream and permanently deletes all the archived
log events associated with the log stream.
"""
def delete_log_stream(client, input, options \\ []),
  do: request(client, "DeleteLogStream", input, options)

@doc """
Deletes the specified metric filter.
"""
def delete_metric_filter(client, input, options \\ []),
  do: request(client, "DeleteMetricFilter", input, options)

@doc """
Deletes the specified retention policy.

Log events do not expire if they belong to log groups without a retention
policy.
"""
def delete_retention_policy(client, input, options \\ []),
  do: request(client, "DeleteRetentionPolicy", input, options)

@doc """
Deletes the specified subscription filter.
"""
def delete_subscription_filter(client, input, options \\ []),
  do: request(client, "DeleteSubscriptionFilter", input, options)
@doc """
Lists all your destinations.

The results are ASCII-sorted by destination name.
"""
def describe_destinations(client, input, options \\ []),
  do: request(client, "DescribeDestinations", input, options)

@doc """
Lists the specified export tasks.

You can list all your export tasks or filter the results based on task ID or
task status.
"""
def describe_export_tasks(client, input, options \\ []),
  do: request(client, "DescribeExportTasks", input, options)

@doc """
Lists the specified log groups.

You can list all your log groups or filter the results by prefix. The
results are ASCII-sorted by log group name.
"""
def describe_log_groups(client, input, options \\ []),
  do: request(client, "DescribeLogGroups", input, options)

@doc """
Lists the log streams for the specified log group.

You can list all the log streams or filter the results by prefix, and you
can control how the results are ordered. This operation has a limit of five
transactions per second, after which transactions are throttled.
"""
def describe_log_streams(client, input, options \\ []),
  do: request(client, "DescribeLogStreams", input, options)

@doc """
Lists the specified metric filters.

You can list all the metric filters or filter the results by log name,
prefix, metric name, and metric namespace. The results are ASCII-sorted by
filter name.
"""
def describe_metric_filters(client, input, options \\ []),
  do: request(client, "DescribeMetricFilters", input, options)

@doc """
Lists the subscription filters for the specified log group.

You can list all the subscription filters or filter the results by prefix.
The results are ASCII-sorted by filter name.
"""
def describe_subscription_filters(client, input, options \\ []),
  do: request(client, "DescribeSubscriptionFilters", input, options)
@doc """
Lists log events from the specified log group. You can list all the log
events or filter the results using a filter pattern, a time range, and the
name of the log stream.
By default, this operation returns as many log events as can fit in 1MB (up
to 10,000 log events), or all the events found within the time range that
you specify. If the results include a token, then there are more log events
available, and you can get additional results by specifying the token in a
subsequent call.
"""
def filter_log_events(client, input, options \\ []) do
request(client, "FilterLogEvents", input, options)
end
@doc """
Lists log events from the specified log stream. You can list all the log
events or filter using a time range.
By default, this operation returns as many log events as can fit in a
response size of 1MB (up to 10,000 log events). If the results include
tokens, there are more log events available. You can get additional log
events by specifying one of the tokens in a subsequent call.
"""
def get_log_events(client, input, options \\ []) do
request(client, "GetLogEvents", input, options)
end
@doc """
Lists the tags for the specified log group.
To add tags, use `TagLogGroup`. To remove tags, use `UntagLogGroup`.
"""
def list_tags_log_group(client, input, options \\ []) do
request(client, "ListTagsLogGroup", input, options)
end
@doc """
Creates or updates a destination. A destination encapsulates a physical
resource (such as a Kinesis stream) and enables you to subscribe to a
real-time stream of log events of a different account, ingested using
`PutLogEvents`. Currently, the only supported physical resource is an
Amazon Kinesis stream belonging to the same account as the destination.

A destination controls what is written to its Amazon Kinesis stream through
an access policy. By default, `PutDestination` does not set any access
policy with the destination, which means a cross-account user cannot call
`PutSubscriptionFilter` against this destination. To enable this, the
destination owner must call `PutDestinationPolicy` after `PutDestination`.
"""
def put_destination(client, input, options \\ []),
  do: request(client, "PutDestination", input, options)

@doc """
Creates or updates an access policy associated with an existing
destination. An access policy is an [IAM policy
document](http://docs.aws.amazon.com/IAM/latest/UserGuide/policies_overview.html)
that is used to authorize claims to register a subscription filter against
a given destination.
"""
def put_destination_policy(client, input, options \\ []),
  do: request(client, "PutDestinationPolicy", input, options)

@doc """
Uploads a batch of log events to the specified log stream.

You must include the sequence token obtained from the response of the
previous call. An upload in a newly created log stream does not require a
sequence token. You can also get the sequence token using
`DescribeLogStreams`.

The batch of events must satisfy the following constraints:

<ul> <li> The maximum batch size is 1,048,576 bytes, and this size is
calculated as the sum of all event messages in UTF-8, plus 26 bytes for
each log event.
</li> <li> None of the log events in the batch can be more than 2 hours in
the future.
</li> <li> None of the log events in the batch can be older than 14 days or
the retention period of the log group.
</li> <li> The log events in the batch must be in chronological order by
their timestamp (the time the event occurred, expressed as the number of
milliseconds since Jan 1, 1970 00:00:00 UTC).
</li> <li> The maximum number of log events in a batch is 10,000.
</li> <li> A batch of log events in a single request cannot span more than
24 hours. Otherwise, the operation fails.
</li> </ul>
"""
def put_log_events(client, input, options \\ []),
  do: request(client, "PutLogEvents", input, options)

@doc """
Creates or updates a metric filter and associates it with the specified log
group. Metric filters allow you to configure rules to extract metric data
from log events ingested through `PutLogEvents`.

The maximum number of metric filters that can be associated with a log
group is 100.
"""
def put_metric_filter(client, input, options \\ []),
  do: request(client, "PutMetricFilter", input, options)

@doc """
Sets the retention of the specified log group. A retention policy allows
you to configure the number of days you want to retain log events in the
specified log group.
"""
def put_retention_policy(client, input, options \\ []),
  do: request(client, "PutRetentionPolicy", input, options)

@doc """
Creates or updates a subscription filter and associates it with the
specified log group. Subscription filters allow you to subscribe to a
real-time stream of log events ingested through `PutLogEvents` and have
them delivered to a specific destination. Currently, the supported
destinations are:

<ul> <li> An Amazon Kinesis stream belonging to the same account as the
subscription filter, for same-account delivery.
</li> <li> A logical destination that belongs to a different account, for
cross-account delivery.
</li> <li> An Amazon Kinesis Firehose stream that belongs to the same
account as the subscription filter, for same-account delivery.
</li> <li> An AWS Lambda function that belongs to the same account as the
subscription filter, for same-account delivery.
</li> </ul> There can only be one subscription filter associated with a log
group.
"""
def put_subscription_filter(client, input, options \\ []),
  do: request(client, "PutSubscriptionFilter", input, options)

@doc """
Adds or updates the specified tags for the specified log group.

To list the tags for a log group, use `ListTagsLogGroup`. To remove tags,
use `UntagLogGroup`.

For more information about tags, see [Tag Log Groups in Amazon CloudWatch
Logs](http://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/log-group-tagging.html)
in the *Amazon CloudWatch Logs User Guide*.
"""
def tag_log_group(client, input, options \\ []),
  do: request(client, "TagLogGroup", input, options)

@doc """
Tests the filter pattern of a metric filter against a sample of log event
messages. You can use this operation to validate the correctness of a
metric filter pattern.
"""
def test_metric_filter(client, input, options \\ []),
  do: request(client, "TestMetricFilter", input, options)

@doc """
Removes the specified tags from the specified log group.

To list the tags for a log group, use `ListTagsLogGroup`. To add tags, use
`TagLogGroup`.
"""
def untag_log_group(client, input, options \\ []),
  do: request(client, "UntagLogGroup", input, options)
# Signs and performs a CloudWatch Logs JSON-RPC request.
#
# Success: `{:ok, parsed_body_or_nil, response}`.
# AWS-level error (non-200): `{:error, {exception_type, message}}` extracted
# from the JSON error body. Transport error: `{:error, %HTTPoison.Error{}}`.
#
# NOTE: the original @spec claimed `Poison.Response.t` in the success tuple
# and `{:error, Poison.Parser.t}` for AWS errors; the code actually returns
# an `HTTPoison.Response.t` and a `{type, message}` tuple, so the spec below
# has been corrected to match the implementation.
@spec request(map(), binary(), map(), list()) ::
        {:ok, Poison.Parser.t | nil, HTTPoison.Response.t} |
        {:error, {binary() | nil, binary() | nil}} |
        {:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
  client = %{client | service: "logs"}
  host = get_host("logs", client)
  url = get_url(host, client)

  headers = [
    {"Host", host},
    {"Content-Type", "application/x-amz-json-1.1"},
    # The JSON-RPC target header selects the API action on the 2014-03-28 API.
    {"X-Amz-Target", "Logs_20140328.#{action}"}
  ]

  payload = Poison.Encoder.encode(input, [])
  headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)

  case HTTPoison.post(url, payload, headers, options) do
    {:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
      # Some actions return an empty body on success; nothing to parse.
      {:ok, nil, response}

    {:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
      {:ok, Poison.Parser.parse!(body), response}

    {:ok, %HTTPoison.Response{body: body}} ->
      # Non-200: AWS encodes the failure as JSON with "__type" and "message".
      error = Poison.Parser.parse!(body)
      {:error, {error["__type"], error["message"]}}

    {:error, %HTTPoison.Error{} = error} ->
      # Pass the transport error through unchanged instead of rebuilding it.
      {:error, error}
  end
end
# Builds the request host; the special "local" region maps to localhost
# for development, otherwise "<prefix>.<region>.<endpoint>".
defp get_host(_endpoint_prefix, %{region: "local"}), do: "localhost"

defp get_host(endpoint_prefix, client) do
  "#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
# Assembles the full request URL from the client's protocol and port.
defp get_url(host, %{proto: proto, port: port}), do: "#{proto}://#{host}:#{port}/"
end
|
lib/aws/logs.ex
| 0.888057 | 0.786336 |
logs.ex
|
starcoder
|
defmodule Adventofcode.Day11ChronalCharge do
  use Adventofcode

  # The fuel grid is a fixed 300x300 matrix (puzzle constant).
  @grid_size 300

  # Part 1: find the 3x3 square with the largest total power and
  # return its top-left coordinate as an "x,y" string.
  def most_powered_three_by_three(input) do
    input
    |> parse
    |> build_grid
    |> fuel_squares_largest(3)
    |> Tuple.to_list()
    |> Enum.take(2)
    |> Enum.join(",")
  end

  # Part 2: search squares of every size 1..300 and return the best
  # one as an "x,y,size" string.
  def most_powered_any_size(input) do
    input
    |> parse
    |> build_grid
    |> fuel_squares_largest_total_power
    |> Tuple.to_list()
    |> Enum.take(3)
    |> Enum.join(",")
  end

  # Accept the grid serial number either as an integer or a string.
  defp parse(input) when is_number(input), do: input
  defp parse(input) when is_binary(input), do: String.to_integer(input)

  # Builds the grid as a list of rows (y), each row a list of power
  # levels for x = 1..300.
  defp build_grid(grid_serial_number) do
    for y <- 1..@grid_size do
      for x <- 1..@grid_size, do: power_level(grid_serial_number, {x, y})
    end
  end

  # Power level per the puzzle formula: hundreds digit of
  # ((x+10)*y + serial) * (x+10), minus 5 (result in -5..4).
  defp power_level(grid_serial_number, {x, y}) do
    rack_id = x + 10
    power = (rack_id * y + grid_serial_number) * rack_id
    zero_to_nine = power |> div(100) |> rem(10)
    zero_to_nine - 5
  end

  # Credits to <NAME> for the algorithm
  # https://github.com/joechiarella/advent_of_code/blob/master/lib/aoc2018_11.ex
  # Tries every square size and keeps the one with the largest total
  # power (the 4th tuple element produced by fuel_squares_largest/2).
  defp fuel_squares_largest_total_power(grid) do
    1..@grid_size
    |> Enum.map(&fuel_squares_largest(grid, &1))
    |> Enum.max_by(&elem(&1, 3))
  end

  # Sums `size`-wide windows along rows, transposes via List.zip/1,
  # then sums `size`-wide windows along the other axis, yielding the
  # total power of every size x size square; picks the maximum.
  defp fuel_squares_largest(grid, size) do
    grid
    |> Enum.map(&sum_row(&1, size))
    |> List.zip()
    |> Enum.map(&sum_row(Tuple.to_list(&1), size))
    |> List.flatten()
    |> Enum.with_index(1)
    |> Enum.max_by(&elem(&1, 0))
    |> do_fuel_squares_largest(size)
  end

  # Converts a flat 1-based index back into {x, y} coordinates.
  # NOTE(review): with Enum.with_index(1), `rem(index, width)` yields 0
  # whenever index is an exact multiple of the row width, so y can be 0
  # here — this coordinate math looks off-by-one for last-column squares;
  # verify against the AoC day 11 examples before relying on it.
  defp do_fuel_squares_largest({max, index}, size) do
    x = div(index, @grid_size - (size - 1)) + 1
    y = rem(index, @grid_size - (size - 1))
    {x, y, size, max}
  end

  # Sliding-window sums of width `size` over one row: the first window
  # is summed directly, then each next window is prev + entering - leaving.
  defp sum_row(row, size) do
    current = row |> Enum.take(size) |> Enum.sum()
    adds = Enum.drop(row, size)
    [current | moving_sum(current, adds, row)]
  end

  defp moving_sum(_, [], _), do: []

  defp moving_sum(prev, [add | add_tail], [sub | sub_tail]) do
    current = prev + add - sub
    [current | moving_sum(current, add_tail, sub_tail)]
  end
end
|
lib/day_11_chronal_charge.ex
| 0.745584 | 0.528473 |
day_11_chronal_charge.ex
|
starcoder
|
defmodule Performance.BencheeCase do
  @moduledoc """
  An ExUnit case that will set up a benchee run for you, in a more readable way.

  Options accepted by `use Performance.BencheeCase`:

    * `:otp_app` (required) - app whose consumer parameters are tuned
    * `:endpoints` - Kafka endpoints passed to the `Performance.Kafka` helpers
    * `:topic_prefixes` / `:topics` - topics created/deleted around scenarios
    * `:log_level` - Logger level for the tests (default `:warn`)
    * `:override_defaults` - when true, user hooks replace (instead of being
      appended to) the default benchee hooks
  """
  use ExUnit.CaseTemplate
  require Logger

  using opts do
    otp_app = Keyword.fetch!(opts, :otp_app)
    endpoints = Keyword.get(opts, :endpoints, [])
    topic_prefixes = Keyword.get(opts, :topic_prefixes, [])
    topics = Keyword.get(opts, :topics, [])
    log_level = Keyword.get(opts, :log_level, :warn)
    override_defaults = Keyword.get(opts, :override_defaults, false)

    quote do
      use Divo
      require Logger
      import ExProf.Macro
      alias Performance.Cve
      alias Performance.Kafka
      alias Performance.SetupConfig

      @moduletag :performance
      @moduletag log_level: unquote(log_level)
      @moduletag timeout: :infinity

      @otp_app unquote(otp_app)
      @endpoints unquote(endpoints)
      @topic_prefixes unquote(topic_prefixes)
      @topics unquote(topics)

      # Runs Benchee with the `:under_test` job from `opts`, merging any
      # user-supplied hooks with the default reset/log iteration hooks.
      def benchee_run(opts) do
        {jobs, rest} = Keyword.split(opts, [:under_test])

        defaults = [
          before_scenario: [&reset_iteration/1],
          before_each: [&log_iteration/1],
          after_each: [],
          after_scenario: []
        ]

        {hooks, options} = Keyword.split(rest, Keyword.keys(defaults))

        wrapped_hooks =
          Performance.BencheeCase.__merge_hooks__(hooks, defaults, unquote(override_defaults))

        Benchee.run(
          %{"under_test" => jobs[:under_test]},
          wrapped_hooks ++ options
        )
      end

      defp tune_consumer_parameters(params) do
        Kafka.tune_consumer_parameters(@otp_app, params)
      end

      # Resets the shared iteration counter at the start of each scenario.
      defp reset_iteration(inputs) do
        Agent.update(:counter, fn _s -> 1 end)
        inputs
      end

      # Logs and increments the iteration counter before each measurement.
      defp log_iteration(inputs) do
        iteration = Agent.get_and_update(:counter, fn s -> {s, s + 1} end)
        Logger.info("Iteration #{inspect(iteration)}")
        inputs
      end

      defp create_kafka_topics(dataset) do
        from_prefixes = Kafka.setup_topics(@topic_prefixes, dataset, @endpoints)
        from_names = Kafka.setup_topics(@topics, @endpoints)
        combined = Tuple.to_list(from_prefixes) ++ Tuple.to_list(from_names)
        List.to_tuple(combined)
      end

      # BUG FIX: the chunk size was hard-coded to 10_000, silently ignoring
      # the caller's `chunk_size` argument; it is now passed through.
      defp load_messages(dataset, topic, messages, chunk_size \\ 10_000) do
        Kafka.load_messages(@endpoints, dataset, topic, messages, length(messages), chunk_size)
      end

      defp get_message_count(topic, num_partitions \\ 1) do
        Kafka.get_message_count(@endpoints, topic, num_partitions)
      end

      defp delete_kafka_topics(dataset) do
        Kafka.delete_topics(@topic_prefixes, dataset, @endpoints)
        Kafka.delete_topics(@topics, @endpoints)
      end
    end
  end

  setup_all do
    # Shared counter used by the iteration hooks above.
    Agent.start(fn -> 1 end, name: :counter)
    :ok
  end

  setup tags do
    Logger.configure(level: tags[:log_level])
  end

  # Merges user hooks into the defaults (append unless `override`),
  # then chains each hook list into a single function for Benchee.
  def __merge_hooks__(hooks, defaults, override \\ false) do
    merge_hooks(hooks, defaults, override)
    |> chain_hooks()
    |> Keyword.new()
  end

  defp merge_hooks(hooks, defaults, override) do
    Keyword.merge(defaults, hooks, fn _key, def_v, hook_v ->
      wrapped_hook_v = List.wrap(hook_v)

      case override do
        false -> def_v ++ wrapped_hook_v
        true -> wrapped_hook_v
      end
    end)
  end

  # Benchee accepts one function per hook; fold each list of hooks into
  # a single function that threads `inputs` through them in order.
  defp chain_hooks(hooks) do
    Enum.map(hooks, fn {hook_name, hook_functions} ->
      chained_hook = fn inputs ->
        Enum.reduce(hook_functions, inputs, &call_hook/2)
      end

      {hook_name, chained_hook}
    end)
  end

  defp call_hook(hook, input) do
    hook.(input)
  end
end
|
apps/performance/lib/performance/benchee_case.ex
| 0.64131 | 0.417628 |
benchee_case.ex
|
starcoder
|
defmodule Deckhub.Hearthstone do
  @moduledoc """
  Context for dealing with all Hearthstone game data such as cards, card backs, and heroes.
  """
  import Ecto.Query, warn: false
  alias Deckhub.Repo
  alias Deckhub.Hearthstone.Card
  alias Deckhub.Hearthstone.Term

  @doc """
  Returns the list of cards.

  ## Examples

  ```
  iex> Deckhub.Hearthstone.list_cards()
  [%Card{}, ...]
  ```
  """
  def list_cards, do: Repo.all(Card)

  @doc """
  Returns the list of terms in the database.

  ## Examples

  ```
  iex> Deckhub.Hearthstone.list_terms()
  [%Term{}, ...]
  ```
  """
  def list_terms, do: Repo.all(Term)

  @doc """
  Gets a single card by either its `dbf_id` or its `slug_name`.

  Raises `Ecto.NoResultsError` if the Card does not exist.

  ## Examples

  ```
  iex> get_card!(662)
  %Card{slug_name: "frostbolt", dbf_id: 662, ...}
  iex> get_card!("frostbolt")
  %Card{slug_name: "frostbolt", dbf_id: 662, ...}
  iex> get_card!("not-a-card")
  ** (Ecto.NoResultsError)
  ```
  """
  def get_card!(dbf_id) when is_integer(dbf_id) do
    Repo.get_by!(Card, dbf_id: dbf_id)
  end

  def get_card!(slug_name) when is_binary(slug_name) do
    Repo.get_by!(Card, slug_name: slug_name)
  end

  @doc """
  Gets a term by its `key`.

  Raises `Ecto.NoResultsError` if the `Term` does not exist.

  ## Examples

  ```
  iex> Deckhub.Hearthstone.get_term!("some-term")
  %Term{}
  ```
  """
  def get_term!(key) do
    Repo.get_by!(Term, key: key)
  end

  @doc """
  Creates a card.

  ## Examples

  ```
  iex> create_card(%{field: value})
  {:ok, %Card{}}
  iex> create_card(%{field: bad_value})
  {:error, %Ecto.Changeset{}}
  ```
  """
  def create_card(attrs \\ %{}) do
    Repo.insert(Card.changeset(%Card{}, attrs))
  end

  def create_term(attrs \\ %{}) do
    Repo.insert(Term.changeset(%Term{}, attrs))
  end

  @doc """
  Updates a card.

  ## Examples

  ```
  iex> update_card(card, %{field: new_value})
  {:ok, %Card{}}
  iex> update_card(card, %{field: bad_value})
  {:error, %Ecto.Changeset{}}
  ```
  """
  def update_card(%Card{} = card, attrs) do
    Repo.update(Card.changeset(card, attrs))
  end

  @doc """
  Deletes a Card.

  ## Examples

  ```
  iex> delete_card(card)
  {:ok, %Card{}}
  iex> delete_card(card)
  {:error, %Ecto.Changeset{}}
  ```
  """
  def delete_card(%Card{} = card), do: Repo.delete(card)

  @doc """
  Returns an `%Ecto.Changeset{}` for tracking card changes.

  ## Examples

  ```
  iex> change_card(card)
  %Ecto.Changeset{source: %Card{}}
  ```
  """
  def change_card(%Card{} = card), do: Card.changeset(card, %{})
end
|
lib/deckhub/hearthstone/hearthstone.ex
| 0.860779 | 0.781456 |
hearthstone.ex
|
starcoder
|
defmodule Tox.Interval do
  @moduledoc """
  An `Interval` struct and functions.
  A time interval is the intervening time between two time points. The amount of
  intervening time is expressed by a combination of `DateTime`/`DateTime`,
  `Datetime`/`Period` or `Period`/`DateTime`.
  The key `boundaries` indicates whether the `start` and the `ending` belong to
  the interval.
  Valid values for `boundaries` are:
  * `:open`: `start` and `ending` are excluded
  * `:closed`: `start` and `ending` are included
  * `:left_open`: `start` is excluded and `ending` is included
  * `:right_open`: (default) `start` is included and `ending` is excluded
  ## Examples
  The default `:right_open`:
      iex> datetime = DateTime.from_naive!(~N[2020-04-10 00:00:00], "America/Rainy_River")
      iex> {:ok, interval} = Tox.Interval.new(
      ...>   datetime, Tox.Period.new!(day: 1)
      ...> )
      iex> interval
      #Tox.Interval<[2020-04-10T00:00:00-05:00/P1D[>
      iex> Tox.Interval.contains?(interval, datetime)
      true
      iex> Tox.Interval.contains?(interval, Tox.DateTime.shift(datetime, day: 1))
      false
  With `boundaries` set to `:open`:
      iex> datetime = DateTime.from_naive!(~N[2020-04-10 00:00:00], "America/Rainy_River")
      iex> {:ok, interval} = Tox.Interval.new(
      ...>   datetime, Tox.Period.new!(day: 1), :open
      ...> )
      iex> interval
      #Tox.Interval<]2020-04-10T00:00:00-05:00/P1D[>
      iex> Tox.Interval.contains?(interval, datetime)
      false
      iex> Tox.Interval.contains?(interval, Tox.DateTime.shift(datetime, day: 1))
      false
  With `boundaries` set to `:left_open`:
      iex> datetime = DateTime.from_naive!(~N[2020-04-10 00:00:00], "America/Rainy_River")
      iex> {:ok, interval} = Tox.Interval.new(
      ...>   datetime, Tox.Period.new!(day: 1), :left_open
      ...> )
      iex> interval
      #Tox.Interval<]2020-04-10T00:00:00-05:00/P1D]>
      iex> Tox.Interval.contains?(interval, datetime)
      false
      iex> Tox.Interval.contains?(interval, Tox.DateTime.shift(datetime, day: 1))
      true
  With `boundaries` set to `:closed`:
      iex> datetime = DateTime.from_naive!(~N[2020-04-10 00:00:00], "America/Rainy_River")
      iex> {:ok, interval} = Tox.Interval.new(
      ...>   datetime, Tox.Period.new!(day: 1), :closed
      ...> )
      iex> interval
      #Tox.Interval<[2020-04-10T00:00:00-05:00/P1D]>
      iex> Tox.Interval.contains?(interval, datetime)
      true
      iex> Tox.Interval.contains?(interval, Tox.DateTime.shift(datetime, day: 1))
      true
  """
  alias Tox.Period

  # Either side of an interval may be a fixed point in time or a period
  # relative to the other side.
  @type boundary :: DateTime.t() | Period.t()
  @type t :: %__MODULE__{
          start: boundary(),
          ending: boundary(),
          boundaries: Tox.boundaries()
        }
  defstruct start: nil, ending: nil, boundaries: :right_open

  @doc """
  Creates a new interval.
  See [`module documentation`](#content) for more information.
  ## Examples
      iex> {:ok, interval} = Tox.Interval.new(
      ...>   DateTime.from_naive!(~N[2020-01-01 00:00:00], "Europe/Berlin"),
      ...>   Tox.Period.new!(month: 1)
      ...> )
      iex> interval
      #Tox.Interval<[2020-01-01T00:00:00+01:00/P1M[>
      iex> Tox.Interval.new(
      ...>   Tox.Period.new!(month: 1),
      ...>   Tox.Period.new!(month: 1)
      ...> )
      {:error, :invalid_interval}
  """
  @spec new(boundary(), boundary(), Tox.boundaries()) :: {:ok, t()} | {:error, :invalid_interval}
  def new(start, ending, boundaries \\ :right_open) do
    case is_valid?(start, ending, boundaries) do
      true -> {:ok, struct(__MODULE__, start: start, ending: ending, boundaries: boundaries)}
      false -> {:error, :invalid_interval}
    end
  end

  @doc """
  Creates a new interval or raises an error.
  See [`module documentation`](#content) for more information.
  ## Examples
      iex> Tox.Interval.new!(
      ...>   DateTime.from_naive!(~N[2020-01-01 00:00:00], "Europe/Berlin"),
      ...>   Tox.Period.new!(month: 1)
      ...> )
      #Tox.Interval<[2020-01-01T00:00:00+01:00/P1M[>
  """
  @spec new!(boundary(), boundary(), Tox.boundaries()) :: t()
  def new!(start, ending, boundaries \\ :right_open) do
    case new(start, ending, boundaries) do
      {:ok, interval} ->
        interval

      {:error, reason} ->
        raise ArgumentError,
              "cannot create a new interval with #{inspect(start)}, " <>
                "#{inspect(ending)}, and #{inspect(boundaries)} " <>
                "reason: #{inspect(reason)}"
    end
  end

  @doc """
  Returns the datetime on which the interval ends.
  The interval boundaries do not influence the returned datetime.
  ## Examples
      iex> interval = Tox.Interval.new!(
      ...>   DateTime.from_naive!(~N[2020-01-01 00:00:00], "Europe/Berlin"),
      ...>   Tox.Period.new!(month: 1)
      ...> )
      iex> Tox.Interval.ending_datetime(interval)
      #DateTime<2020-02-01 00:00:00+01:00 CET Europe/Berlin>
      iex> interval = Tox.Interval.new!(
      ...>   Tox.Period.new!(month: 1),
      ...>   DateTime.from_naive!(~N[2020-01-01 00:00:00], "Europe/Berlin")
      ...> )
      iex> Tox.Interval.ending_datetime(interval)
      #DateTime<2020-01-01 00:00:00+01:00 CET Europe/Berlin>
  """
  @spec ending_datetime(t()) :: DateTime.t()
  def ending_datetime(%{start: start, ending: ending}), do: ending_datetime(start, ending)

  # A fixed ending is returned as-is; a Period ending is resolved by
  # shifting the start forward by the period.
  defp ending_datetime(_start, %DateTime{} = ending), do: ending

  defp ending_datetime(%DateTime{} = start, %Period{} = ending) do
    Tox.DateTime.shift(start, Period.to_durations(ending))
  end

  @doc """
  Returns the datetime on which the interval starts.
  The interval boundaries do not influence the returned datetime.
  ## Examples
      iex> interval = Tox.Interval.new!(
      ...>   Tox.Period.new!(month: 1),
      ...>   DateTime.from_naive!(~N[2020-01-01 00:00:00], "Europe/Berlin")
      ...> )
      iex> Tox.Interval.start_datetime(interval)
      #DateTime<2019-12-01 00:00:00+01:00 CET Europe/Berlin>
      iex> interval = Tox.Interval.new!(
      ...>   DateTime.from_naive!(~N[2020-01-01 00:00:00], "Europe/Berlin"),
      ...>   Tox.Period.new!(month: 1)
      ...> )
      iex> Tox.Interval.start_datetime(interval)
      #DateTime<2020-01-01 00:00:00+01:00 CET Europe/Berlin>
  """
  @spec start_datetime(t()) :: DateTime.t()
  def start_datetime(%{start: start, ending: ending}), do: start_datetime(start, ending)

  # A fixed start is returned as-is; a Period start is resolved by
  # shifting the ending backwards (negated durations) by the period.
  defp start_datetime(%DateTime{} = start, _ending), do: start

  defp start_datetime(%Period{} = start, %DateTime{} = ending) do
    Tox.DateTime.shift(ending, Period.to_durations(start, :neg))
  end

  @doc """
  Returns the next interval.
  ## Examples
      iex> interval = Tox.Interval.new!(
      ...>   DateTime.from_naive!(~N[2020-01-01 00:00:00], "Europe/Berlin"),
      ...>   Tox.Period.new!(month: 1)
      ...> )
      iex> Tox.Interval.next(interval)
      #Tox.Interval<[2020-02-01T00:00:00+01:00/P1M[>
      iex> interval = Tox.Interval.new!(
      ...>   Tox.Period.new!(month: 1),
      ...>   DateTime.from_naive!(~N[2020-01-01 00:00:00], "Europe/Berlin")
      ...> )
      iex> Tox.Interval.next(interval)
      #Tox.Interval<[P1M/2020-02-01T00:00:00+01:00[>
      iex> interval = Tox.Interval.new!(
      ...>   DateTime.from_naive!(~N[2020-01-01 00:00:00], "Europe/Berlin"),
      ...>   DateTime.from_naive!(~N[2020-01-02 00:00:00], "Europe/Berlin")
      ...> )
      iex> Tox.Interval.next(interval)
      #Tox.Interval<[2020-01-02T00:00:00+01:00/2020-01-03T00:00:00+01:00[>
  """
  @spec next(t()) :: t()
  def next(%{start: start, ending: ending, boundaries: boundaries}) do
    {new_start, new_ending} = next(start, ending)
    new!(new_start, new_ending, boundaries)
  end

  # The fixed side moves forward by the period; a DateTime/DateTime
  # interval moves forward by its own length (in microseconds).
  defp next(%DateTime{} = start, %Period{} = ending) do
    {Tox.DateTime.shift(start, Period.to_durations(ending)), ending}
  end

  defp next(%Period{} = start, %DateTime{} = ending) do
    {start, Tox.DateTime.shift(ending, Period.to_durations(start))}
  end

  defp next(%DateTime{} = start, %DateTime{} = ending) do
    diff = DateTime.diff(ending, start, :microsecond)
    {DateTime.add(start, diff, :microsecond), DateTime.add(ending, diff, :microsecond)}
  end

  @doc """
  Returns true when the `datetime` is in the given `interval`.
  Whether the start and end belong to the interval is determined by the value
  for `boundaries`. See the documentation at the top.
  ## Examples
      iex> interval = Tox.Interval.new!(
      ...>   DateTime.from_naive!(~N[2020-02-01 00:00:00], "Europe/Berlin"),
      ...>   Tox.Period.new!(month: 1)
      ...> )
      iex> datetime = DateTime.from_naive!(~N[2020-01-01 00:00:00], "Europe/Berlin")
      iex> Tox.Interval.contains?(interval, datetime)
      false
  """
  @spec contains?(t(), DateTime.t()) :: boolean()
  def contains?(interval, datetime) do
    Tox.DateTime.between?(
      datetime,
      start_datetime(interval),
      ending_datetime(interval),
      interval.boundaries
    )
  end

  @doc """
  Returns `{:ok, amount}` where amount is the time since the start of the
  interval.
  If the interval does not contain the given `datetime` an `:error` will be
  returned.
  ## Examples
      iex> now = DateTime.utc_now()
      iex> interval =
      ...>   Tox.Interval.new!(
      ...>     Tox.DateTime.shift(now, hour: -1),
      ...>     Tox.Period.new!(hour: 2, minute: 10)
      ...>   )
      iex> Tox.Interval.since_start(interval, now)
      {:ok, 3600}
      iex> Tox.Interval.since_start(interval, Tox.DateTime.shift(now, hour: 10))
      :error
  """
  @spec since_start(t(), DateTime.t(), System.time_unit()) :: {:ok, integer()} | :error
  def since_start(period, datetime, unit \\ :second) do
    case contains?(period, datetime) do
      true -> {:ok, DateTime.diff(datetime, start_datetime(period), unit)}
      false -> :error
    end
  end

  @doc """
  Returns `{:ok, amount}` where amount is the time until the ending of the
  interval.
  If the interval does not contain the given `datetime` an `:error` will be
  returned.
  ## Examples
      iex> now = DateTime.utc_now()
      iex> interval =
      ...>   Tox.Interval.new!(
      ...>     Tox.DateTime.shift(now, hour: -1),
      ...>     Tox.Period.new!(hour: 2, minute: 10)
      ...>   )
      iex> Tox.Interval.until_ending(interval, now)
      {:ok, 4200}
      iex> Tox.Interval.until_ending(interval, Tox.DateTime.shift(now, hour: 10))
      :error
  """
  @spec until_ending(t(), DateTime.t(), System.time_unit()) :: {:ok, integer()} | :error
  def until_ending(period, datetime, unit \\ :second) do
    case contains?(period, datetime) do
      true -> {:ok, DateTime.diff(ending_datetime(period), datetime, unit)}
      false -> :error
    end
  end

  # Helpers

  # Valid when boundaries is a known atom and the pair is DateTime/Period
  # in either order, or two DateTimes with start strictly before ending.
  # Period/Period carries no absolute anchor and is rejected.
  defp is_valid?(%start_module{} = start, %ending_module{} = ending, boundaries)
       when boundaries in [:open, :closed, :left_open, :right_open] do
    case {start_module, ending_module} do
      {Period, Period} -> false
      {DateTime, DateTime} -> DateTime.diff(start, ending) < 0
      {DateTime, Period} -> true
      {Period, DateTime} -> true
    end
  end

  defp is_valid?(_start, _ending, _boundaries), do: false

  defimpl Inspect do
    @spec inspect(Tox.Interval.t(), Inspect.Opts.t()) :: String.t()
    def inspect(interval, _opts) do
      "#Tox.Interval<#{to_string(interval)}>"
    end
  end

  defimpl String.Chars do
    @spec to_string(Tox.Interval.t()) :: String.t()
    def to_string(%{start: start, ending: ending, boundaries: boundaries}) do
      string = "#{boundary_to_string(start)}/#{boundary_to_string(ending)}"

      # Bracket orientation encodes open/closed ends (ISO-8601-like notation).
      case boundaries do
        :closed -> "[#{string}]"
        :open -> "]#{string}["
        :left_open -> "]#{string}]"
        :right_open -> "[#{string}["
      end
    end

    defp boundary_to_string(%DateTime{} = datetime), do: DateTime.to_iso8601(datetime)
    defp boundary_to_string(%Period{} = period), do: String.Chars.to_string(period)
  end
end
|
lib/tox/interval.ex
| 0.937311 | 0.698207 |
interval.ex
|
starcoder
|
defmodule RuleParser.Helper do
  @moduledoc """
  Helper functions for making parser work easy
  """
  import NimbleParsec

  # Tags may be dotted up to this many levels deep (see parse_tag/1).
  @max_nested 3

  @doc """
  Ignore white space and tab, and make it optional
  """
  @spec ignore_space() :: NimbleParsec.t()
  def ignore_space do
    parse_ws()
    |> ignore()
  end

  @doc """
  Ignore separator
  """
  @spec ignore_sep(binary()) :: NimbleParsec.t()
  def ignore_sep(sep), do: ignore(string(sep))

  @doc """
  Ignore bracket and the inner space
  """
  @spec ignore_bracket(char(), NimbleParsec.t(), char()) :: NimbleParsec.t()
  def ignore_bracket(left, combinator, right) do
    with_bracket =
      ignore(ascii_char([left]))
      |> concat(ignore_space())
      |> concat(combinator)
      |> concat(ignore_space())
      |> ignore(ascii_char([right]))

    # The bracketed form is optional: fall back to the bare combinator.
    choice([with_bracket, combinator])
  end

  @doc """
  Ignore both lowercase name and uppercase name for keyword
  """
  @spec ignore_keyword(binary()) :: NimbleParsec.t()
  def ignore_keyword(name) do
    upper = String.upcase(name)
    lower = String.downcase(name)
    ignore(choice([string(upper), string(lower)]))
  end

  @doc """
  Ignore predefined prefixes
  """
  @spec ignore_prefix(list(binary())) :: NimbleParsec.t()
  def ignore_prefix([]), do: empty()

  def ignore_prefix(names),
    do: ignore(choice(Enum.map(names, &string(&1)))) |> concat(ignore_sep("."))

  @doc """
  parse white space or tab
  """
  @spec parse_ws() :: NimbleParsec.t()
  def parse_ws, do: ascii_string([?\s, ?\t], min: 1) |> repeat()

  @doc """
  We fixed the 1st char must be a-z, so for opts if min/max is given, please consider to shift with 1.
  """
  @spec parse_tag(list()) :: NimbleParsec.t()
  def parse_tag(range \\ [?a..?z, ?_]) do
    # First segment: one a-z char followed by optional chars from `range`.
    p =
      ascii_string([?a..?z], max: 1)
      |> optional(ascii_string(range, min: 1))

    # Allow up to @max_nested additional dot-separated segments.
    result =
      Enum.reduce(1..@max_nested, p, fn _, acc ->
        acc
        |> optional(ascii_string([?.], max: 1))
        |> ascii_string([?a..?z], max: 1)
        |> optional(ascii_string(range, min: 1))
      end)

    result |> reduce({:parser_result_to_string, []})
  end

  @doc """
  Match integer
  """
  @spec parse_integer() :: NimbleParsec.t()
  def parse_integer do
    integer(min: 1)
  end

  @doc """
  Match string with quote, and inner quote, e.g. ~S("this is \"hello world\"")
  """
  @spec parse_string(char()) :: NimbleParsec.t()
  def parse_string(quote_char \\ ?") do
    ignore(ascii_char([quote_char]))
    |> repeat_while(
      choice([
        # An escaped quote is unescaped to the bare quote char.
        "\\#{<<quote_char>>}" |> string() |> replace(quote_char),
        utf8_char([])
      ]),
      {:parser_result_not_quote, [quote_char]}
    )
    # BUG FIX: the closing delimiter was hard-coded as ?" regardless of
    # `quote_char`, so parse_string(?') would never accept a closing '.
    |> ignore(ascii_char([quote_char]))
    |> reduce({List, :to_string, []})
  end

  @doc """
  Match an atom with space
  """
  @spec parse_atom() :: NimbleParsec.t()
  def parse_atom() do
    ignore(ascii_char([?:]))
    |> concat(parse_tag())
    |> reduce({:parser_result_to_atom, []})
  end

  @doc """
  Match a list of predefined ops
  """
  @spec parse_ops([String.t()]) :: NimbleParsec.t()
  def parse_ops(ops) do
    choice(Enum.map(ops, fn op -> op_replace(op) end))
  end

  # repeat_while predicate: halt as soon as the next byte is the closing quote.
  def parser_result_not_quote(<<quote_char::size(8), _::binary>>, context, _, _, char)
      when quote_char == char,
      do: {:halt, context}

  def parser_result_not_quote(_, context, _, _, _), do: {:cont, context}

  def parser_result_to_string([start]), do: start
  def parser_result_to_string(v) when is_list(v), do: Enum.join(v)

  # NOTE(review): String.to_atom/1 on parser output creates atoms
  # dynamically; safe only if the input rules are trusted.
  def parser_result_to_atom([v]), do: String.to_atom(v)

  # private function
  # "=" in rules is normalized to Elixir's "==" comparison operator.
  defp op_replace("=" = op), do: string(op) |> replace("==")
  defp op_replace(op), do: string(op)
end
|
lib/helper.ex
| 0.722331 | 0.410461 |
helper.ex
|
starcoder
|
defmodule Galena.Producer do
  @moduledoc """
  **Galena.Producer** is a customized `GenStage` producer which uses
  `GenStage.BroadcastDispatcher` as dispatcher.
  ### Definition
  ```elixir
  defmodule MyProducer do
    use Galena.Producer

    def handle_produce({topic, message}) do
      {topic, message}
    end
  end
  ```
  ### Start Up
  ```elixir
  {:ok, producer} = MyProducer.start_link([], [name: :producer])
  ```
  ### Data Ingestion
  ```elixir
  MyProducer.ingest(:producer, data)
  ```
  """
  @type topic :: any
  @type message :: any
  @type data :: any

  @doc """
  It will be executed just before a message is sent to the consumers (or producer-consumers).
  The input of the function can be whatever type.
  The output of that function has to be a tuple where the first parameter will be the topic and the second one the message.
  """
  @callback handle_produce(data) :: {topic, message}

  # Delay (ms) used when a producer ingests into itself; the message is
  # deferred via Process.send_after so the current callback can finish.
  @sleep_time 1

  defmacro __using__(_) do
    quote do
      @behaviour Galena.Producer
      use GenStage
      require Logger

      def start_link(state, opts) do
        GenStage.start_link(__MODULE__, state, opts)
      end

      # Self-ingestion must not use GenStage.cast/2 (a process cannot
      # synchronously dispatch to itself mid-callback), so it is deferred
      # with send_after; otherwise the data is cast to the producer.
      def ingest(producer, data) do
        pid = self()

        case producer do
          ^pid -> Process.send_after(pid, {:message, data}, @sleep_time)
          _ -> GenStage.cast(producer, {:message, data})
        end
      end

      def init(_state) do
        {:producer, %{}, dispatcher: GenStage.BroadcastDispatcher}
      end

      # Demand is ignored: this producer is push-based and only emits
      # events when data is ingested.
      def handle_demand(demand, state) do
        {:noreply, [], state}
      end

      # Cast path: transform via the behaviour callback and emit one event.
      def handle_cast({:message, data}, state) do
        {:noreply, [handle_produce(data)], state}
      end

      # Deferred self-ingestion path (see ingest/2 above).
      def handle_info({:message, data}, state) do
        {:noreply, [handle_produce(data)], state}
      end

      # Drop any other message instead of letting it accumulate.
      def handle_info(_msg, state) do
        {:noreply, [], state}
      end
    end
  end

  @doc """
  This function is the responsible of the data ingestion by the chosen producer. The data can be whatever.
  """
  def ingest(producer, data)
end
|
lib/galena/producer.ex
| 0.668339 | 0.746647 |
producer.ex
|
starcoder
|
defmodule Imagineer.Image.PNG.Filter.Basic.Paeth do
  # Decoder for PNG basic filter type 4 (Paeth). Works pixel-by-pixel across a
  # row and byte-by-byte within each pixel, carrying the previously decoded
  # left / upper-left pixels along the recursion.
  import Imagineer.Image.PNG.Helpers, only: [null_binary: 1]

  @doc """
  Takes in the uncompressed binary representation of a row, the unfiltered
  row above it, and the number of bytes per pixel. Decodes according to the
  Paeth filter.

  For more information, see the PNG documentation for the [Paeth filter type]
  (http://www.w3.org/TR/PNG-Filters.html#Filter-type-4-Paeth)

  ## Examples

      iex> unfiltered_prior_row = <<18, 39, 117, 39, 201, 7>>
      iex> filtered_row = <<86, 5, 226, 185, 146, 181>>
      iex> Imagineer.Image.PNG.Filter.Basic.Paeth.unfilter(filtered_row, unfiltered_prior_row, 3)
      <<104, 44, 87, 33, 91, 188>>

      iex> unfiltered_prior_row = <<18, 39, 117, 39, 201, 7>>
      iex> filtered_row = <<86, 5, 226, 245, 146, 181>>
      iex> Imagineer.Image.PNG.Filter.Basic.Paeth.unfilter(filtered_row, unfiltered_prior_row, 2)
      <<104, 44, 87, 33, 91, 188>>
  """
  def unfilter(row, prior_row, bytes_per_pixel) do
    # For the first pixel, which has no upper left or left, we fill them in as
    # null-filled binaries (`<<0>>`.)
    upper_left_ghost_pixel = left_ghost_pixel = null_binary(bytes_per_pixel)

    unfilter(row, prior_row, left_ghost_pixel, upper_left_ghost_pixel, bytes_per_pixel, [])
    |> Enum.join()
  end

  # In the base case, we'll have a reversed list of binaries, each containing
  # the unfiltered bytes of their respective pixel
  defp unfilter(<<>>, <<>>, _left_pixel, _upper_left_pixel, _bytes_per_pixel, unfiltered_pixels) do
    Enum.reverse(unfiltered_pixels)
  end

  # Recursive step: peel one pixel off the filtered row and the prior row,
  # decode it, then carry it forward as the next pixel's left / upper-left.
  defp unfilter(row, prior_row, left_pixel, upper_left_pixel, bytes_per_pixel, unfiltered_pixels) do
    <<row_pixel::bytes-size(bytes_per_pixel), row_rest::binary>> = row
    <<above_pixel::bytes-size(bytes_per_pixel), prior_row_rest::binary>> = prior_row
    unfiltered_pixel = unfilter_pixel(row_pixel, left_pixel, above_pixel, upper_left_pixel)

    unfilter(row_rest, prior_row_rest, unfiltered_pixel, above_pixel, bytes_per_pixel, [
      unfiltered_pixel | unfiltered_pixels
    ])
  end

  # Decodes a single pixel byte-by-byte against its three neighbor pixels and
  # joins the resulting bytes back into one binary.
  defp unfilter_pixel(row_pixel, left_pixel, above_pixel, upper_left_pixel) do
    unfilter_pixel(row_pixel, left_pixel, above_pixel, upper_left_pixel, [])
    |> Enum.join()
  end

  # In the base case, we'll have run through each of the bytes and have a
  # reversed list of unfiltered bytes
  defp unfilter_pixel(<<>>, <<>>, <<>>, <<>>, unfiltered_pixel_bytes) do
    Enum.reverse(unfiltered_pixel_bytes)
  end

  # Paeth(x) + PaethPredictor(Raw(x-bpp), Prior(x), Prior(x-bpp))
  defp unfilter_pixel(
         <<filtered_pixel_byte::integer-size(8), filtered_pixel_rest::binary>>,
         <<left_pixel_byte::integer-size(8), left_pixel_rest::binary>>,
         <<above_pixel_byte::integer-size(8), above_pixel_rest::binary>>,
         <<upper_left_pixel_byte::integer-size(8), upper_left_pixel_rest::binary>>,
         unfiltered_pixel_bytes
       ) do
    nearest_byte = predictor(left_pixel_byte, above_pixel_byte, upper_left_pixel_byte)
    # Binary construction with the default 8-bit size truncates the sum,
    # giving the mod-256 addition the filter requires.
    unfiltered_byte = <<filtered_pixel_byte + nearest_byte>>

    unfilter_pixel(
      filtered_pixel_rest,
      left_pixel_rest,
      above_pixel_rest,
      upper_left_pixel_rest,
      [unfiltered_byte | unfiltered_pixel_bytes]
    )
  end

  @doc """
  The Paeth prediction is calculated as `left + above - upper_left`.
  This function returns the value nearest to the Paeth prediction, breaking ties
  in the order of left, above, upper_left.

  For more information, see the PNG documentation for the [Paeth filter type]
  (http://www.w3.org/TR/PNG-Filters.html#Filter-type-4-Paeth)

  ## Example

      iex> Imagineer.Image.PNG.Filter.Basic.Paeth.predictor(37, 84, 1)
      84
      iex> Imagineer.Image.PNG.Filter.Basic.Paeth.predictor(118, 128, 125)
      118
      iex> Imagineer.Image.PNG.Filter.Basic.Paeth.predictor(37, 84, 61)
      61
  """
  def predictor(left, above, upper_left) do
    prediction = left + above - upper_left
    nearest_to_prediction(prediction, left, above, upper_left)
  end

  # Guard-based selection of the neighbor closest to the prediction; the
  # clause order implements the left > above > upper_left tie-breaking.
  defp nearest_to_prediction(prediction, left, above, upper_left)
       when abs(prediction - left) <= abs(prediction - above) and
              abs(prediction - left) <= abs(prediction - upper_left) do
    left
  end

  defp nearest_to_prediction(prediction, _left, above, upper_left)
       when abs(prediction - above) <= abs(prediction - upper_left) do
    above
  end

  defp nearest_to_prediction(_prediction, _left, _above, upper_left) do
    upper_left
  end
end
|
lib/imagineer/image/png/filter/basic/paeth.ex
| 0.826607 | 0.505127 |
paeth.ex
|
starcoder
|
defmodule Base.Sink do
  @moduledoc """
  Shared implementation for Membrane sink elements used in throughput tests.

  `use Base.Sink` pulls in `Membrane.Sink` and the `def_options_with_default`
  macro. The callbacks below count incoming buffers, sample per-message passing
  times and report metrics (throughput, passing-time average / standard
  deviation, generator frequency) to a supervising process.

  NOTE(review): this module sits inside the standard-library `Base` namespace;
  a project-specific prefix would avoid the name clash.
  """
  alias Membrane.Buffer

  defmacro __using__(_opts) do
    quote do
      use Membrane.Sink
      import Base.Sink, only: [def_options_with_default: 1, def_options_with_default: 0]
    end
  end

  # Declares the option set common to all benchmark sinks. `further_options`
  # lets a concrete sink splice its own `def_options` entries in front.
  defmacro def_options_with_default(further_options \\ []) do
    quote do
      def_options [
        unquote_splicing(further_options),
        tick: [
          type: :integer,
          spec: pos_integer,
          description:
            "Positive integer, describing number of ticks after which the message to calculate the throughput should be send"
        ],
        how_many_tries: [
          type: :integer,
          spec: pos_integer,
          description: "Positive integer, indicating how many meassurements should be made"
        ],
        numerator_of_probing_factor: [
          type: :integer,
          spec: pos_integer,
          description:
            "Numerator of the probing factor: X/Y meaning that X out of Y message passing times will be saved in the state."
        ],
        denominator_of_probing_factor: [
          type: :integer,
          spec: pos_integer,
          description:
            "Denominator of the probing factor: X/Y meaning that X out of Y message passing times will be saved in the state."
        ],
        should_produce_plots?: [
          type: :boolean,
          description:
            "True, if the .svg files containing the plots of the passing times for the messages should be printed, false otherwise"
        ],
        supervisor_pid: [
          type: :pid,
          description:
            "PID of the process which should be informed about the metrics gathered during the test. After the test is finished, that process
          will receive {:result_metrics, metrics_list} message."
        ],
        chosen_metrics: [
          type: :list,
          description: "List of atoms corresponding to available metrics"
        ]
      ]
    end
  end

  # State layout:
  # * `metrics`          - aggregated results reported to the supervisor
  # * `single_try_state` - counters/accumulators for the current measurement run
  # * `global_state`     - run counter plus :playing / :flushing status
  def handle_init(opts) do
    state = %{
      opts: opts,
      metrics: %{throughput: 0, passing_time_avg: 0, passing_time_std: 0, generator_frequency: 0},
      single_try_state: %{
        message_count: 0,
        start_time: 0,
        sum: 0,
        squares_sum: 0,
        times: []
      },
      global_state: %{result_metrics: [], tries_counter: 0, status: :playing}
    }

    {:ok, state}
  end

  # Normal measurement path: on the first buffer of a run, start the :tick
  # timer and record the start time; then accumulate passing-time sums (and,
  # for a random subset, the raw times) for the statistics.
  def handle_write(:input, buffer, _context, state) when state.global_state.status == :playing do
    state =
      if state.single_try_state.message_count == 0 do
        Process.send_after(self(), :tick, state.opts.tick)

        %{
          state
          | single_try_state: %{
              state.single_try_state
              | start_time: Membrane.Time.monotonic_time()
            }
        }
      else
        state
      end

    # Passing time of this buffer. Assumes buffer.pts carries the send
    # timestamp on the same Membrane.Time clock -- confirm against the source.
    time = Membrane.Time.monotonic_time() - buffer.pts

    # Probabilistic sampling: keep roughly numerator/denominator of all times.
    state =
      if :rand.uniform(state.opts.denominator_of_probing_factor) <=
           state.opts.numerator_of_probing_factor do
        single_try_state =
          Map.update!(
            state.single_try_state,
            :times,
            &[{buffer.pts - state.single_try_state.start_time, time} | &1]
          )

        %{state | single_try_state: single_try_state}
      else
        state
      end

    single_try_state = Map.update!(state.single_try_state, :message_count, &(&1 + 1))
    single_try_state = Map.update!(single_try_state, :sum, &(&1 + time))
    single_try_state = Map.update!(single_try_state, :squares_sum, &(&1 + time * time))
    {{:ok, []}, %{state | single_try_state: single_try_state}}
  end

  # End-of-run marker: compute passing-time statistics, report metrics, decide
  # how the next run should be driven, then reset the per-run state.
  def handle_write(
        :input,
        %Buffer{payload: :flush, metadata: generator_frequency},
        _ctx,
        state
      )
      when state.global_state.status == :flushing do
    passing_time_avg = state.single_try_state.sum / state.single_try_state.message_count

    # Sample standard deviation via the expanded form of
    # sqrt(sum((x - avg)^2) / (n - 1)); requires message_count > 1.
    passing_time_std =
      :math.sqrt(
        (state.single_try_state.squares_sum +
           state.single_try_state.message_count * passing_time_avg * passing_time_avg -
           2 * passing_time_avg * state.single_try_state.sum) /
          (state.single_try_state.message_count - 1)
      )

    state = %{
      state
      | metrics: %{
          state.metrics
          | passing_time_avg: passing_time_avg,
            passing_time_std: passing_time_std,
            generator_frequency: generator_frequency
        }
    }

    write_demanded_metrics(state)

    # the first run is the warm-up run
    specification =
      if state.global_state.tries_counter == 0 do
        :the_same
      else
        check_normality(
          state.single_try_state.times,
          passing_time_avg,
          passing_time_std,
          state.metrics.throughput,
          generator_frequency
        )
      end

    actions =
      if state.global_state.tries_counter == state.opts.how_many_tries do
        send(
          state.opts.supervisor_pid,
          :finished
        )

        []
      else
        [notify: {:play, specification}]
      end

    format = [
      bar_color: [IO.ANSI.white(), IO.ANSI.green_background()],
      blank_color: IO.ANSI.red_background()
    ]

    throughput_string = " THROUGHPUT: #{:erlang.float_to_binary(state.metrics.throughput, decimals: 2)} msg/s "

    ProgressBar.render(
      state.global_state.tries_counter + 1,
      state.opts.how_many_tries + 1,
      throughput_string,
      format
    )

    # Reset per-run accumulators and return to :playing for the next try.
    state = %{
      state
      | single_try_state: %{
          state.single_try_state
          | message_count: 0,
            sum: 0,
            squares_sum: 0,
            times: []
        },
        global_state: %{
          state.global_state
          | tries_counter: state.global_state.tries_counter + 1,
            status: :playing
        }
    }

    {{:ok, actions}, state}
  end

  # While flushing, drop any remaining in-flight buffers.
  def handle_write(:input, _msg, _ctx, state) when state.global_state.status == :flushing do
    {{:ok, []}, state}
  end

  # Tick timer: compute throughput for the elapsed window, then ask the
  # pipeline to flush and switch this sink into the :flushing state.
  def handle_other(:tick, _ctx, state) do
    elapsed =
      (Membrane.Time.monotonic_time() - state.single_try_state.start_time) /
        Membrane.Time.second()

    throughput = state.single_try_state.message_count / elapsed

    {actions, state} =
      {[notify: :flush], %{state | global_state: %{state.global_state | status: :flushing}}}

    state = %{state | metrics: %{state.metrics | throughput: throughput}}
    {{:ok, actions}, state}
  end

  # NOTE(review): despite the name, no normality test is performed -- the
  # decision is purely threshold-based on the average and standard deviation
  # (nanosecond values, per the 20ms comments below).
  defp check_normality(
         _times,
         passing_time_avg,
         passing_time_std,
         _throughput,
         _generator_frequency
       ) do
    cond do
      # average passing time of a message is greater than 20ms which is unacceptable, therfore we need to slow down the message generation
      passing_time_avg > 20_000_000 ->
        :slower

      # average passing time of a message is less than 20ms, but the standard deviation is relatively too high
      passing_time_std > 10_000_000 and passing_time_std > 0.5 * passing_time_avg ->
        :slower

      true ->
        :faster
    end
  end

  # Sends the subset of metrics selected by the :chosen_metrics option to the
  # supervisor as a {:new_metrics, map} message.
  defp write_demanded_metrics(state) do
    new_metrics =
      state.opts.chosen_metrics
      |> Enum.map(fn key -> {key, Bunch.Access.get_in(state, key)} end)
      |> Map.new()

    send(
      state.opts.supervisor_pid,
      {:new_metrics, new_metrics}
    )
  end
end
|
lib/Base/Sink.ex
| 0.659734 | 0.506774 |
Sink.ex
|
starcoder
|
defmodule ResxBase.Encoder do
  @moduledoc """
  Encode data resources into a RFC 4648 encoding.

  ### Encoding

  The type of encoding is specified by using the `:encoding` option.

      Resx.Resource.transform(resource, ResxBase.Encoder, encoding: :base64)

  The available encoding formats are:

  * `:base16` - by default this works the same as `Base.encode16/1`; the
    `:case` option (`:lower` or `:upper`) selects the output case.
  * `:base32` - by default this works the same as `Base.encode32/1`.
  * `:base64` - by default this works the same as `Base.encode64/1`.
  * `:hex32` - by default this works the same as `Base.hex_encode32/1`.
  * `:url64` - by default this works the same as `Base.url_encode64/1`.

  All encodings also take the configuration options specified in `ResxBase`.

  ### Streams

  Streams are encoded by forming a complete sequence per chunk and separating
  each encoded sequence with the necessary amount of padding characters, e.g.
  a stream of `["hello", " ", "world"]` is encoded as 3 individual base64
  sequences, resulting in `"aGVsbG8=IA==d29ybGQ="` rather than the single
  sequence `"aGVsbG8gd29ybGQ="`.
  """
  use Resx.Transformer

  alias Resx.Resource.Content

  @impl Resx.Transformer
  def transform(resource, opts) do
    case chunk_encoder(opts[:encoding], opts) do
      {:ok, fun} -> encode_content(resource, fun)
      {:error, _} = error -> error
    end
  end

  # Resolves the :encoding option to a one-argument encoding function.
  defp chunk_encoder(:base16, opts) do
    case opts[:case] || :upper do
      :lower -> {:ok, &ResxBase.encode16_lower(&1, opts)}
      :upper -> {:ok, &ResxBase.encode16_upper(&1, opts)}
    end
  end

  defp chunk_encoder(encoding, opts) when encoding in [:base32, :hex32] do
    encoding_opts = opts ++ [pad_chr: "=", multiple: 8]

    case encoding do
      :base32 -> {:ok, &ResxBase.encode32(&1, encoding_opts)}
      :hex32 -> {:ok, &ResxBase.hex_encode32(&1, encoding_opts)}
    end
  end

  defp chunk_encoder(encoding, opts) when encoding in [:base64, :url64] do
    encoding_opts = opts ++ [pad_chr: "=", multiple: 4]

    case encoding do
      :base64 -> {:ok, &ResxBase.encode64(&1, encoding_opts)}
      :url64 -> {:ok, &ResxBase.url_encode64(&1, encoding_opts)}
    end
  end

  defp chunk_encoder(encoding, _opts) do
    {:error, {:internal, "Unknown encoding format: #{inspect(encoding)}"}}
  end

  # Lazily applies the encoder to every chunk of the resource's content stream.
  defp encode_content(resource = %{content: content}, fun) do
    stream = Content.Stream.new(content)
    {:ok, %{resource | content: %{stream | data: Stream.map(stream, fun)}}}
  end
end
|
lib/resx_base/encoder.ex
| 0.917469 | 0.479626 |
encoder.ex
|
starcoder
|
defmodule Modbux.Request do
  @moduledoc """
  Request helper, functions that handles Client & Master request messages.
  """
  alias Modbux.Helper

  # `pack/1` encodes a request tuple into a Modbus PDU binary:
  # slave id, function code, 16-bit address, then count/value(s).
  # The first argument of reads/write/writes tags the register kind:
  # `:d` for discrete/coil (bit) values, `:a` for analog (16-bit register)
  # values -- inferred from the function codes used below.
  @spec pack({:fc | :phr | :rc | :rhr | :ri | :rir, integer, integer, maybe_improper_list | integer}) ::
          <<_::48, _::_*8>>
  # Function code 1: read coils
  def pack({:rc, slave, address, count}) do
    reads(:d, slave, 1, address, count)
  end

  # Function code 2: read discrete inputs
  def pack({:ri, slave, address, count}) do
    reads(:d, slave, 2, address, count)
  end

  # Function code 3: read holding registers
  def pack({:rhr, slave, address, count}) do
    reads(:a, slave, 3, address, count)
  end

  # Function code 4: read input registers
  def pack({:rir, slave, address, count}) do
    reads(:a, slave, 4, address, count)
  end

  # Function code 5: write a single coil
  def pack({:fc, slave, address, value}) when is_integer(value) do
    write(:d, slave, 5, address, value)
  end

  # Function code 6: write a single holding register
  def pack({:phr, slave, address, value}) when is_integer(value) do
    write(:a, slave, 6, address, value)
  end

  # Function code 15: write multiple coils
  def pack({:fc, slave, address, values}) when is_list(values) do
    writes(:d, slave, 15, address, values)
  end

  # Function code 16: write multiple holding registers
  def pack({:phr, slave, address, values}) when is_list(values) do
    writes(:a, slave, 16, address, values)
  end

  # `parse/1` decodes a PDU binary back into the tuple form used by `pack/1`.
  @spec parse(<<_::24, _::_*8>>) ::
          {:einval | :error | :fc | :phr | :rc | :rhr | :ri | :rir, byte, char, [any] | char}
  def parse(<<slave, 1, address::16, count::16>>) do
    {:rc, slave, address, count}
  end

  def parse(<<slave, 2, address::16, count::16>>) do
    {:ri, slave, address, count}
  end

  def parse(<<slave, 3, address::16, count::16>>) do
    {:rhr, slave, address, count}
  end

  def parse(<<slave, 4, address::16, count::16>>) do
    {:rir, slave, address, count}
  end

  # Single-coil writes only accept 0x0000 (off) or 0xFF00 (on) as payload;
  # other values fall through to the :einval clause below.
  def parse(<<slave, 5, address::16, 0x00, 0x00>>) do
    {:fc, slave, address, 0}
  end

  def parse(<<slave, 5, address::16, 0xFF, 0x00>>) do
    {:fc, slave, address, 1}
  end

  def parse(<<slave, 6, address::16, value::16>>) do
    {:phr, slave, address, value}
  end

  # Multiple-write frames; `^bytes` asserts the declared byte count matches
  # the bit/register count (raises MatchError on malformed frames).
  def parse(<<slave, 15, address::16, count::16, bytes, data::binary>>) do
    ^bytes = Helper.byte_count(count)
    values = Helper.bin_to_bitlist(count, data)
    {:fc, slave, address, values}
  end

  def parse(<<slave, 16, address::16, count::16, bytes, data::binary>>) do
    ^bytes = 2 * count
    values = Helper.bin_to_reglist(count, data)
    {:phr, slave, address, values}
  end

  # Exceptions clauses
  # 129..144 (0x81..0x90) are exception responses: request fc + 0x80.
  def parse(<<slave, fc, error_code, _b_tail::binary>>) when fc in 129..144 do
    {:error, slave, fc, error_code}
  end

  # Anything else is an invalid frame.
  def parse(<<slave, fc, error_code, _b_tail::binary>>) do
    {:einval, slave, fc, error_code}
  end

  # `length/1` returns the byte length of the PDU that `pack/1` would produce
  # for the given request tuple.
  @spec length({:fc | :phr | :rc | :rhr | :ri | :rir, any, any, any}) :: integer
  # All reads and single-value writes encode to a fixed 6-byte PDU.
  def length({:rc, _slave, _address, _count}) do
    6
  end

  def length({:ri, _slave, _address, _count}) do
    6
  end

  def length({:rhr, _slave, _address, _count}) do
    6
  end

  def length({:rir, _slave, _address, _count}) do
    6
  end

  def length({:fc, _slave, _address, value}) when is_integer(value) do
    6
  end

  def length({:phr, _slave, _address, value}) when is_integer(value) do
    6
  end

  # Multi-writes add a byte-count byte plus the packed payload.
  def length({:fc, _slave, _address, values}) when is_list(values) do
    7 + Helper.byte_count(Enum.count(values))
  end

  def length({:phr, _slave, _address, values}) when is_list(values) do
    7 + 2 * Enum.count(values)
  end

  # Read requests have identical encoding for both register kinds.
  defp reads(_type, slave, function, address, count) do
    <<slave, function, address::16, count::16>>
  end

  # Single coil write: the value is mapped onto the 0xFF00/0x0000 wire form.
  defp write(:d, slave, function, address, value) do
    <<slave, function, address::16, Helper.bool_to_byte(value), 0x00>>
  end

  defp write(:a, slave, function, address, value) do
    <<slave, function, address::16, value::16>>
  end

  defp writes(:d, slave, function, address, values) do
    count = Enum.count(values)
    bytes = Helper.byte_count(count)
    data = Helper.bitlist_to_bin(values)
    <<slave, function, address::16, count::16, bytes, data::binary>>
  end

  defp writes(:a, slave, function, address, values) do
    count = Enum.count(values)
    bytes = 2 * count
    data = Helper.reglist_to_bin(values)
    <<slave, function, address::16, count::16, bytes, data::binary>>
  end
end
|
lib/helpers/request.ex
| 0.645008 | 0.435962 |
request.ex
|
starcoder
|
defmodule Panpipe do
  @moduledoc """
  An Elixir wrapper around Pandoc.

  The `Panpipe.Pandoc` module implements a wrapper around the Pandoc CLI.
  The `Panpipe.AST.Node` behaviour defines the functions implemented by all
  nodes of a Panpipe AST.
  """

  alias Panpipe.Pandoc

  defdelegate pandoc(input_or_opts, opts \\ nil), to: Pandoc, as: :call
  defdelegate pandoc!(input_or_opts, opts \\ nil), to: Pandoc, as: :call!
  defdelegate transform(node, fun), to: Panpipe.AST.Node

  @doc """
  Creates the Panpipe AST representation of some input.

  Accepts the same arguments as `Panpipe.Pandoc.call/2`, which is invoked
  implicitly to obtain the Pandoc AST representation.

  The result is returned in an `:ok` tuple.
  """
  def ast(input_or_opts, opts \\ nil) do
    case Pandoc.ast(input_or_opts, opts) do
      {:ok, pandoc_ast} -> {:ok, Panpipe.Pandoc.AST.Node.to_panpipe(pandoc_ast)}
      error -> error
    end
  end

  @doc """
  Like `ast/2`, but returns the result directly in the success case and raises on error.
  """
  def ast!(input_or_opts, opts \\ nil) do
    case ast(input_or_opts, opts) do
      {:ok, result} -> result
      {:error, error} -> raise error
    end
  end

  @doc """
  Creates an `Panpipe.AST.Node` of some input without the surrounding `Document` structure.
  """
  def ast_fragment(input_or_opts, opts \\ nil) do
    with {:ok, document} <- ast(input_or_opts, opts) do
      extract_fragment(document)
    end
  end

  # Unwraps the single node contained in a parsed document, or wraps multiple
  # top-level children into a Plain node.
  defp extract_fragment(%Panpipe.Document{children: [%Panpipe.AST.Para{children: [fragment]}]}),
    do: {:ok, fragment}

  defp extract_fragment(%Panpipe.Document{children: [fragment]}),
    do: {:ok, fragment}

  defp extract_fragment(%Panpipe.Document{children: children}) when is_list(children),
    do: {:ok, %Panpipe.AST.Plain{children: children}}

  defp extract_fragment(other),
    do: {:error, "unable to extract ast_fragment from #{inspect other}"}

  @doc """
  Like `ast_fragment/2`, but returns the result directly in the success case and raises on error.
  """
  def ast_fragment!(input_or_opts, opts \\ nil) do
    case ast_fragment(input_or_opts, opts) do
      {:ok, result} -> result
      {:error, error} -> raise error
    end
  end

  # Generate one to_<format>/2 convenience function per supported output format.
  for output_format <- Panpipe.Pandoc.output_formats() do
    @doc """
    Calls `pandoc/1` with the option `to: :#{to_string(output_format)}` automatically set.

    It also accepts `Panpipe.AST.Node`s. `pandoc/1` will then be called with
    Pandoc AST form of the node.

    By default the converted output by Pandoc always ends with a newline. This
    can not be what you want, esp. when you convert small fragments by passing
    nodes directly. For this reason Panpipe will remove this newline by default
    for inline nodes, but keeps them on block nodes. You can control whether
    they should be removed manually with the `remove_trailing_newline` option.

    Note: This function only works with directly passed strings or nodes. If you
    want to convert a file using the `input` option, you'll have to read the file
    first manually or use `pandoc/1` directly.
    """
    def unquote(:"to_#{output_format}")(input, opts \\ []) do
      Panpipe.Pandoc.Conversion.convert(input, Keyword.put(opts, :to, unquote(output_format)))
    end
  end
end
|
lib/panpipe.ex
| 0.781205 | 0.672308 |
panpipe.ex
|
starcoder
|
defmodule LibPE do
  @moduledoc """
  Implementation of the Windows PE executable format for reading and writing PE binaries.

  Most struct member names are taken directly from the windows documentation:
  https://docs.microsoft.com/en-us/windows/win32/debug/pe-format

  This library has been created specifically to achieve the following:

  * Update the PE checksum in `erl.exe` after making changes
  * Insert a Microsoft manifest file after compilation: https://docs.microsoft.com/de-de/windows/win32/sbscs/application-manifests
  * Insert an Executable Icon after compilation
  """

  # Size of the MS-DOS ("MZ") header in 16-byte paragraphs: 4 * 16 = 64 bytes.
  @exe_header_size 4

  defstruct [
    # 6 raw bytes of the DOS header following "MZ"
    :meta,
    # 50 raw bytes of the DOS header preceding the PE-offset field
    :meta2,
    # bytes between the 64-byte DOS header and the PE signature
    :msdos_stub,
    # `:pe` once a "PE\0\0" signature was parsed
    :format,
    :machine,
    :timestamp,
    :object_offset,
    :object_entry_count,
    # %LibPE.OptionalHeader{}
    :coff_header,
    :coff_flags,
    # list of %LibPE.Section{}
    :coff_sections,
    # trailing file bytes beyond the encoded image (kept for round-tripping)
    :rest
  ]

  @doc """
  Reads and parses a PE file from disk.
  """
  @spec parse_file(binary()) :: {:ok, %LibPE{}}
  def parse_file(filename) do
    parse_string(File.read!(filename))
  end

  @doc """
  Parses a PE image from a binary.

  Any bytes beyond what the parsed structure re-encodes to are preserved in
  `:rest` so that `encode/1` can reproduce the input byte-for-byte.
  """
  @spec parse_string(binary()) :: {:ok, %LibPE{}}
  def parse_string(
        <<"MZ", meta::binary-size(6), @exe_header_size::little-size(16), meta2::binary-size(50),
          offset::little-size(32), rest::binary>> = full_image
      ) do
    # exe header is always 64 bytes
    stub_size = offset - @exe_header_size * 16
    <<msdos_stub::binary-size(stub_size), coff::binary>> = rest

    pe =
      %LibPE{meta: meta, meta2: meta2, msdos_stub: msdos_stub, rest: ""}
      |> parse_coff(coff, full_image)

    # Whatever the encoder would not reproduce is stored verbatim in :rest.
    end_offset = byte_size(encode(pe))

    pe =
      if end_offset < byte_size(full_image) do
        %LibPE{pe | rest: binary_part(full_image, end_offset, byte_size(full_image) - end_offset)}
      else
        pe
      end

    {:ok, pe}
  end

  @doc """
  Encodes the PE structure back into a binary image.
  """
  @spec encode(%LibPE{}) :: binary()
  def encode(%LibPE{rest: rest, coff_sections: sections, coff_header: header} = pe) do
    image = encode_header(pe)

    # Compare with Section.parse_section()
    # We assume the `raw_data` is the actual "virtual" payload
    # So we fit it here (padding or cutting) into the raw size because
    # often the real payload is smaller than the file space, but sometimes it can
    # be larger as well (when the tail is only made of zeros)
    image =
      Enum.reduce(sections, image, fn sec, image ->
        binary_pad_trailing(image, sec.pointer_to_raw_data) <> sec.raw_data
      end)

    # The certificate table (if any) lives outside the sections.
    image =
      if header.certificate_data != nil do
        {start, _size} = header.certificate_table
        binary_pad_trailing(image, start) <> header.certificate_data
      else
        image
      end

    image <> rest
  end

  # Encodes the DOS header, DOS stub and COFF headers (no section payloads).
  defp encode_header(%LibPE{meta: meta, meta2: meta2, msdos_stub: msdos_stub} = pe) do
    stub_size = byte_size(msdos_stub)
    offset = stub_size + @exe_header_size * 16
    coff = encode_coff(pe)

    <<"MZ", meta::binary-size(6), @exe_header_size::little-size(16), meta2::binary-size(50),
      offset::little-size(32), msdos_stub::binary-size(stub_size), coff::binary>>
  end

  # Consume the "PE\0\0" signature, then parse the COFF file header proper.
  defp parse_coff(pe, <<"PE\0\0", rest::binary>>, full_image) do
    parse_coff(%LibPE{pe | format: :pe}, rest, full_image)
  end

  defp parse_coff(
         pe = %LibPE{},
         <<machine::little-size(16), number_of_sections::little-size(16),
           timestamp::little-size(32), object_offset::little-size(32),
           object_entry_count::little-size(32), coff_header_size::little-size(16),
           coff_flags::little-size(16), rest::binary>>,
         full_image
       ) do
    <<header::binary-size(coff_header_size), rest::binary>> = rest
    header = LibPE.OptionalHeader.parse(header, full_image)
    {sections, _rest} = LibPE.Section.parse(rest, number_of_sections, full_image)

    %LibPE{
      pe
      | machine: LibPE.MachineType.decode(machine),
        timestamp: timestamp,
        object_offset: object_offset,
        object_entry_count: object_entry_count,
        coff_header: header,
        coff_flags: LibPE.Characteristics.decode(coff_flags),
        coff_sections: sections
    }
  end

  defp encode_coff(%LibPE{format: :pe} = pe) do
    "PE\0\0" <> encode_coff(%LibPE{pe | format: nil})
  end

  defp encode_coff(%LibPE{
         machine: machine,
         timestamp: timestamp,
         object_offset: object_offset,
         object_entry_count: object_entry_count,
         coff_header: header,
         coff_flags: coff_flags,
         coff_sections: sections
       }) do
    machine = LibPE.MachineType.encode(machine)
    coff_flags = LibPE.Characteristics.encode(coff_flags)
    header = LibPE.OptionalHeader.encode(header)
    coff_header_size = byte_size(header)
    number_of_sections = length(sections)

    sections =
      Enum.map(sections, &LibPE.Section.encode/1)
      |> Enum.join()

    <<machine::little-size(16), number_of_sections::little-size(16), timestamp::little-size(32),
      object_offset::little-size(32), object_entry_count::little-size(32),
      coff_header_size::little-size(16), coff_flags::little-size(16), header::binary,
      sections::binary>>
  end

  @doc """
  Update the PE image checksum of a PE file.
  """
  def update_checksum(%LibPE{} = pe) do
    # The checksum is computed over the image with the checksum field zeroed.
    tmp_pe =
      %LibPE{pe | coff_header: %LibPE.OptionalHeader{pe.coff_header | checksum: 0}}
      |> encode()

    # size = byte_size(tmp_pe) + byte_size(LibPE.OptionalHeader.encode_checksum(pe.coff_header))
    size = byte_size(tmp_pe)

    # correcting size for the missing checksum field
    new_checksum = LibPE.Checksum.checksum(tmp_pe, size)
    %LibPE{pe | coff_header: %LibPE.OptionalHeader{pe.coff_header | checksum: new_checksum}}
  end

  @doc """
  Update the section & certificate layout after a section size has
  been changed
  """
  @quad_word_size 64
  def update_layout(%LibPE{coff_sections: sections, coff_header: header} = pe) do
    offset = byte_size(encode_header(pe))

    %LibPE.OptionalHeader{file_alignment: file_alignment, section_alignment: virtual_alignment} =
      header

    # Walk the sections, assigning file (raw) and memory (virtual) offsets,
    # each rounded up to its respective alignment.
    {sections, offsets} =
      Enum.map_reduce(sections, {offset, offset}, fn %LibPE.Section{
                                                       virtual_data: virtual_data
                                                     } = sec,
                                                     {virtual, raw} ->
        virtual = ceil(virtual / virtual_alignment) * virtual_alignment
        raw = ceil(raw / file_alignment) * file_alignment

        virtual_size = byte_size(virtual_data)
        # On disk, trailing zero bytes are dropped and the remaining size is
        # rounded up to the file alignment.
        raw_size = byte_size(String.trim_trailing(virtual_data, "\0"))
        raw_size = ceil(raw_size / file_alignment) * file_alignment

        raw_data =
          if virtual_size < raw_size do
            binary_pad_trailing(virtual_data, raw_size, sec.padding)
          else
            binary_part(virtual_data, 0, raw_size)
          end

        sec = %LibPE.Section{
          sec
          | pointer_to_raw_data: raw,
            size_of_raw_data: raw_size,
            virtual_address: virtual,
            virtual_size: virtual_size,
            raw_data: raw_data
        }

        {sec, {virtual + virtual_size, raw + raw_size}}
      end)

    # The certificate table is appended after all sections, aligned to a
    # quad-word (64-byte) boundary.
    header =
      if header.certificate_data != nil do
        {_virtual, raw} = offsets
        start = ceil(raw / @quad_word_size) * @quad_word_size

        %LibPE.OptionalHeader{
          header
          | certificate_table: {start, byte_size(header.certificate_data)}
        }
      else
        %LibPE.OptionalHeader{header | certificate_table: {0, 0}}
      end

    # Only updating the physical tables `.rsrc` and `.reloc`
    header =
      [
        # export_table: ".edata",
        # import_table: ".idata",
        resource_table: ".rsrc",
        # exception_table: ".pdata",
        base_relocation_table: ".reloc"
        # debug: ".debug",
        # tls_table: ".tls"
      ]
      |> Enum.reduce(header, fn {field_name, section_name}, header ->
        case Enum.find(sections, fn %LibPE.Section{name: name} -> name == section_name end) do
          nil ->
            Map.put(header, field_name, {0, 0})

          %LibPE.Section{virtual_address: virtual_address, virtual_size: virtual_size} ->
            Map.put(header, field_name, {virtual_address, virtual_size})
        end
      end)

    # size_of_image spans to the end of the last section, rounded up to the
    # section alignment.
    header =
      case List.last(sections) do
        %LibPE.Section{virtual_address: addr, virtual_size: size} ->
          %LibPE.OptionalHeader{
            header
            | size_of_image: ceil((addr + size) / virtual_alignment) * virtual_alignment
          }

        nil ->
          header
      end

    %LibPE{pe | coff_sections: sections, coff_header: header}
  end

  @doc """
  Returns the parsed resource table from the `.rsrc` section, or `nil` when
  the image has no resource section.
  """
  def get_resources(%LibPE{coff_sections: sections}) do
    case Enum.find(sections, fn %LibPE.Section{name: name} -> name == ".rsrc" end) do
      %LibPE.Section{virtual_data: virtual_data, virtual_address: virtual_address} ->
        LibPE.ResourceTable.parse(virtual_data, virtual_address)

      nil ->
        nil
    end
  end

  @doc """
  Replaces the `.rsrc` section content with the given resource table and
  refreshes the file layout.

  Assumes a `.rsrc` section exists (otherwise `Enum.find_index/2` yields `nil`
  and the lookup below raises).
  """
  def set_resources(%LibPE{coff_sections: sections} = pe, resources = %LibPE.ResourceTable{}) do
    # need to ensure that the virtual_address is up-to-date
    pe = update_layout(pe)

    # now fetching and setting the resource
    idx = Enum.find_index(sections, fn %LibPE.Section{name: name} -> name == ".rsrc" end)
    section = %LibPE.Section{virtual_address: virtual_address} = Enum.at(sections, idx)
    data = LibPE.ResourceTable.encode(resources, virtual_address)
    section = %LibPE.Section{section | virtual_data: data}
    sections = List.update_at(sections, idx, fn _ -> section end)

    # updating offsets coming after the ".rsrc" section
    %LibPE{pe | coff_sections: sections}
    |> update_layout()
  end

  @doc """
  Inserts (or replaces) a single resource entry of `resource_type`.

  `language` defaults to 1033 (en-US LCID).
  """
  def set_resource(
        pe,
        resource_type,
        data,
        codepage \\ 0,
        language \\ 1033
      ) do
    resources =
      get_resources(pe)
      |> LibPE.ResourceTable.set_resource(resource_type, data, codepage, language)

    set_resources(pe, resources)
  end

  @doc false
  def binary_pad_trailing(binary, size, padding \\ <<0>>)

  # Pads `binary` with repetitions of `padding` up to exactly `size` bytes.
  def binary_pad_trailing(binary, size, padding) when byte_size(binary) < size do
    bytesize = size - byte_size(binary)
    padding = String.duplicate(padding, ceil(bytesize / byte_size(padding)) * byte_size(padding))
    binary <> binary_part(padding, 0, bytesize)
  end

  # Already at least `size` bytes: return unchanged.
  def binary_pad_trailing(binary, _size, _padding) do
    binary
  end

  @doc false
  # Cuts `size` bytes at `start` out of `binary`, replacing them with zeros,
  # and returns {remaining_binary, extracted_content}.
  def binary_extract(binary, start, size) do
    content = binary_part(binary, start, size)

    binary =
      binary_part(binary, 0, start) <>
        binary_pad_trailing("", size) <>
        binary_part(binary, start + size, byte_size(binary) - (start + size))

    {binary, content}
  end
end
|
lib/libpe.ex
| 0.798265 | 0.451085 |
libpe.ex
|
starcoder
|
defmodule PINXS.Transfers.Transfer do
  @moduledoc """
  Provides functions for creating and working with transfers.
  """
  alias PINXS.HTTP.API
  alias __MODULE__

  @derive [Jason.Encoder]
  defstruct [
    :amount,
    :bank_account,
    :created_at,
    :currency,
    :description,
    :paid_at,
    :recipient,
    :reference,
    :status,
    :token,
    :total_credits,
    :total_debits
  ]

  @type t :: %__MODULE__{
          amount: integer(),
          bank_account: nil | PINXS.BankAccounts.BankAccount,
          created_at: nil | String.t(),
          currency: nil | String.t(),
          description: nil | String.t(),
          paid_at: nil | String.t(),
          recipient: String.t(),
          reference: nil | String.t(),
          status: nil | String.t(),
          token: nil | String.t(),
          total_credits: nil | integer(),
          total_debits: nil | integer()
        }

  @doc """
  Creates a transfer.

  Transfers are posted in AUD; any other `:currency` value is overwritten
  with `"AUD"` before the request is made.
  """
  def create(%Transfer{currency: "AUD"} = transfer, config) do
    API.post("/transfers", transfer, __MODULE__, config)
  end

  def create(%Transfer{} = transfer, config) do
    transfer
    |> Map.put(:currency, "AUD")
    |> create(config)
  end

  @doc """
  Gets a transfer by its token.
  """
  def get(token, config), do: API.get("/transfers/#{token}", __MODULE__, config)

  @doc """
  Gets a paginated list of transfers.
  """
  def get_all(config), do: API.get("/transfers", __MODULE__, config)

  @doc """
  Gets a specific page of transfers.
  """
  def get_all(page, config) when is_integer(page),
    do: API.get("/transfers?page=#{page}", __MODULE__, config)

  @doc """
  Gets the first page of line items for a transfer (500 items per page).
  """
  def get_line_items(token, config),
    do: API.get("/transfers/#{token}/line_items?per_page=500", __MODULE__, config)

  @doc """
  Gets a specific page of line items for a transfer (500 items per page).
  """
  def get_line_items(token, page, config),
    do: API.get("/transfers/#{token}/line_items?page=#{page}&per_page=500", __MODULE__, config)

  @doc """
  Retrieve transfers based on search criteria.

  ## Search options

  ```
  %{
    query: "",
    start_date: "YYYY/MM/DD", # 2013/01/01
    end_date: "YYYY/MM/DD", # 2013/12/25
    sort: "", # field to sort by, default `created_at`
    direction: 1 # 1 or -1
  }
  ```
  """
  # When a raw Tesla client is supplied, the query map must be converted to a
  # keyword-style list before being handed to the API layer.
  def search(query_map, %Tesla.Client{} = config),
    do: API.search("/transfers/search", Map.to_list(query_map), __MODULE__, config)

  def search(query_map, config),
    do: API.search("/transfers/search", query_map, __MODULE__, config)
end
|
lib/transfers/transfer.ex
| 0.735071 | 0.587056 |
transfer.ex
|
starcoder
|
defmodule ArtemisWeb.ViewHelper.Async do
  use Phoenix.HTML
  import ArtemisWeb.ViewHelper.Print
  import ArtemisWeb.ViewHelper.Status

  @moduledoc """
  View helpers for rendering data asynchronously using Phoenix LiveView

  NOTE: This module contains async functions. Also see
  `apps/artemis_web/lib/artemis_web.ex` for async related macros.
  """

  @doc """
  Render a template asynchronously.

  ### Optional - Support for Async Data

  Supports a function to be evaluated after page load. Can be passed as either
  a {Module, :function} tuple, function, or anonymous function. Any other data
  will simulate an asynchronous data call, returning the response immediately.

  The results of the async data call is available in the `@async_data` assigned
  variable. When loading the value is `nil`.

  The default value for this option is `nil`.

  Example:

      <%= async_render(@conn, assigns, "index/_example.html", async_data: fn _callback_pid, _assigns -> "Async data: Hello World" end) %>
      <%= async_render @conn, assigns, "index/_example.html", async_data: &hello_world_data/2 %>
      <%= async_render(@conn, assigns, "index/_example.html", async_data: {ArtemisWeb.HomeView, :hello_world_data}) %>
      <%= async_render(@conn, assigns, "index/_example.html", async_data: "Fake async data to be returned") %>
  """
  def async_render(conn, assigns, template, options \\ []) do
    # Fall back to the template path as the LiveView id when none is given.
    id = Keyword.get(options, :id, template)
    session = async_convert_assigns_to_session(assigns, template, options)

    Phoenix.LiveView.Helpers.live_render(conn, ArtemisWeb.AsyncRenderLive, id: id, session: session)
  end

  @doc """
  Helper for converting an assigns into session

  LiveView sessions require string keys and must not carry the conn, so the
  assigns are stringified, stripped, and augmented with the async metadata
  ("async_data", "module", "template") the AsyncRenderLive process needs.
  """
  def async_convert_assigns_to_session(assigns, template, options \\ []) do
    module = Keyword.get(options, :module, assigns[:view_module] || assigns[:module])
    async_data = Keyword.get(options, :async_data, assigns[:async_data])

    assigns
    |> Enum.into(%{})
    |> Map.delete(:conn)
    |> Enum.map(fn {key, value} -> {Artemis.Helpers.to_string(key), value} end)
    |> Enum.into(%{})
    |> Map.put("async_data", maybe_serialize_async_data(async_data))
    |> Map.put("module", module)
    |> Map.put("template", template)
  end

  # Functions cannot be stored in a LiveView session directly; serialize them.
  # Any other value is passed through and treated as immediate "async" data.
  defp maybe_serialize_async_data(value) when is_function(value), do: ArtemisWeb.AsyncRenderLive.serialize(value)
  defp maybe_serialize_async_data(value), do: value

  @doc """
  Return whether async data is loaded. Can be used in a template `if` statement
  to evaluate sections waiting on async data

  Example

      <%= if async_loaded?(assigns) do %>
        Custom HTML here
      <% end %>
  """
  def async_loaded?(assigns) do
    # Only the initial :loading state counts as "not loaded";
    # :reloading still has data available.
    Map.get(assigns, :async_status) != :loading
  end

  @doc """
  Render async render information: the status label, a loading/updated icon,
  and a "last updated" timestamp.
  """
  def render_page_data_info(assigns, options \\ []) do
    async_status = Map.get(assigns, :async_status)
    loading? = async_status in [:loading, :reloading]
    color = if async_status == :reloading, do: "orange", else: "green"
    icon = if loading?, do: "ui icon sync alternate rotating #{color}", else: "ui icon check green"
    updated_at = Keyword.get(options, :updated_at) || Timex.now()

    content_tag(:div, class: "page-data-info") do
      [
        render_async_data_info(assigns),
        content_tag(:div, content_tag(:i, "", class: icon)),
        content_tag(:div, "Last updated on #{render_date_time_with_seconds(updated_at)}")
      ]
    end
  end

  @doc """
  Render reloading information
  """
  def render_async_data_info(assigns) do
    case Map.get(assigns, :async_status) do
      :loading -> render_status_label("Loading", color: "green")
      :reloading -> render_status_label("Updating", color: "orange")
      _ -> ""
    end
  end

  @doc """
  Render a reloading icon
  """
  def render_async_reloading_icon(assigns) do
    # Returns nil (renders nothing) unless a reload is in flight.
    if Map.get(assigns, :async_status) == :reloading do
      content_tag(:div, "", class: "ui active centered inline loader")
    end
  end

  @doc """
  Return async data field
  """
  def get_async_data_field(assigns, field) do
    Artemis.Helpers.deep_get(assigns, [:async_data, field])
  end

  @doc """
  Return either @conn or @socket depending on which is set
  """
  def get_conn_or_socket(assigns) do
    cond do
      value = Map.get(assigns, :conn) -> value
      value = Map.get(assigns, :conn_or_socket) -> value
      value = Map.get(assigns, :socket) -> value
      true -> nil
    end
  end
end
|
apps/artemis_web/lib/artemis_web/view_helpers/async.ex
| 0.838366 | 0.457621 |
async.ex
|
starcoder
|
defmodule Relax.Resource do
  # NOTE(review): `use Behaviour` and `defcallback` are pre-Elixir-1.0 idioms;
  # this module targets that era (see also `Dict.merge` below).
  use Behaviour
  @moduledoc """
  Provides functionality and defines a behaviour to help build jsonapi.org
  resource endpoints.
  ## Using
  When used, `Relax.Resource` works as an parent module that adds
  common functionality and behaviours and plugs to your module.
  ### Submodules
  When using the module you can pass either an `only` or a `except` option to
  determine what "actions" are available on the resource. By default all
  actions are included.
      use Relax.Resource, except: [:delete]
      use Relax.Resource, only: [:fetch_all, :fetch_one]
  Each included action adds another use statement:
  * `fetch_all` - `use Relax.Resource.FetchAll`
  * `fetch_one` - `use Relax.Resource.FetchOne`
  * `create` - `use Relax.Resource.Create`
  * `update` - `use Relax.Resource.Update`
  * `delete` - `use Relax.Resource.Delete`
  Please see each action's documentation for usage details.
  ### Provided Plugs
  Relax.Resource provides 2 plug functions, `resource` and `not found`.
  * `plug :resource` - Required to dispatch requests to the appropriate action.
  * `plug :not_found` - Optionally returns a 404 for all un-halted conns.
  Example:
      plug :resource
      plug :not_found
  ### Plug.Builder vs Plug.Router
  By default `use Relax.Resource` will also `use Plug.Builder`, however if
  you wish to capture non-standard routes you can pass the `plug: :router`
  option to the use statement and use Plug.Router along side your normal
  resource routes.
      defmodule MyResource do
        use Relax.Resource, plug: :router
        plug :resource
        plug :match
        plug :dispatch
        post ":id/activate" do
          # Work with conn and id directly
        end
      end
  ## Behaviour
  This module also defines a behaviour defining the callbacks needed by all
  action types. The behaviour is added when you use this module.
  """
  @doc """
  Defines the model (struct) this resource exposes.
  This is typically a module using Ecto.Model, but may be any struct. Example:
      def model, do: MyApp.Models.Post
  """
  defcallback model() :: module
  @doc """
  Defines the module using Ecto.Repo to be queried by this resource.
  This may be defined in each resource, but by default the `:relax`
  application `repo` config value is used.
  Per resource example:
      def repo, do: MyApp.Repo
  Config example (config.exs):
      config :relax,
        repo: MyApp.Repo
  """
  defcallback repo() :: module
  @doc false
  defmacro __using__(opts) do
    # Any :plug value other than nil/:builder/:router raises CaseClauseError at
    # compile time, surfacing the misconfiguration early.
    plug_module = case opts[:plug] do
      nil -> Plug.Builder
      :builder -> Plug.Builder
      :router -> Plug.Router
    end
    quote location: :keep do
      use unquote(plug_module)
      use Relax.Responders
      @behaviour Relax.Resource
      # Use each action behavior as appropriate.
      unquote(Relax.Resource.use_action_behaviours(opts))
      # Standard jsonapi.org pipeline: content negotiation, JSON parsing,
      # payload deserialization, then nested-resource resolution.
      plug JaSerializer.ContentTypeNegotiation
      plug Plug.Parsers, parsers: [:json], json_decoder: Poison
      plug JaSerializer.Deserializer
      plug Relax.Resource.Nested
      # Define plug endpoint that dispatches to each action behavior.
      def resource(conn, _opts) do
        do_resource(conn, conn.method, conn.path_info)
      end
      # Define plug endpoint that 404s and returns not found
      # NOTE(review): Dict was removed in modern Elixir; consistent with the
      # pre-1.0 style used throughout this module.
      def not_found(conn, opts) do
        Relax.NotFound.call(conn, Dict.merge(opts, type: :resource))
      end
      unquote(Relax.Resource.default_repo(opts))
      unquote(Relax.Resource.default_model(opts))
      @before_compile Relax.Resource
    end
  end
  @doc false
  def use_action_behaviours(opts) do
    available = [:fetch_all, :fetch_one, :create, :update, :delete]
    # `--` binds tighter than `||`: when :only is absent this evaluates
    # `available -- (opts[:except] || [])`; when :only is present it wins outright.
    allowed = (opts[:only] || available -- (opts[:except] || []))
    quote bind_quoted: [allowed: allowed] do
      if :fetch_all in allowed, do: use Relax.Resource.FetchAll
      if :fetch_one in allowed, do: use Relax.Resource.FetchOne
      if :create in allowed, do: use Relax.Resource.Create
      if :update in allowed, do: use Relax.Resource.Update
      if :delete in allowed, do: use Relax.Resource.Delete
    end
  end
  @doc false
  def default_repo(opts) do
    # With `ecto: false` the resource has no repo; otherwise inject an
    # overridable default taken from the :relax application config.
    if opts[:ecto] == false do
      quote do
        def repo, do: :none
      end
    else
      quote do
        if Application.get_env(:relax, :repo) do
          def repo, do: Application.get_env(:relax, :repo)
          defoverridable [repo: 0]
        end
      end
    end
  end
  @doc false
  def default_model(opts) do
    # Only stubs model/0 when Ecto is disabled; otherwise the resource must
    # implement the callback itself.
    if opts[:ecto] == false do
      quote do
        def model, do: :none
      end
    end
  end
  @doc false
  defmacro __before_compile__(_env) do
    quote do
      # If nothing matches, next plug
      def do_resource(conn, _, _), do: conn
    end
  end
end
|
lib/relax/resource.ex
| 0.814938 | 0.453564 |
resource.ex
|
starcoder
|
defmodule Erlef.Agenda do
  @moduledoc false

  @board_ics "https://user.fm/calendar/v1-d950fe3b2598245f424e3ddbff1a674a/Board%20Public.ics"

  # Refresh the cached combined feed every 2 minutes.
  @check_interval 120_000

  use GenServer

  # Client

  @spec start_link(Keyword.t()) :: :ignore | {:error, term()} | {:ok, pid()}
  def start_link(_opts) do
    # The monotonic start timestamp anchors the periodic check schedule
    # (see next_check/2), keeping ticks drift-free.
    GenServer.start_link(__MODULE__, {sys_now(), @check_interval})
  end

  # Returns the combined iCalendar feed (board + working groups).
  # Serves from the ETS cache when warm; otherwise builds it synchronously.
  @spec get_combined() :: {:ok, String.t()} | {:error, term()}
  def get_combined() do
    case :ets.lookup(__MODULE__, :all) do
      [{:all, ics}] ->
        {:ok, ics}

      [] ->
        case get_calendars() do
          ics when is_binary(ics) ->
            {:ok, ics}

          # BUG FIX: previously this case had no fallback clause, so an
          # unexpected return crashed the caller with CaseClauseError even
          # though the @spec promises {:error, term()}.
          other ->
            {:error, {:unexpected_calendar_data, other}}
        end
    end
  rescue
    # :ets.lookup raises ArgumentError until handle_continue/2 has created the
    # named table; honor the @spec's {:error, term()} instead of raising.
    ArgumentError -> {:error, :calendar_cache_unavailable}
  end

  # Server

  @impl true
  def init(opts) do
    # Defer the slow, network-bound initial fetch to handle_continue/2 so
    # start_link/1 returns promptly.
    {:ok, opts, {:continue, :init}}
  end

  @impl true
  def handle_continue(:init, state) do
    _tid = :ets.new(__MODULE__, [:named_table, :protected, {:write_concurrency, true}])
    :ets.insert(__MODULE__, {:all, get_calendars()})
    schedule_check(state)
    {:noreply, state}
  end

  @impl true
  def handle_info(:check, state) do
    # Periodic refresh of the cached combined feed.
    :ets.insert(__MODULE__, {:all, get_calendars()})
    schedule_check(state)
    {:noreply, state}
  end

  def handle_info(_, state), do: {:noreply, state}

  # Private

  # Builds the combined ICS body from every working group with a public calendar.
  defp get_calendars() do
    wgs = Erlef.Groups.list_working_groups()

    wgs
    |> Enum.filter(fn wg -> not is_nil(wg.meta.public_calendar) end)
    |> Enum.map(fn wg -> wg.meta.public_calendar end)
    |> get_feeds()
  end

  # Downloads each feed; feeds that fail to download are skipped (best effort).
  defp get_feeds(links) do
    [@board_ics | links]
    |> Enum.reduce([], fn l, acc ->
      case Erlef.HTTP.perform(:get, l, [], "", []) do
        {:ok, res} ->
          [res.body | acc]

        _ ->
          acc
      end
    end)
    |> Enum.map(&split_and_trim/1)
    |> combine_all()
  end

  defp split_and_trim(ics_str),
    do: Enum.map(String.split(ics_str, "\n"), fn s -> String.trim(s) end)

  # Extracts the VEVENT/VTIMEZONE sections of every feed and wraps them in a
  # single VCALENDAR envelope. Sections and their lines are accumulated in
  # reverse order, hence the single Enum.reverse/1 before joining.
  defp combine_all(cals) do
    cals
    |> Enum.flat_map(&get_events_and_timezones/1)
    |> Enum.flat_map(fn x -> x end)
    |> Enum.reverse()
    |> Enum.join("\n")
    |> wrap()
  end

  defp wrap(body) do
    """
    BEGIN:VCALENDAR
    VERSION:2.0
    CALSCALE:GREGORIAN
    METHOD:PUBLISH
    PRODID:-//Erlef/1.0/EN
    X-WR-CALNAME:ErlEF Public Calendars
    #{body}
    END:VCALENDAR
    """
  end

  defp get_events_and_timezones(lines), do: get_events_and_timezones(lines, [])

  defp get_events_and_timezones([], acc), do: acc

  defp get_events_and_timezones([<<"BEGIN:VTIMEZONE">> = line | lines], acc) do
    {lines, timezone} = collect_timezone(lines, [line])
    get_events_and_timezones(lines, [timezone | acc])
  end

  defp get_events_and_timezones([<<"BEGIN:VEVENT">> = line | lines], acc) do
    {lines, event} = collect_event(lines, [line])
    get_events_and_timezones(lines, [event | acc])
  end

  # Lines outside any VEVENT/VTIMEZONE section are dropped.
  defp get_events_and_timezones([_ | lines], acc), do: get_events_and_timezones(lines, acc)

  defp collect_event([<<"END:VEVENT">> = line | lines], acc), do: {lines, [line | acc]}
  defp collect_event([line | lines], acc), do: collect_event(lines, [line | acc])

  defp collect_timezone([<<"END:VTIMEZONE">> = line | lines], acc), do: {lines, [line | acc]}
  defp collect_timezone([line | lines], acc), do: collect_timezone(lines, [line | acc])

  # Schedules the next :check aligned to the original start time, so ticks do
  # not slide by per-refresh processing latency.
  defp schedule_check({start, interval}) do
    :erlang.send_after(next_check(start, interval), self(), :check)
  end

  defp sys_now() do
    :erlang.monotonic_time(:millisecond)
  end

  defp next_check(start, interval) do
    interval - rem(sys_now() - start, interval)
  end
end
|
lib/erlef/agenda.ex
| 0.592784 | 0.463444 |
agenda.ex
|
starcoder
|
defmodule Authoritex.TestCase do
  @moduledoc """
  Shared tests for Authoritex modules
  `Authoritex.TestCase` ensures that an authority module implements the
  `Authoritex` behaviour and that all of its functions behave as expected.
  To run the shared tests, `use Authoritex.TestCase, opts` within your
  test module, where `opts` contains:
  * `module` -- The module you're testing
  * `code` -- The code returned by the module's `code/0` callback
  * `description` -- The description returned by the module's `description/0` callback
  * `test_uris` -- A list of URIs that should be resolvable by the module, referencing
    the same resource
  * `bad_uri` -- A URI that is in the correct format but does not point to a resource
  * `expected` -- A keyword list containing the attributes of the resource referenced
    by the `test_uris`.
  * `search_result_term` -- A term or search query that will include the resource
    referenced by the `test_uris` in its results
  * `search_count_term` -- A term or search query that will produce at least two
    pages of results
  * `default_results` (optional) -- The default maximum number of results returned
    by a search (default: `30`)
  * `explicit_results` (optional) -- A non-default number of results that can be
    used for testing `search/2` (default: `50`)
  See this package's test suite for detailed examples.
  """
  use ExUnit.CaseTemplate
  using(use_opts) do
    # bind_quoted evaluates each option exactly once at the use site and makes
    # the results available as plain variables inside the quoted test module.
    quote bind_quoted: [
            module: use_opts[:module],
            code: use_opts[:code],
            description: use_opts[:description],
            test_uris: use_opts[:test_uris],
            bad_uri: use_opts[:bad_uri],
            expected_id: get_in(use_opts, [:expected, :id]),
            expected_label: get_in(use_opts, [:expected, :label]),
            expected_qualified_label: get_in(use_opts, [:expected, :qualified_label]),
            expected_hint: get_in(use_opts, [:expected, :hint]),
            # fetch/1 may return a different hint than search results;
            # falls back to the plain :hint when :fetch_hint is not given.
            expected_fetch_hint:
              Keyword.get(
                use_opts[:expected],
                :fetch_hint,
                get_in(use_opts, [:expected, :hint])
              ),
            search_result_term: use_opts[:search_result_term],
            search_count_term: use_opts[:search_count_term],
            default_results: use_opts[:default_results] || 30,
            explicit_results: use_opts[:explicit_results] || 50
          ] do
      use ExUnit.Case, async: true
      # HTTP interactions are replayed from recorded ExVCR cassettes, so these
      # tests are deterministic and do not hit the live authority services.
      use ExVCR.Mock, adapter: ExVCR.Adapter.Hackney
      test "implements the Authoritex behaviour" do
        assert unquote(module).__info__(:attributes)
               |> get_in([:behaviour])
               |> Enum.member?(Authoritex)
      end
      test "can_resolve?/1" do
        unquote(test_uris)
        |> Enum.each(fn uri ->
          assert unquote(module).can_resolve?(uri)
        end)
        refute unquote(module).can_resolve?("info:fake/uri")
      end
      describe "introspection" do
        test "code/0" do
          assert unquote(module).code() == unquote(code)
        end
        test "description/0" do
          assert unquote(module).description() == unquote(description)
        end
      end
      describe "fetch/1" do
        test "success" do
          use_cassette "#{unquote(code)}_fetch_success", match_requests_on: [:query] do
            # Every URI form must resolve to the same canonical record.
            unquote(test_uris)
            |> Enum.each(fn uri ->
              assert unquote(module).fetch(uri) ==
                       {:ok,
                        %{
                          id: unquote(expected_id),
                          label: unquote(expected_label),
                          qualified_label: unquote(expected_qualified_label),
                          hint: unquote(expected_fetch_hint)
                        }}
            end)
          end
        end
        test "failure" do
          use_cassette "#{unquote(code)}_fetch_failure", match_requests_on: [:query] do
            assert unquote(module).fetch(unquote(bad_uri)) == {:error, 404}
          end
        end
      end
      describe "search/2" do
        test "results" do
          use_cassette "#{unquote(code)}_search_results", match_requests_on: [:query] do
            # `with` is used so a module returning {:error, _} here fails the
            # assert on the success path rather than crashing on a match.
            with {:ok, results} <- unquote(module).search(unquote(search_count_term)) do
              assert length(results) == unquote(default_results)
            end
            with {:ok, results} <-
                   unquote(module).search(unquote(search_count_term), unquote(explicit_results)) do
              assert length(results) == unquote(explicit_results)
            end
            with {:ok, results} <- unquote(module).search(unquote(search_result_term)) do
              assert Enum.member?(results, %{
                       id: unquote(expected_id),
                       label: unquote(expected_label),
                       hint: unquote(expected_hint)
                     })
            end
          end
        end
        test "no results" do
          use_cassette "#{unquote(code)}_search_results_empty", match_requests_on: [:query] do
            assert {:ok, []} = unquote(module).search("M1551ng")
          end
        end
      end
    end
  end
end
|
test/support/authoritex_case.ex
| 0.905432 | 0.911022 |
authoritex_case.ex
|
starcoder
|
defmodule Record do
  @moduledoc """
  Module to work, define and import records.
  Records are simply tuples where the first element is an atom:
      iex> Record.record? { User, "jose", 27 }
      true
  This module provides conveniences for working with records at
  compilation time, where compile-time field names are used to
  manipulate the tuples, providing fast operations on top of
  the tuples compact structure.
  In Elixir, records are used in two situations:
  1. To work with short, internal data. See `Inspect.Algebra`
     implementation for a good example;
  2. To interface with Erlang records;
  The macros `defrecord/3` and `defrecordp/3` can be used to create
  records while `extract/2` can be used to extract records from Erlang
  files.
  """
  @doc """
  Extracts record information from an Erlang file.
  Returns a quoted expression containing the fields as a list
  of tuples. It expects the record name to be an atom and the
  library path to be a string at expansion time.
  ## Examples
      iex> Record.extract(:file_info, from_lib: "kernel/include/file.hrl")
      [size: :undefined, type: :undefined, access: :undefined, atime: :undefined,
       mtime: :undefined, ctime: :undefined, mode: :undefined, links: :undefined,
       major_device: :undefined, minor_device: :undefined, inode: :undefined,
       uid: :undefined, gid: :undefined]
  """
  defmacro extract(name, opts) when is_atom(name) and is_list(opts) do
    Macro.escape Record.Extractor.extract(name, opts)
  end
  @doc """
  Checks if the given `data` is a record of `kind`.
  This is implemented as a macro so it can be used in guard clauses.
  ## Examples
      iex> record = { User, "jose", 27 }
      iex> Record.record?(record, User)
      true
  """
  defmacro record?(data, kind) do
    # In guards only guard-safe BIFs are allowed and rebinding is impossible,
    # so the expression is expanded inline; outside guards `data` is bound to
    # a temporary first to avoid evaluating it three times.
    case __CALLER__.in_guard? do
      true ->
        quote do
          is_tuple(unquote(data)) and tuple_size(unquote(data)) > 0
            and :erlang.element(1, unquote(data)) == unquote(kind)
        end
      false ->
        quote do
          result = unquote(data)
          is_tuple(result) and tuple_size(result) > 0
            and :erlang.element(1, result) == unquote(kind)
        end
    end
  end
  @doc """
  Checks if the given `data` is a record.
  This is implemented as a macro so it can be used in guard clauses.
  ## Examples
      iex> record = { User, "jose", 27 }
      iex> Record.record?(record)
      true
      iex> tuple = {}
      iex> Record.record?(tuple)
      false
  """
  defmacro record?(data) do
    # Same guard/non-guard split as record?/2; here the check is only that the
    # tuple is non-empty and tagged with an atom.
    case __CALLER__.in_guard? do
      true ->
        quote do
          is_tuple(unquote(data)) and tuple_size(unquote(data)) > 0
            and is_atom(:erlang.element(1, unquote(data)))
        end
      false ->
        quote do
          result = unquote(data)
          is_tuple(result) and tuple_size(result) > 0
            and is_atom(:erlang.element(1, result))
        end
    end
  end
  # The three @doc false functions below are legacy entry points kept for
  # backward compatibility; they delegate to Record.Deprecated.
  @doc false
  def defmacros(name, values, env, tag \\ nil) do
    Record.Deprecated.defmacros(name, values, env, tag)
  end
  @doc false
  def deftypes(values, types, env) do
    Record.Deprecated.deftypes(values, types, env)
  end
  @doc false
  def deffunctions(values, env) do
    Record.Deprecated.deffunctions(values, env)
  end
  @doc """
  Defines a set of macros to create and access a record.
  The macros are going to have `name`, a tag (which defaults
  to the name if none is given), and a set of fields given by
  `kv`.
  ## Examples
      defmodule User do
        Record.defrecord :user, [name: "José", age: "25"]
      end
  In the example above, a set of macros named `user` but with different
  arities will be defined to manipulate the underlying record:
      # To create records
      user()        #=> { :user, "José", 25 }
      user(age: 26) #=> { :user, "José", 26 }
      # To get a field from the record
      user(record, :name) #=> "José"
      # To update the record
      user(record, age: 26) #=> { :user, "José", 26 }
  By default, Elixir uses the record name as the first element of
  the tuple (the tag). But it can be changed to something else:
      defmodule User do
        Record.defrecord :user, User, name: nil
      end
      user() #=> { User, nil }
  """
  defmacro defrecord(name, tag \\ nil, kv) do
    # `unquote: true` keeps any unquote fragments in `kv` intact so they are
    # expanded in the caller's context rather than here.
    kv = Macro.escape(kv, unquote: true)
    quote bind_quoted: [name: name, tag: tag, kv: kv] do
      tag = tag || name
      kv = Macro.expand(kv, __ENV__)
      { fields, _types } = Record.Backend.split_fields_and_types(:defrecord, kv)
      fields = Macro.escape(fields)
      # user(args \\ []) — create/update-by-keyword form.
      defmacro(unquote(name)(args \\ [])) do
        Record.Backend.access(unquote(tag), unquote(fields), args, __CALLER__)
      end
      # user(record, args) — field read or record update form.
      defmacro(unquote(name)(record, args)) do
        Record.Backend.access(unquote(tag), unquote(fields), record, args, __CALLER__)
      end
    end
  end
  @doc """
  Same as `defrecord/3` but generates private macros.
  """
  defmacro defrecordp(name, tag \\ nil, kv) do
    kv = Macro.escape(kv, unquote: true)
    quote bind_quoted: [name: name, tag: tag, kv: kv] do
      tag = tag || name
      kv = Macro.expand(kv, __ENV__)
      { fields, _types } = Record.Backend.split_fields_and_types(:defrecordp, kv)
      fields = Macro.escape(fields)
      defmacrop(unquote(name)(args \\ [])) do
        Record.Backend.access(unquote(tag), unquote(fields), args, __CALLER__)
      end
      defmacrop(unquote(name)(record, args)) do
        Record.Backend.access(unquote(tag), unquote(fields), record, args, __CALLER__)
      end
    end
  end
end
|
lib/elixir/lib/record.ex
| 0.796886 | 0.687171 |
record.ex
|
starcoder
|
require Utils
require Program
defmodule D2 do
  @moduledoc """
  --- Day 2: 1202 Program Alarm ---
  On the way to your gravity assist around the Moon, your ship computer beeps angrily about a "1202 program alarm". On the radio, an Elf is already explaining how to handle the situation: "Don't worry, that's perfectly norma--" The ship computer bursts into flames.
  You notify the Elves that the computer's magic smoke seems to have escaped. "That computer ran Intcode programs like the gravity assist program it was working on; surely there are enough spare parts up there to build a new Intcode computer!"
  An Intcode program is a list of integers separated by commas (like 1,0,0,3,99). To run one, start by looking at the first integer (called position 0). Here, you will find an opcode - either 1, 2, or 99. The opcode indicates what to do; for example, 99 means that the program is finished and should immediately halt. Encountering an unknown opcode means something went wrong.
  Opcode 1 adds together numbers read from two positions and stores the result in a third position. The three integers immediately after the opcode tell you these three positions - the first two indicate the positions from which you should read the input values, and the third indicates the position at which the output should be stored.
  For example, if your Intcode computer encounters 1,10,20,30, it should read the values at positions 10 and 20, add those values, and then overwrite the value at position 30 with their sum.
  Opcode 2 works exactly like opcode 1, except it multiplies the two inputs instead of adding them. Again, the three integers after the opcode indicate where the inputs and outputs are, not their values.
  Once you're done processing an opcode, move to the next one by stepping forward 4 positions.
  Once you have a working computer, the first step is to restore the gravity assist program (your puzzle input) to the "1202 program alarm" state it had just before the last computer caught fire. To do this, before running the program, replace position 1 with the value 12 and replace position 2 with the value 2. What value is left at position 0 after the program halts?
  --- Part Two ---
  "Good, the new computer seems to be working correctly! Keep it nearby during this mission - you'll probably use it again. Real Intcode computers support many more features than your new one, but we'll let you know what they are as you need them."
  "However, your current priority should be to complete your gravity assist around the Moon. For this mission to succeed, we should settle on some terminology for the parts you've already built."
  Intcode programs are given as a list of integers; these values are used as the initial state for the computer's memory. When you run an Intcode program, make sure to start by initializing memory to the program's values. A position in memory is called an address (for example, the first value in memory is at "address 0").
  Opcodes (like 1, 2, or 99) mark the beginning of an instruction. The values used immediately after an opcode, if any, are called the instruction's parameters. For example, in the instruction 1,2,3,4, 1 is the opcode; 2, 3, and 4 are the parameters. The instruction 99 contains only an opcode and has no parameters.
  The address of the current instruction is called the instruction pointer; it starts at 0. After an instruction finishes, the instruction pointer increases by the number of values in the instruction; until you add more instructions to the computer, this is always 4 (1 opcode + 3 parameters) for the add and multiply instructions. (The halt instruction would increase the instruction pointer by 1, but it halts the program instead.)
  "With terminology out of the way, we're ready to proceed. To complete the gravity assist, you need to determine what pair of inputs produces the output 19690720."
  The inputs should still be provided to the program by replacing the values at addresses 1 and 2, just like before. In this program, the value placed in address 1 is called the noun, and the value placed in address 2 is called the verb. Each of the two input values will be between 0 and 99, inclusive.
  Once the program has halted, its output is available at address 0, also just like before. Each time you try a pair of inputs, make sure you first reset the computer's memory to the values in the program (your puzzle input) - in other words, don't reuse memory from a previous attempt.
  Find the input noun and verb that cause the program to produce the output 19690720. What is 100 * noun + verb? (For example, if noun=12 and verb=2, the answer would be 1202.)
  """
  @behaviour Day
  # Runs the Intcode program and returns the value left at address 0 after halt.
  defp execute(input) do
    %Program{state: %{0 => result}} = input |> Program.new() |> Program.run()
    result
  end
  # Sets the noun (address 1) and verb (address 2) in a fresh copy of the program.
  defp modify(list, value_at_index_1, value_at_index_2) do
    list
    |> List.replace_at(1, value_at_index_1)
    |> List.replace_at(2, value_at_index_2)
  end
  def solve(input) do
    input =
      input
      |> Utils.to_ints()
    part_1_input = modify(input, 12, 2)
    part_1 = execute(part_1_input)
    # Part 2 avoids brute force by assuming the output is an affine function of
    # the noun and verb: output = base + noun*dx + verb*dy. Three probe runs
    # ((0,0), (1,0), (0,1)) recover base, dx and dy, then the target is solved
    # directly. NOTE(review): this linearity holds for day-2 add/multiply
    # programs with these inputs — confirm before reusing on other programs.
    part_2_base = input |> modify(0, 0) |> execute
    part_2_dx = (input |> modify(1, 0) |> execute) - part_2_base
    part_2_dy = (input |> modify(0, 1) |> execute) - part_2_base
    part_2_desired_outcome = 19_690_720
    part_2_desired_x = div(part_2_desired_outcome - part_2_base, part_2_dx)
    # The verb absorbs the remainder left after choosing the largest valid noun.
    part_2_desired_y =
      div(part_2_desired_outcome - part_2_base - part_2_desired_x * part_2_dx, part_2_dy)
    part_2 = 100 * part_2_desired_x + part_2_desired_y
    {
      part_1,
      part_2
    }
  end
end
|
lib/days/02.ex
| 0.718496 | 0.812979 |
02.ex
|
starcoder
|
defmodule Quarry.Filter do
  @moduledoc false
  require Ecto.Query
  alias Quarry.{Join, From}
  @type filter :: %{optional(atom()) => String.t() | number() | filter()}
  @spec build({Ecto.Query.t(), [Quarry.error()]}, Quarry.filter(), [atom()]) ::
          {Ecto.Query.t(), [Quarry.error()]}
  # Entry point: applies a (possibly nested) filter map to the query,
  # accumulating descriptive errors for unknown fields instead of raising.
  def build({query, errors}, filters, load_path \\ []) do
    root_binding = From.get_root_binding(query)
    schema = From.get_root_schema(query)
    filter({query, errors}, filters,
      binding: root_binding,
      schema: schema,
      path: [],
      load_path: load_path
    )
  end
  defp filter(acc, filters, state) do
    Enum.reduce(filters, acc, &maybe_filter_field(&1, &2, state))
  end
  # Validates the field against the Ecto schema before filtering: a map value
  # must name an association, anything else must name a plain field. Unknown
  # names become errors rather than crashing query construction.
  defp maybe_filter_field({field_name, value} = entry, {query, errors}, state) do
    fields = state[:schema].__schema__(:fields)
    association = state[:schema].__schema__(:associations)
    if (is_map(value) && field_name in association) || field_name in fields do
      filter_field(entry, {query, errors}, state)
    else
      {query, [build_error(field_name, state) | errors]}
    end
  end
  # Paths are accumulated in reverse; reverse them for human-readable output.
  defp build_error(field_name, state) do
    %{
      type: :filter,
      path: Enum.reverse([field_name | state[:path]]),
      load_path: Enum.reverse(state[:load_path]),
      message: "Quarry couldn't find field \"#{field_name}\" on Ecto schema \"#{state[:schema]}\""
    }
  end
  # Nested filter map: recurse into the association's schema, extending path.
  defp filter_field({field_name, child_filter}, acc, state) when is_map(child_filter) do
    child_schema = state[:schema].__schema__(:association, field_name).related
    state =
      state
      |> Keyword.put(:schema, child_schema)
      |> Keyword.update!(:path, &List.insert_at(&1, 0, field_name))
    filter(acc, child_filter, state)
  end
  # A list of values becomes an IN clause.
  defp filter_field({field_name, values}, {query, errors}, state) when is_list(values) do
    {query, join_binding} = Join.join_dependencies(query, state[:binding], state[:path])
    query = Ecto.Query.where(query, field(as(^join_binding), ^field_name) in ^values)
    {query, errors}
  end
  # A bare value defaults to equality.
  defp filter_field({field_name, value}, acc, state) when not is_tuple(value) do
    filter_field({field_name, {:eq, value}}, acc, state)
  end
  # {operation, value} tuples dispatch to the matching comparison clause below.
  defp filter_field({field_name, {operation, value}}, {query, errors}, state) do
    query
    |> Join.join_dependencies(state[:binding], state[:path])
    |> filter_by_operation(field_name, operation, value)
    |> then(&{&1, errors})
  end
  defp filter_by_operation({query, join_binding}, field_name, :eq, value) do
    Ecto.Query.where(query, field(as(^join_binding), ^field_name) == ^value)
  end
  defp filter_by_operation({query, join_binding}, field_name, :lt, value) do
    Ecto.Query.where(query, field(as(^join_binding), ^field_name) < ^value)
  end
  defp filter_by_operation({query, join_binding}, field_name, :gt, value) do
    Ecto.Query.where(query, field(as(^join_binding), ^field_name) > ^value)
  end
  defp filter_by_operation({query, join_binding}, field_name, :lte, value) do
    Ecto.Query.where(query, field(as(^join_binding), ^field_name) <= ^value)
  end
  defp filter_by_operation({query, join_binding}, field_name, :gte, value) do
    Ecto.Query.where(query, field(as(^join_binding), ^field_name) >= ^value)
  end
  # String prefix/suffix matches use case-insensitive ILIKE.
  defp filter_by_operation({query, join_binding}, field_name, :starts_with, value) do
    Ecto.Query.where(query, ilike(field(as(^join_binding), ^field_name), ^"#{value}%"))
  end
  defp filter_by_operation({query, join_binding}, field_name, :ends_with, value) do
    Ecto.Query.where(query, ilike(field(as(^join_binding), ^field_name), ^"%#{value}"))
  end
end
|
lib/quarry/filter.ex
| 0.717111 | 0.428473 |
filter.ex
|
starcoder
|
defmodule Retrieval.PatternParser do
  @moduledoc """
  Parses and verifies patterns that can be matched against the trie data structure.

  On success returns a token list, e.g. `[{:character, ?a}, :wildcard, {:inclusion, group}]`;
  on malformed input returns `{:error, message}` with the column of the offense.
  """

  # Enter initial state
  def parse(pattern), do: parse(pattern, 1, [])

  # Accept wildcard
  def parse(<<"*", rest :: binary>>, col, acc) do
    parse(rest, col + 1, [:wildcard|acc])
  end

  # Jump to group state (exclusion) — note "[^" must be tried before "[".
  def parse(<<"[^", rest :: binary>>, col, acc) do
    parse_gr(rest, col + 1, %{}, acc, :exclusion, col)
  end

  # Jump to group state (inclusion)
  def parse(<<"[", rest :: binary>>, col, acc) do
    parse_gr(rest, col + 1, %{}, acc, :inclusion, col)
  end

  # Jump to capture state
  def parse(<<"{", rest :: binary>>, col, acc) do
    parse_cap(rest, col + 1, acc, <<>>, col)
  end

  # Pattern consumed, return parsed pattern
  def parse(<<>>, _col, acc), do: Enum.reverse(acc)

  # Accept character — or propagate an unescaped-symbol error.
  def parse(binary, col, acc) do
    case parse_escape(binary, col) do
      {:escape, ch, rest} ->
        parse(rest, col + 3, [{:character, ch}|acc])
      {:character, ch, rest} ->
        parse(rest, col + 1, [{:character, ch}|acc])
      unescaped_symbol_error ->
        # BUG FIX: this branch was missing, so a bare `]`, `}` or `^` at the
        # top level raised CaseClauseError instead of returning {:error, _}
        # like every other parser state does.
        unescaped_symbol_error
    end
  end

  # Accept group
  defp parse_gr(<<"]", rest :: binary>>, col, group, acc, type, _start) do
    parse(rest, col + 1, [{type, group}|acc])
  end

  # Detect dangling group
  defp parse_gr(<<>>, _col, _group, _acc, type, start) do
    dangling_error("#{type}", start, "]")
  end

  # Detect group character; group members are stored as a map used as a set.
  defp parse_gr(binary, col, group, acc, type, start) do
    case parse_escape(binary, col) do
      {:escape, ch, rest} ->
        group = Map.put(group, ch, ch)
        parse_gr(rest, col + 3, group, acc, type, start)
      {:character, ch, rest} ->
        group = Map.put(group, ch, ch)
        parse_gr(rest, col + 1, group, acc, type, start)
      unescaped_symbol_error ->
        unescaped_symbol_error
    end
  end

  # Accept capture or return unnamed capture error
  defp parse_cap(<<"}", rest :: binary>>, col, acc, name, start) do
    case name do
      <<>> -> unnamed_capture_error(start, "capture cannot be empty")
      _ -> parse(rest, col + 1, [{:capture, name}|acc])
    end
  end

  # Jump to capture group (exclusion)
  defp parse_cap(<<"[^", rest :: binary>>, col, acc, name, start) do
    case name do
      <<>> -> unnamed_capture_error(start, "capture must be named before group")
      _ -> parse_cap_gr(rest, col + 1, acc, name, %{}, :exclusion, {col, start})
    end
  end

  # Jump to capture group (inclusion)
  defp parse_cap(<<"[", rest :: binary>>, col, acc, name, start) do
    case name do
      <<>> -> unnamed_capture_error(start, "capture must be named before group")
      _ -> parse_cap_gr(rest, col + 1, acc, name, %{}, :inclusion, {col, start})
    end
  end

  # Detect dangling capture
  defp parse_cap(<<>>, _col, _acc, _name, start) do
    dangling_error("capture", start, "}")
  end

  # Detect capture name character
  defp parse_cap(binary, col, acc, name, start) do
    case parse_escape(binary, col) do
      {:escape, ch, rest} ->
        parse_cap(rest, col + 3, acc, name <> <<ch>>, start)
      {:character, ch, rest} ->
        parse_cap(rest, col + 1, acc, name <> <<ch>>, start)
      unescaped_symbol_error ->
        unescaped_symbol_error
    end
  end

  # Accept capture group — a group must be immediately followed by "}".
  defp parse_cap_gr(<<"]}", rest :: binary>>, col, acc, name, group, type, _start) do
    parse(rest, col + 2, [{:capture, name, type, group}|acc])
  end

  # Detect nontrailing group or dangling capture
  defp parse_cap_gr(<<"]", rest :: binary>>, _col, _acc, _name, _group, type, {start, cap}) do
    case rest do
      <<>> -> dangling_error("capture", cap, "}")
      _ -> nontrailing_group_error(start, type)
    end
  end

  defp parse_cap_gr(<<>>, _col, _acc, _name, _group, type, {start, _}) do
    dangling_error("#{type}", start, "]")
  end

  # Detect capture group character
  defp parse_cap_gr(binary, col, acc, name, group, type, start) do
    case parse_escape(binary, col) do
      {:escape, ch, rest} ->
        group = Map.put(group, ch, ch)
        parse_cap_gr(rest, col + 3, acc, name, group, type, start)
      {:character, ch, rest} ->
        group = Map.put(group, ch, ch)
        parse_cap_gr(rest, col + 1, acc, name, group, type, start)
      unescaped_symbol_error ->
        unescaped_symbol_error
    end
  end

  # Detect escaped and unescaped symbols. Metacharacters (* [ ] { } ^) must be
  # backslash-escaped to be used literally; codepoints are returned as integers
  # (94 = ^, 42 = *, 91 = [, 93 = ], 123 = {, 125 = }).
  # NOTE(review): callers advance col by 3 for a 2-character escape sequence —
  # columns are only used in error messages; confirm intent before changing.
  defp parse_escape(<<"\\^", rest :: binary>>, _col), do: {:escape, 94, rest}
  defp parse_escape(<<"\\*", rest :: binary>>, _col), do: {:escape, 42, rest}
  defp parse_escape(<<"\\[", rest :: binary>>, _col), do: {:escape, 91, rest}
  defp parse_escape(<<"\\]", rest :: binary>>, _col), do: {:escape, 93, rest}
  defp parse_escape(<<"\\{", rest :: binary>>, _col), do: {:escape, 123, rest}
  defp parse_escape(<<"\\}", rest :: binary>>, _col), do: {:escape, 125, rest}
  defp parse_escape(<<"^", _rest :: binary>>, col), do: unescaped_symbol_error("^", col)
  defp parse_escape(<<"*", _rest :: binary>>, col), do: unescaped_symbol_error("*", col)
  defp parse_escape(<<"[", _rest :: binary>>, col), do: unescaped_symbol_error("[", col)
  defp parse_escape(<<"]", _rest :: binary>>, col), do: unescaped_symbol_error("]", col)
  defp parse_escape(<<"{", _rest :: binary>>, col), do: unescaped_symbol_error("{", col)
  defp parse_escape(<<"}", _rest :: binary>>, col), do: unescaped_symbol_error("}", col)
  defp parse_escape(<<ch, rest :: binary>>, _col), do: {:character, ch, rest}

  # Return dangling symbol error
  defp dangling_error(type, start_col, expected) do
    {:error, "Dangling group (#{type}) starting at column #{start_col}, expecting #{expected}"}
  end

  # Return unescaped symbol error
  defp unescaped_symbol_error(symbol, col) do
    {:error, "Unescaped symbol #{symbol} at column #{col}"}
  end

  # Return unnamed capture error
  defp unnamed_capture_error(start_col, context) do
    {:error, "Unnamed capture starting at column #{start_col}, #{context}"}
  end

  # Return trailing group error (grammar fixed: "must be in")
  defp nontrailing_group_error(start_col, type) do
    {:error, "Group (#{type}) must be in the tail position of capture starting at column #{start_col}"}
  end
end
|
lib/retrieval/patternparser.ex
| 0.685002 | 0.469642 |
patternparser.ex
|
starcoder
|
defmodule ChartPatternDetection do
  @moduledoc """
  Utilities for detecting chart patterns in bar data: extracting hi/lo
  reversal points, normalizing them into the unit square, generating
  candidate subsets, and applying rule predicates.
  """

  alias Decimal, as: D
  alias List, as: L
  alias Enum, as: E
  alias Map, as: M

  # Pairs each element with its successor: [a, b, c] -> [{a, b}, {b, c}].
  defp get_pairs(list),
    do: E.zip(E.drop(list, -1), E.drop(list, 1))

  # Scales `val` into [0, 1] relative to the [min, max] range.
  # FIX: the integer clause previously named its parameters (val, min, max)
  # while the Decimal clause and every call site use (val, max, min) — the
  # names were swapped and misleading. Behavior is unchanged (positional
  # forwarding was always consistent); only the names are corrected.
  defp normalize_one(val, max, min) when is_integer(val) and is_integer(max) and is_integer(min),
    do: normalize_one(D.new(val), D.new(max), D.new(min))

  defp normalize_one(val = %D{}, max = %D{}, min = %D{}),
    do: D.div(D.sub(val, min), D.sub(max, min))

  @doc """
  Walks consecutive bar pairs and collects alternating `{:hi, bar}` /
  `{:lo, bar}` reversal points, newest first. Consecutive moves in the same
  direction replace the previous extreme instead of adding a new entry.
  """
  def reversals(bar_list) when is_list(bar_list) do
    get_pairs(bar_list)
    |> E.reduce([], fn {prev, next}, matches ->
      new_lo = D.lt?(M.get(next, :l), M.get(prev, :l))
      new_hi = D.gt?(M.get(next, :h), M.get(prev, :h))
      reversal =
        case {new_lo, new_hi} do
          {true, false} -> :lo
          {false, true} -> :hi
          # Inside bar or outside bar: ambiguous, no reversal recorded.
          _ -> nil
        end
      case {L.first(matches), reversal} do
        {_, nil} -> matches
        {nil, :lo} -> [{:lo, next}]
        {nil, :hi} -> [{:hi, next}]
        # Same direction: extend the current swing by replacing its extreme.
        {{:lo, _}, :lo} -> L.replace_at(matches, 0, {:lo, next})
        {{:lo, _}, :hi} -> [{:hi, next} | matches]
        {{:hi, _}, :hi} -> L.replace_at(matches, 0, {:hi, next})
        {{:hi, _}, :lo} -> [{:lo, next} | matches]
      end
    end)
  end

  @doc """
  Normalizes a reversal list so prices and timestamps lie in [0, 1]: highs are
  scaled against the overall high/low range, times against the first/last
  reversal timestamps.
  """
  def normalize(reversals) do
    # NOTE(review): the "NaN" seed strings rely on Decimal's max/min treating
    # NaN as the identity and accepting a string argument — confirm against
    # the Decimal version pinned in mix.lock.
    {max_h, min_l} =
      E.reduce(reversals, {"NaN", "NaN"}, fn
        {:hi, %{h: h = %D{}}}, {max_h, min_l} -> {D.max(max_h, h), min_l}
        {:lo, %{l: l = %D{}}}, {max_h, min_l} -> {max_h, D.min(min_l, l)}
      end)
    # Reversals are newest-first, so the head holds the latest timestamp.
    {_, %{t: max_t}} = L.first(reversals)
    {_, %{t: min_t}} = L.last(reversals)
    E.map(reversals, fn {side, %{h: h, l: l, t: t}} ->
      {side,
       %{
         h: normalize_one(h, max_h, min_l),
         l: normalize_one(l, max_h, min_l),
         t: normalize_one(t, max_t, min_t)
       }}
    end)
  end

  @doc """
  Returns `{:ok, subsets}` with every prefix of `results_list` from its full
  length down to `min_l` elements, or `{:error, message}` when `min_l` is not
  strictly between 1 and the list length.
  """
  def get_subsets(results_list, min_l) when is_integer(min_l) and is_list(results_list) do
    len = length(results_list)
    if min_l < len and min_l > 1 do
      {:ok, E.map(len..min_l, fn num -> E.take(results_list, num) end)}
    else
      {:error, "cannot scan between minimum #{min_l} and set of #{len} items"}
    end
  end

  @doc """
  Applies every rule function to `data_set`; true only when all rules return
  exactly `true`.
  """
  def detect(data_set, rules_list) when is_list(rules_list) and is_list(data_set) do
    E.map(rules_list, fn rule_fn when is_function(rule_fn) -> rule_fn.(data_set) end)
    |> E.all?(fn test_result -> test_result == true end)
  end
end
|
lib/chart_pattern_detection.ex
| 0.514644 | 0.440349 |
chart_pattern_detection.ex
|
starcoder
|
defmodule Alice.Conn do
  @moduledoc """
  Alice.Conn defines a struct that is used throughout alice to hold state
  during the lifetime of a message handling.

  An Alice.Conn struct contains 3 things: `message`, the incoming message
  that is currently being handled; `slack`, a data structure from the Slack
  library that holds all the information about the Slack instance; and
  `state`, which is the state of the bot that is persisted between messages.
  State defaults to an in-memory Map, but may be configured to be backed by
  Redis.

  This module also provides helper functions that operate on Conn structs.
  """

  alias Alice.Conn

  defstruct([:message, :slack, :state])

  @doc """
  Builds an `Alice.Conn` struct from a `{message, slack, state}` tuple.
  """
  def make({message, slack, state}) do
    %Conn{message: message, slack: slack, state: state}
  end

  def make(message, slack, state \\ %{}) do
    make({message, slack, state})
  end

  @doc """
  Returns true when the incoming message mentions the bot (i.e. is a
  command), false otherwise.
  """
  def command?(%Conn{} = conn) do
    mention = "<@#{conn.slack.me.id}>"
    String.contains?(conn.message.text, mention)
  end

  @doc """
  Returns the name of the user who sent the incoming message.
  """
  def user(%Conn{} = conn) do
    data = user_data(conn)
    data["name"]
  end

  @doc """
  Returns the timezone offset of the user who sent the incoming message.
  """
  def tz_offset(%Conn{} = conn) do
    data = user_data(conn)
    data["tz_offset"]
  end

  @doc """
  Returns the timestamp of the incoming message.
  """
  def timestamp(%Conn{message: message}) do
    message.ts
  end

  @doc """
  Builds a string to use as an @reply back to the user who sent the message.
  """
  def at_reply_user(%Conn{} = conn) do
    data = user_data(conn)
    "<@#{data["id"]}>"
  end

  # Looks up the sender's user record in the slack users list by id.
  defp user_data(%Conn{message: %{user: user_id}, slack: %{users: users}}) do
    Enum.find(users, fn member -> member["id"] == user_id end)
  end

  @doc """
  Used internally to add the regex captures to the `message`.
  """
  def add_captures(%Conn{} = conn, pattern) do
    captures = Regex.run(pattern, conn.message.text)

    conn.message
    |> Map.put(:captures, captures)
    |> make(conn.slack, conn.state)
  end

  @doc """
  Returns the last regex capture stored on the `conn`.
  """
  def last_capture(%Conn{message: %{captures: captures}}) do
    hd(Enum.reverse(captures))
  end

  @doc """
  Used internally to sanitize the incoming message text, preserving the raw
  text under `:original_text`.
  """
  def sanitize_message(%Conn{message: %{text: text} = message} = conn) do
    sanitized =
      message
      |> Map.put(:original_text, text)
      |> Map.put(:text, sanitize_text(text))

    make(sanitized, conn.slack, conn.state)
  end

  # Applies all text cleanups in order: quotes, then emails, then URLs.
  defp sanitize_text(text) do
    remove_formatted_urls(remove_formatted_emails(remove_smart_quotes(text)))
  end

  # Replaces curly "smart" quotes with their plain ASCII equivalents.
  defp remove_smart_quotes(text) do
    [{~s(“), ~s(")}, {~s(”), ~s(")}, {~s(’), ~s(')}]
    |> Enum.reduce(text, fn {smart, plain}, acc -> String.replace(acc, smart, plain) end)
  end

  # Unwraps Slack-formatted mailto links down to the bare address.
  defp remove_formatted_emails(text) do
    String.replace(text, ~r/<mailto:([^|]+)[^\s]*>/i, "\\1")
  end

  # Unwraps Slack-formatted URLs down to the bare URL.
  defp remove_formatted_urls(text) do
    String.replace(text, ~r/<([^|@]+)([^\s]*)?>/, "\\1")
  end

  @doc """
  Used internally to get namespaced state.
  """
  def get_state_for(%Conn{} = conn, namespace, default \\ nil) do
    state_backend().get(conn.state, namespace, default)
  end

  @doc """
  Used internally to put namespaced state, returning an updated conn.
  """
  def put_state_for(%Conn{} = conn, namespace, value) do
    updated = state_backend().put(conn.state, namespace, value)
    make(conn.message, conn.slack, updated)
  end

  @doc """
  Used internally to delete namespaced state, returning an updated conn.
  """
  def delete_state_for(%Conn{} = conn, namespace) do
    updated = state_backend().delete(conn.state, namespace)
    make(conn.message, conn.slack, updated)
  end

  # Chooses the state backend module from application config; anything other
  # than :redis falls back to the in-memory backend.
  defp state_backend do
    if Application.get_env(:alice, :state_backend) == :redis do
      Alice.StateBackends.Redis
    else
      Alice.StateBackends.Memory
    end
  end
end
|
lib/alice/conn.ex
| 0.830009 | 0.550184 |
conn.ex
|
starcoder
|
defmodule ExNotification do
  @moduledoc """
  An elixir client for the notification system APIs used by the Australian, British, and Canadian governments.

  ## Installation

  The package can be installed by adding `ex_notification` to your list of dependencies in `mix.exs`:

  ```elixir
  def deps do
    [
      {:ex_notification, "~> 0.1.0"}
    ]
  end
  ```

  ## Configuration

  Set the following configuration values in your `config.exs`, setting the `api` value to your API endpoint and the `key` value to your API key. Ex:

  ```
  config :ex_notification, api: "https://localhost"
  config :ex_notification, key: "<KEY>"
  ```

  ## Documentation

  Documentation can be found at [https://hexdocs.pm/ex_notification](https://hexdocs.pm/ex_notification).

  ## License

  MIT
  """

  alias ExNotification.Client

  @doc """
  Get a notification by ID

  You can filter the returned messages by including the following optional arguments in the URL:
  - `template_type`
  - `status`
  - `reference`
  - `older_than`

  ### Arguments

  You can omit any of these arguments to ignore these filters.

  #### template_type (optional)

  You can filter by:

  * `email`
  * `sms`
  * `letter`

  #### status (optional)

  | status | description | text | email |
  | :--- | :--- | :--- | :--- |
  | created | Notification has placed the message in a queue, ready to be sent to the provider. It should only remain in this state for a few seconds.| Yes | Yes |
  | sending | Notification has sent the message to the provider. The provider will try to deliver the message to the recipient. Notification is waiting for delivery information. | Yes | Yes |
  | delivered | The message was successfully delivered | Yes | Yes |
  | sent / sent internationally | The message was sent to an international number. The mobile networks in some countries do not provide any more delivery information.| Yes | |
  | pending | Notification is waiting for more delivery information.<br>Notification received a callback from the provider but the recipient’s device has not yet responded. Another callback from the provider determines the final status of the notification.| Yes | |
  | failed | This returns all failure statuses:<br>- permanent-failure<br>- temporary-failure<br>- technical-failure | Yes | Yes |
  | permanent-failure | The provider could not deliver the message because the email address or phone number was wrong. You should remove these email addresses or phone numbers from your database. You’ll still be charged for text messages to numbers that do not exist. | Yes | Yes |
  | temporary-failure | The provider could not deliver the message after trying for 72 hours. This can happen when the recipient’s inbox is full or their phone is off. You can try to send the message again. You’ll still be charged for text messages to phones that are not accepting messages. | Yes | Yes |
  | technical-failure | Email / Text: Your message was not sent because there was a problem between Notify and the provider.<br>You’ll have to try sending your messages again. You will not be charged for text messages that are affected by a technical failure. | Yes | Yes |

  #### reference (optional)

  An identifier you can create if necessary. This reference identifies a single notification or a batch of notifications. It must not contain any personal information such as name or postal address. For example:

  ```
  "reference": "STRING"
  ```

  #### older_than (optional)

  Input the ID of a notification into this argument. If you use this argument, the method returns the next 250 received notifications older than the given ID.

  ```
  "older_than":"740e5834-3a29-46b4-9a6f-16142fde533a"
  ```

  If you leave out this argument, the method returns the most recent 250 notifications.

  The client only returns notifications that are 7 days old or newer. If the notification specified in this argument is older than 7 days, the client returns an empty response.

  ### Response

  If the request is successful, the response body is `json` and the status code is `200`.

  ```
  {
    "id": "740e5834-3a29-46b4-9a6f-16142fde533a", # required string - notification ID
    "reference": "STRING", # optional string - client reference
    "email_address": "<EMAIL>", # required string for emails
    "phone_number": "+447900900123", # required string for text messages
    "type": "sms / email", # required string
    "status": "sending / delivered / permanent-failure / temporary-failure / technical-failure", # required string
    "template": {
      "version": 1
      "id": 'f33517ff-2a88-4f6e-b855-c550268ce08a' # required string - template ID
      "uri": "/v2/template/{id}/{version}", # required
    },
    "body": "STRING", # required string - body of notification
    "subject": "STRING" # required string for email - subject of email
    "created_at": "STRING", # required string - date and time notification created
    "created_by_name": "STRING", # optional string - name of the person who sent the notification if sent manually
    "sent_at": " STRING", # optional string - date and time notification sent to provider
    "completed_at": "STRING" # optional string - date and time notification delivered or failed
  }
  ```
  """
  def get_notification(
        id,
        template_type \\ nil,
        status \\ nil,
        reference \\ nil,
        older_than \\ nil
      ) do
    "/v2/notifications/#{id}"
    |> Client.get(
      query: %{
        template_type: template_type,
        status: status,
        reference: reference,
        older_than: older_than
      }
    )
    |> handle_response()
  end

  @doc """
  Get a batch of notifications

  This will return all your messages with statuses. They will display in pages of up to 250 messages each.

  You can only get the status of messages that are 7 days old or newer.

  You can filter the returned messages by including the following optional arguments in the URL:
  - `template_type`
  - `status`
  - `reference`
  - `older_than`

  ### Arguments

  You can omit any of these arguments to ignore these filters.

  #### template_type (optional)

  You can filter by:

  * `email`
  * `sms`
  * `letter`

  #### status (optional)

  | status | description | text | email |
  | :--- | :--- | :--- | :--- |
  | created | Notification has placed the message in a queue, ready to be sent to the provider. It should only remain in this state for a few seconds.| Yes | Yes |
  | sending | Notification has sent the message to the provider. The provider will try to deliver the message to the recipient. Notification is waiting for delivery information. | Yes | Yes |
  | delivered | The message was successfully delivered | Yes | Yes |
  | sent / sent internationally | The message was sent to an international number. The mobile networks in some countries do not provide any more delivery information.| Yes | |
  | pending | Notification is waiting for more delivery information.<br>Notification received a callback from the provider but the recipient’s device has not yet responded. Another callback from the provider determines the final status of the notification.| Yes | |
  | failed | This returns all failure statuses:<br>- permanent-failure<br>- temporary-failure<br>- technical-failure | Yes | Yes |
  | permanent-failure | The provider could not deliver the message because the email address or phone number was wrong. You should remove these email addresses or phone numbers from your database. You’ll still be charged for text messages to numbers that do not exist. | Yes | Yes |
  | temporary-failure | The provider could not deliver the message after trying for 72 hours. This can happen when the recipient’s inbox is full or their phone is off. You can try to send the message again. You’ll still be charged for text messages to phones that are not accepting messages. | Yes | Yes |
  | technical-failure | Email / Text: Your message was not sent because there was a problem between Notify and the provider.<br>You’ll have to try sending your messages again. You will not be charged for text messages that are affected by a technical failure. | Yes | Yes |

  #### reference (optional)

  An identifier you can create if necessary. This reference identifies a single notification or a batch of notifications. It must not contain any personal information such as name or postal address. For example:

  ```
  "reference": "STRING"
  ```

  #### older_than (optional)

  Input the ID of a notification into this argument. If you use this argument, the method returns the next 250 received notifications older than the given ID.

  ```
  "older_than":"740e5834-3a29-46b4-9a6f-16142fde533a"
  ```

  If you leave out this argument, the method returns the most recent 250 notifications.

  The client only returns notifications that are 7 days old or newer. If the notification specified in this argument is older than 7 days, the client returns an empty response.

  ### Response

  If the request is successful, the response body is `json` and the status code is `200`.

  ```
  {
    "notifications": [
      {
        "id": "740e5834-3a29-46b4-9a6f-16142fde533a", # required string - notification ID
        "reference": "STRING", # optional string - client reference
        "email_address": "<EMAIL>", # required string for emails
        "phone_number": "+447900900123", # required string for text messages
        "type": "sms / email", # required string
        "status": "sending / delivered / permanent-failure / temporary-failure / technical-failure", # required string
        "template": {
          "version": 1
          "id": 'f33517ff-2a88-4f6e-b855-c550268ce08a' # required string - template ID
          "uri": "/v2/template/{id}/{version}", # required
        },
        "body": "STRING", # required string - body of notification
        "subject": "STRING" # required string for email - subject of email
        "created_at": "STRING", # required string - date and time notification created
        "created_by_name": "STRING", # optional string - name of the person who sent the notification if sent manually
        "sent_at": " STRING", # optional string - date and time notification sent to provider
        "completed_at": "STRING" # optional string - date and time notification delivered or failed
      },
      …
    ],
    "links": {
      "current": "/notifications?template_type=sms&status=delivered",
      "next": "/notifications?other_than=last_id_in_list&template_type=sms&status=delivered"
    }
  }
  ```
  """
  def get_notifications(
        template_type \\ nil,
        status \\ nil,
        reference \\ nil,
        older_than \\ nil
      ) do
    "/v2/notifications"
    |> Client.get(
      query: %{
        template_type: template_type,
        status: status,
        reference: reference,
        older_than: older_than
      }
    )
    |> handle_response()
  end

  @doc """
  Send email message.

  ### Arguments

  #### email_address (required)

  The email address of the recipient.

  #### template_id (required)

  Sign in to [Notification](https://notification.alpha.canada.ca) and go to the __Templates__ page to find the template ID.

  #### personalisation (optional)

  If a template has placeholder fields for personalised information such as name or reference number, you need to provide their values in a dictionary with key value pairs. For example:

  ```
  "personalisation": %{
    "first_name": "Amala",
    "application_date": "2018-01-01",
  }
  ```

  You can leave out this argument if a template does not have any placeholder fields for personalised information.

  #### reference (optional)

  An identifier you can create if necessary. This reference identifies a single notification or a batch of notifications. It must not contain any personal information such as name or postal address. For example:

  ```
  "reference": "STRING"
  ```

  You can leave out this argument if you do not have a reference.

  #### email_reply_to_id (optional)

  This is an email reply-to address specified by you to receive replies from your users. Your service cannot go live until you set up at least one of these email addresses. To set up:

  1. Sign into your Notification account.
  1. Go to __Settings__.
  1. If you need to change to another service, select __Switch service__ in the top right corner of the screen and select the correct one.
  1. Go to the Email section and select __Manage__ on the __Email reply-to addresses__ row.
  1. Select __Change__ to specify the email address to receive replies, and select __Save__.

  For example:

  ```
  "email_reply_to_id": "8e222534-7f05-4972-86e3-17c5d9f894e2"
  ```

  You can leave out this argument if your service only has one email reply-to address, or you want to use the default email address.

  ## Send a file by email

  Send files without the need for email attachments.

  This is an invitation-only feature. [Contact the Notification team](https://notification.alpha.canada.ca/support/ask-question-give-feedback) to enable this function for your service.

  To send a file by email, add a placeholder field to the template then upload a file. The placeholder field will contain a secure link to download the file.

  #### Add a placeholder field to the template

  1. Sign in to [Notification](https://notification.alpha.canada.ca/).
  1. Go to the __Templates__ page and select the relevant email template.
  1. Add a placeholder field to the email template using double brackets. For example:

  "Download your file at: ((link_to_document))"

  #### Upload your file

  The file you upload must be a PDF file smaller than 2MB. You’ll need to convert the file into a string that is base64 encoded.

  Pass the file object as a value into the personalisation argument. For example:

  ```
  "personalisation":{
    "first_name": "Amala",
    "application_date": "2018-01-01",
    "link_to_document": "file as base64 encoded string",
  }
  ```

  ### Response

  If the request to the client is successful, the client returns a `dict`:

  ```
  {
    "id": "740e5834-3a29-46b4-9a6f-16142fde533a",
    "reference": "STRING",
    "content": {
      "subject": "SUBJECT TEXT",
      "body": "MESSAGE TEXT",
      "from_email": "SENDER EMAIL"
    },
    "uri": "https://api.notification.alpha.canada.ca/v2/notifications/740e5834-3a29-46b4-9a6f-16142fde533a",
    "template": {
      "id": "f33517ff-2a88-4f6e-b855-c550268ce08a",
      "version": 1,
      "uri": "https://api.notification.alpha.canada.ca/v2/template/f33517ff-2a88-4f6e-b855-c550268ce08a"
    }
  }
  ```
  """
  def send_email(
        email_address,
        template_id,
        personalisation \\ %{},
        reference \\ nil,
        email_reply_to_id \\ nil
      ) do
    "/v2/notifications/email"
    |> Client.post(%{
      email_address: email_address,
      template_id: template_id,
      personalisation: personalisation,
      reference: reference,
      email_reply_to_id: email_reply_to_id
    })
    |> handle_response()
  end

  @doc """
  Send sms message.

  #### phone_number (required)

  The phone number of the recipient of the text message. This can be a Canadian or international number.

  #### template_id (required)

  Sign in to [Notification](https://notification.alpha.canada.ca/) and go to the __Templates__ page to find the template ID.

  #### personalisation (optional)

  If a template has placeholder fields for personalised information such as name or reference number, you must provide their values in a dictionary with key value pairs. For example:

  ```
  "personalisation": {
    "first_name": "Amala",
    "application_date": "2018-01-01",
  }
  ```

  You can leave out this argument if a template does not have any placeholder fields for personalised information.

  #### reference (optional)

  An identifier you can create if necessary. This reference identifies a single notification or a batch of notifications. It must not contain any personal information such as name or postal address. For example:

  ```
  "reference": "STRING"
  ```

  You can leave out this argument if you do not have a reference.

  #### sms_sender_id (optional)

  A unique identifier of the sender of the text message notification. You can find this information on the __Text Message sender__ settings screen:

  1. Sign into your Notification account.
  1. Go to __Settings__.
  1. If you need to change to another service, select __Switch service__ in the top right corner of the screen and select the correct one.
  1. Go to the __Text Messages__ section and select __Manage__ on the __Text Message sender__ row.

  You can then either:

  - copy the sender ID that you want to use and paste it into the method
  - select __Change__ to change the default sender that the service will use, and select __Save__

  ```
  "sms_sender_id": "8e222534-7f05-4972-86e3-17c5d9f894e2"
  ```

  You can leave out this argument if your service only has one text message sender, or if you want to use the default sender.

  ### Response

  If the request is successful, the response body is `json` with a status code of `201`:

  ```
  {
    "id": "740e5834-3a29-46b4-9a6f-16142fde533a",
    "reference": "STRING",
    "content": {
      "body": "MESSAGE TEXT",
      "from_number": "SENDER"
    },
    "uri": "https://api.notification.alpha.canada.ca/v2/notifications/740e5834-3a29-46b4-9a6f-16142fde533a",
    "template": {
      "id": "f33517ff-2a88-4f6e-b855-c550268ce08a",
      "version": 1,
      "uri": "https://api.notification.alpha.canada.ca/v2/template/ceb50d92-100d-4b8b-b559-14fa3b091cd"
    }
  }
  ```

  If you are using the test API key, all your messages will come back with a `delivered` status.

  All messages sent using the team and whitelist or live keys will appear on your dashboard.
  """
  def send_sms(
        phone_number,
        template_id,
        personalisation \\ %{},
        reference \\ nil,
        sms_sender_id \\ nil
      ) do
    "/v2/notifications/sms"
    |> Client.post(%{
      phone_number: phone_number,
      template_id: template_id,
      personalisation: personalisation,
      reference: reference,
      sms_sender_id: sms_sender_id
    })
    |> handle_response()
  end

  # Shared response handling (previously copy-pasted into each public
  # function): unwraps a successful client response to its body and passes
  # `{:error, reason}` tuples through unchanged.
  defp handle_response({:ok, %{body: body}}), do: body
  defp handle_response({:error, error}), do: {:error, error}
end
|
lib/ex_notification.ex
| 0.916563 | 0.876819 |
ex_notification.ex
|
starcoder
|
defmodule Indicado.MACD do
  @moduledoc """
  This is the MACD module used for calculating Moving Average Convergence Divergence
  """

  @doc """
  Calculates MACD for the list.

  Returns list of map `[{macd: x, signal: y}]` or `{:error, reason}`

  - `macd` represents macd calculation
  - `signal` represents signal line

  ## Examples

      iex> Indicado.MACD.eval([10, 15, 20, 30, 35, 40, 50], 2, 4, 3)
      {:ok, [%{macd: 1.333333333333334, signal: 0.666666666666667},
      %{macd: 2.5777777777777793, signal: 1.6222222222222231},
      %{macd: 4.80592592592593, signal: 3.2140740740740767},
      %{macd: 5.303308641975313, signal: 4.258691358024695},
      %{macd: 5.321902880658442, signal: 4.790297119341568},
      %{macd: 6.573114293552813, signal: 5.6817057064471905}]}

      iex> Indicado.MACD.eval([1, 2, 3, 4], 2, 4, 3)
      {:ok, [%{macd: 0.2666666666666666, signal: 0.1333333333333333},
      %{macd: 0.5155555555555553, signal: 0.3244444444444443},
      %{macd: 0.6945185185185183, signal: 0.5094814814814813}]}

      iex> Indicado.MACD.eval([], 4, 8, 6)
      {:error, :not_enough_data}

      iex> Indicado.MACD.eval([1, 2, 3, 4], 0, 4, 3)
      {:error, :bad_period}

      iex> Indicado.MACD.eval([1, 2, 3, 4], 2, 0, 3)
      {:error, :bad_period}

      iex> Indicado.MACD.eval([1, 2, 3, 4], 2, 4, 0)
      {:error, :bad_period}

  """
  @spec eval(list, pos_integer, pos_integer, pos_integer) ::
          {:ok, nonempty_list(map)} | {:error, :bad_period | :not_enough_data}
  def eval(list, fast_period, slow_period, signal_period) do
    calc(list, fast_period, slow_period, signal_period)
  end

  @doc """
  Calculates MACD for the list.

  Returns list of map `[{macd: x, signal: y}]` or `{:error, reason}`

  - `macd` represents macd calculation
  - `signal` represents signal line

  Raises `NotEnoughDataError` if the given list is empty.
  Raises `BadPeriodError` if any period is an unacceptable number.

  ## Examples

      iex> Indicado.MACD.eval!([1, 2, 3, 4], 2, 4, 3)
      [%{macd: 0.2666666666666666, signal: 0.1333333333333333},
      %{macd: 0.5155555555555553, signal: 0.3244444444444443},
      %{macd: 0.6945185185185183, signal: 0.5094814814814813}]

      iex> Indicado.MACD.eval!([], 4, 8, 6)
      ** (NotEnoughDataError) not enough data

      iex> Indicado.MACD.eval!([1, 2, 3, 4], 0, 4, 3)
      ** (BadPeriodError) bad period

      iex> Indicado.MACD.eval!([1, 2, 3, 4], 2, 0, 3)
      ** (BadPeriodError) bad period

      iex> Indicado.MACD.eval!([1, 2, 3, 4], 2, 4, 0)
      ** (BadPeriodError) bad period

  """
  @spec eval!(list, pos_integer, pos_integer, pos_integer) :: nonempty_list(map) | no_return
  def eval!(list, fast_period, slow_period, signal_period) do
    case calc(list, fast_period, slow_period, signal_period) do
      {:ok, values} -> values
      {:error, :not_enough_data} -> raise NotEnoughDataError
      {:error, :bad_period} -> raise BadPeriodError
    end
  end

  # An empty series can never produce a MACD line.
  defp calc([], _fast_period, _slow_period, _signal_period), do: {:error, :not_enough_data}

  # All three periods must be positive integers.
  defp calc(_list, fast_period, slow_period, signal_period)
       when fast_period < 1 or slow_period < 1 or signal_period < 1,
       do: {:error, :bad_period}

  defp calc(list, fast_period, slow_period, signal_period) do
    # MACD line: fast EMA minus slow EMA, element-wise.
    macd_line =
      list
      |> Indicado.EMA.eval!(fast_period)
      |> Enum.zip(Indicado.EMA.eval!(list, slow_period))
      |> Enum.map(fn {fast, slow} -> fast - slow end)

    # Signal line: EMA of the MACD line itself.
    signal_line = Indicado.EMA.eval!(macd_line, signal_period)

    # Pair the two lines up; the first point is dropped to match the
    # published reference values.
    result =
      [macd_line, signal_line]
      |> Enum.zip_with(fn [macd, signal] -> %{macd: macd, signal: signal} end)
      |> Enum.drop(1)

    {:ok, result}
  end
end
|
lib/indicado/macd.ex
| 0.918872 | 0.767908 |
macd.ex
|
starcoder
|
defmodule Penelope.ML.Word2vec.Index do
@moduledoc """
This module represents a word2vec-style vectorset, compiled into a
set of hash-partitioned DETS files. Each record is a tuple consisting
of the term (word) and a set of weights (vector). This module also
supports parsing the standard text representation of word vectors
via the compile function.
On disk, the following files are created:
<path>/header.dets index header (version, metadata)
<path>/<name>_<part>.dets partition file
"""
alias __MODULE__, as: Index
alias Penelope.ML.Vector, as: Vector
alias Penelope.ML.Word2vec.IndexError, as: IndexError
defstruct version: 1,
name: nil,
partitions: 1,
vector_size: 300,
header: nil,
tables: []
@type t :: %Index{
version: pos_integer,
name: atom,
partitions: pos_integer,
vector_size: pos_integer,
header: atom,
tables: [atom]
}
@version 1
@doc """
creates a new word2vec index
files will be created as <path>/<name>_<part>.dets, one per partition
"""
@spec create!(
path :: String.t(),
name :: String.t(),
partitions: pos_integer,
size_hint: pos_integer,
vector_size: pos_integer
) :: Index.t()
def create!(path, name, options \\ []) do
name = String.to_atom(name)
partitions = Keyword.get(options, :partitions, 1)
vector_size = Keyword.get(options, :vector_size, 300)
size_hint = div(Keyword.get(options, :size_hint, 200_000), partitions)
header_data = [
version: @version,
name: name,
partitions: partitions,
vector_size: vector_size
]
File.mkdir_p!(path)
header = create_header(path, header_data)
tables =
0..(partitions - 1)
|> Stream.map(&create_table(path, name, &1, size_hint))
|> Enum.reduce([], &(&2 ++ [&1]))
%Index{
version: @version,
name: name,
partitions: partitions,
vector_size: vector_size,
header: header,
tables: tables
}
end
defp create_header(path, header) do
file =
path
|> Path.join("header.dets")
|> String.to_charlist()
options = [file: file, access: :read_write, type: :set, min_no_slots: 1]
name = String.to_atom(path)
with {:ok, file} <- :dets.open_file(name, options),
:ok <- :dets.insert(file, {:header, header}) do
file
else
{:error, reason} -> raise IndexError, inspect(reason)
end
end
defp create_table(path, name, partition, size_hint) do
{name, file} = table_file(path, name, partition)
options = [
file: file,
access: :read_write,
type: :set,
min_no_slots: size_hint
]
case :dets.open_file(name, options) do
{:ok, file} -> file
{:error, reason} -> raise IndexError, inspect(reason)
end
end
defp table_file(path, name, partition) do
part =
partition
|> Integer.to_string()
|> String.pad_leading(2, "0")
name = "#{name}_#{part}"
file =
path
|> Path.join("#{name}.dets")
|> String.to_charlist()
{String.to_atom(name), file}
end
@doc """
opens an existing word2vec index at the specified path
"""
@spec open!(path :: String.t(), cache_size: pos_integer) :: Index.t()
def open!(path, options \\ []) do
{
header,
[
version: version,
name: name,
partitions: partitions,
vector_size: vector_size
]
} = open_header(path)
tables =
0..(partitions - 1)
|> Stream.map(&open_table(path, name, &1))
|> Enum.reduce([], &(&2 ++ [&1]))
cache_size = Keyword.get(options, :cache_size, 1_000_000)
try do
:e2qc.setup(name, size: cache_size)
rescue
_ in ErlangError -> :ok
end
%Index{
version: version,
name: name,
partitions: partitions,
vector_size: vector_size,
header: header,
tables: tables
}
end
defp open_header(path) do
file =
path
|> Path.join("header.dets")
|> String.to_charlist()
options = [file: file, access: :read, type: :set]
name = String.to_atom(path)
with {:ok, file} <- :dets.open_file(name, options),
[{:header, header}] <- :dets.lookup(file, :header) do
{file, header}
else
{:error, reason} -> raise IndexError, inspect(reason)
end
end
defp open_table(path, name, partition) do
{name, file} = table_file(path, name, partition)
case :dets.open_file(name, file: file, access: :read) do
{:ok, file} -> file
{:error, reason} -> raise IndexError, inspect(reason)
end
end
@doc """
closes the index
"""
@spec close(index :: Index.t()) :: :ok
def close(%Index{header: header, tables: tables}) do
:dets.close(header)
Enum.each(tables, &:dets.close/1)
end
@doc """
inserts word vectors from a text file into a word2vec index
the index must have been opened using create()
"""
@spec compile!(index :: Index.t(), path :: String.t()) :: :ok
def compile!(index, path) do
path
|> File.stream!()
|> Stream.with_index(_offset = 1)
|> Task.async_stream(&parse_insert!(index, &1), ordered: false)
|> Stream.run()
end
@doc """
parses and inserts a single word vector text line into a word2vec index
"""
@spec parse_insert!(
index :: Index.t(),
{line :: String.t(), id :: pos_integer}
) :: {String.t(), pos_integer, Vector.t()}
def parse_insert!(index, {line, id}) do
{term, vector} = parse_line!(line)
record = {term, id, vector}
insert!(index, record)
record
end
@doc """
parses a word vector line: "<term> <weight> <weight> ..."
"""
@spec parse_line!(line :: String.t()) :: {String.t(), Vector.t()}
def parse_line!(line) do
[term | weights] = String.split(line, " ")
{term,
weights
|> Enum.map(&parse_weight/1)
|> Vector.from_list()}
end
defp parse_weight(str) do
case Float.parse(str) do
{value, _remain} -> value
:error -> raise ArgumentError, "invalid weight: #{str}"
end
end
@doc """
inserts a word vector tuple into a word2vec index
"""
@spec insert!(
index :: Index.t(),
record :: {String.t(), pos_integer, Vector.t()}
) :: :ok
def insert!(
%Index{vector_size: vector_size, header: header} = index,
{term, id, vector} = record
) do
actual_size = div(byte_size(vector), 4)
unless actual_size === vector_size do
raise IndexError,
"invalid vector size: #{actual_size} != #{vector_size}"
end
table = get_table(index, elem(record, 0))
with :ok <- :dets.insert(header, {id, term}),
:ok <- :dets.insert(table, record) do
:ok
else
{:error, reason} -> raise IndexError, inspect(reason)
end
end
@doc """
retrieves a term by its id

if found, returns the term string
otherwise, returns nil
"""
@spec fetch!(index :: Index.t(), id :: pos_integer) :: String.t() | nil
def fetch!(%{name: cache_name} = index, id) do
  # lookups are memoized per index via an e2qc cache keyed on id
  :e2qc.cache(cache_name, id, fn -> do_fetch(index, id) end)
end
# Uncached id->term lookup in the header table; raises IndexError if
# dets reports a failure.
defp do_fetch(%{header: header}, id) do
  case :dets.lookup(header, id) do
    [] -> nil
    [{_id, term}] -> term
    {:error, reason} -> raise IndexError, inspect(reason)
  end
end
@doc """
searches for a term in the word2vec index

if found, returns the id and word vector (no term)
otherwise, returns id 0 and an all-zero vector
"""
@spec lookup!(index :: Index.t(), term :: String.t()) ::
        {integer, Vector.t()}
def lookup!(%{name: cache_name} = index, term) do
  # lookups are memoized per index via an e2qc cache keyed on term
  :e2qc.cache(cache_name, term, fn -> do_lookup(index, term) end)
end
# Uncached term->{id, vector} lookup in the term's partition table;
# raises IndexError if dets reports a failure. A miss yields id 0 and a
# zero vector so the cached value is always shaped like a hit.
defp do_lookup(%{vector_size: vector_size} = index, term) do
  table = get_table(index, term)

  case :dets.lookup(table, term) do
    [{_term, id, vector}] ->
      {id, vector}

    [] ->
      # 32 bits per packed float32 weight
      zero_bits = vector_size * 32
      {0, <<0::size(zero_bits)>>}

    {:error, reason} ->
      raise IndexError, inspect(reason)
  end
end
# Selects the dets partition table for a term: hash modulo the number of
# partitions, used as an index into the tables list.
defp get_table(%Index{tables: tables, partitions: partitions}, term) do
  Enum.at(tables, rem(:erlang.phash2(term), partitions))
end
end
defmodule Penelope.ML.Word2vec.IndexError do
  @moduledoc "raised for word2vec DETS index processing failures"

  defexception [message: "an index error occurred"]
end
|
lib/penelope/ml/word2vec/index.ex
| 0.82828 | 0.667497 |
index.ex
|
starcoder
|
defmodule ExPlasma.Output.Position do
  @moduledoc """
  Generates an Output position if given the:
  `blknum` - The block number for this output
  `txindex` - The index of the Transaction in the block.
  `oindex` - The index of the Output in the Transaction.
  """

  @behaviour ExPlasma.Output

  alias __MODULE__.Validator
  alias ExPlasma.Output
  alias ExPlasma.Utils.RlpDecoder

  @type position() :: pos_integer()

  @type t() :: %{
          blknum: non_neg_integer(),
          txindex: non_neg_integer(),
          oindex: non_neg_integer()
        }

  @type with_position() :: %{
          position: position(),
          blknum: non_neg_integer(),
          txindex: non_neg_integer(),
          oindex: non_neg_integer()
        }

  @type validation_responses() ::
          :ok
          | {:error,
             Validator.blknum_validation_errors()
             | Validator.oindex_validation_errors()
             | Validator.txindex_validation_errors()}

  # Contract settings
  # These are being hard-coded from the same values on the contracts.
  # See: https://github.com/omisego/plasma-contracts/blob/master/plasma_framework/contracts/src/utils/PosLib.sol#L16-L23
  @block_offset 1_000_000_000
  @transaction_offset 10_000

  # encoded positions are zero-padded to this many bytes (a uint256)
  @encoded_length 32

  def block_offset(), do: @block_offset
  def transaction_offset(), do: @transaction_offset

  @doc """
  Creates an output_id from a block number, a tx index and an output index.

  ## Example

      iex> ExPlasma.Output.Position.new(1, 0, 0)
      %{blknum: 1, txindex: 0, oindex: 0, position: 1_000_000_000}
  """
  @spec new(non_neg_integer(), non_neg_integer(), non_neg_integer()) :: with_position()
  def new(blknum, txindex, oindex) when is_integer(blknum) and is_integer(txindex) and is_integer(oindex) do
    id = %{blknum: blknum, txindex: txindex, oindex: oindex}
    Map.put(id, :position, pos(id))
  end

  @doc """
  Encodes the blknum, txindex, and oindex into a single integer.

  ## Example

      iex> pos = %{blknum: 1, txindex: 0, oindex: 0}
      iex> ExPlasma.Output.Position.pos(pos)
      1_000_000_000
  """
  @spec pos(t() | with_position()) :: position()
  def pos(%{blknum: blknum, txindex: txindex, oindex: oindex}) do
    oindex + txindex * @transaction_offset + blknum * @block_offset
  end

  @doc """
  Encodes the output identifier into its 32-byte binary position form.

  ## Example

      iex> output_id = %{blknum: 1, txindex: 0, oindex: 0}
      iex> ExPlasma.Output.Position.to_rlp(output_id)
      <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 59, 154, 202, 0>>
  """
  @impl Output
  @spec to_rlp(t() | with_position()) :: binary()
  def to_rlp(output_id), do: encode(pos(output_id))

  @doc """
  Encodes the output position into an RLP encodable object.

  ## Example

      iex> pos = ExPlasma.Output.Position.pos(%{blknum: 1, txindex: 0, oindex: 0})
      iex> ExPlasma.Output.Position.encode(pos)
      <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 59, 154, 202, 0>>
  """
  @spec encode(position()) :: binary()
  def encode(position) do
    pad_binary(:binary.encode_unsigned(position, :big))
  end

  @doc """
  Returns a map of the decoded position.

  ## Example

      iex> pos = 1_000_000_000
      iex> ExPlasma.Output.Position.to_map(pos)
      {:ok, %{position: 1_000_000_000, blknum: 1, txindex: 0, oindex: 0}}
  """
  @impl Output
  @spec to_map(position()) :: {:ok, with_position()} | {:error, :malformed_output_position}
  def to_map(pos) when is_integer(pos) do
    # the position packs blknum/txindex/oindex positionally, so peel the
    # block part off first and split the remainder into txindex/oindex
    blknum = div(pos, @block_offset)
    offset_in_block = rem(pos, @block_offset)

    {:ok,
     %{
       position: pos,
       blknum: blknum,
       txindex: div(offset_in_block, @transaction_offset),
       oindex: rem(offset_in_block, @transaction_offset)
     }}
  end

  def to_map(_), do: {:error, :malformed_output_position}

  @doc """
  Decodes and returns the integer position.

  ## Example

      iex> pos = <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 59, 154, 202, 0>>
      iex> ExPlasma.Output.Position.decode(pos)
      {:ok, 1_000_000_000}
  """
  @spec decode(binary()) :: {:ok, position()} | {:error, :malformed_input_position_rlp}
  def decode(encoded) do
    case RlpDecoder.parse_uint256_with_leading(encoded) do
      {:ok, _position} = success -> success
      _error -> {:error, :malformed_input_position_rlp}
    end
  end

  @doc """
  Validates that values can give a valid position.

  ## Example

      iex> output_id = %{blknum: 1, txindex: 0, oindex: 0}
      iex> ExPlasma.Output.Position.validate(output_id)
      :ok
  """
  @impl Output
  @spec validate(t() | with_position()) :: validation_responses()
  def validate(nil), do: :ok

  def validate(output_id) do
    # each validator returns :ok or a self-describing {:error, reason};
    # the first failure falls straight through the `with`
    with :ok <- Validator.validate_blknum(output_id.blknum),
         :ok <- Validator.validate_txindex(output_id.txindex) do
      Validator.validate_oindex(output_id.oindex)
    end
  end

  # Left-pads a big-endian unsigned binary with zero bytes up to the
  # 32-byte uint256 width used by the contracts.
  defp pad_binary(unpadded) do
    padding_bits = (@encoded_length - byte_size(unpadded)) * 8
    <<0::size(padding_bits), unpadded::binary>>
  end
end
|
lib/ex_plasma/output/position.ex
| 0.879231 | 0.708364 |
position.ex
|
starcoder
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.