defmodule Mulix do
@moduledoc """
µlix (Mulix) is a genetic programming toolkit for Elixir. It implements a vaguely [Push
3.0](http://faculty.hampshire.edu/lspector/push3-description.html)-like virtual machine.
## Description of the execution process
`Op`s define virtual machine instructions / operations, and they form _expressions_. An expression
can be a literal (number, bool, binary, atom), an `Op`, or a list of expressions.
An `Environment` struct contains stacks (`:exec`, `:code`, `:number`, `:boolean`, `:string`, `:name`
(atoms), `:type`, `:error`) that expressions can mutate, plus configuration related to execution.
Literals are pushed to their respective stacks. An `Op` takes in an `Environment` and returns a
possibly mutated version.
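
For example, executing the expression `[2, 3, op_add]` would push `2` and `3` onto the
`:number` stack, after which the addition `Op` would pop both and push `5`. A minimal
sketch (hypothetical names: `Environment.new/0`, `Mulix.eval/2` and `op_add` are
assumptions, not necessarily this toolkit's API):

    env = Environment.new()               # fresh stacks
    env = Mulix.eval(env, [2, 3, op_add]) # run one expression
    # the :number stack now holds [5]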
An `Individual` has both a _phenome_ (a top-level expression plus an `Environment` with a set of stacks
and defined names suitable for executing expressions) and a _genome_ (an `Environment` created with
`Genome`, with defined names that generate the phenome).
# IDEA: add a `:tag` type
# IDEA: tag-based co-operation
http://faculty.hampshire.edu/lspector/pubs/multitags-GPTP06.pdf
https://pdfs.semanticscholar.org/f03f/ebca2a77b16fe8a04c4ae377e4bfdfa84ea9.pdf
- each individual has a multidimensional tag
- tag inherited from parent, + mutation
https://erp12.github.io/push-redux/pages/names_and_tags/index.html
https://www.lri.fr/~hansen/proceedings/2011/GECCO/proceedings/p1419.pdf
# IDEA: a `World` with a coordinate system
- world has `num_individuals`
- each individual has a position in R^`num_err_fn`, where `num_err_fn` is the number of defined
  error functions `fit_fn_i` (at minimum the output error). Each axis is capped at `worst_fitness` (e.g. 1000)
- total fitness of an individual is its distance from the origin (see the sketch after this list)
- fitness functions have a rank, so `[:fit_fn2, :fit_fn0, :fit_fn1]` would mean `:fit_fn2` has more
  weight than `:fit_fn0`, etc.
- world has `food_units_per_pos` "food" at each position `{0..worst_fitness, 0..worst_fitness,
...}`
- individuals need `food_intake` (dependent on steps taken during execution & size) units of food
within some radius `eat_radius`
- individuals eat in fitness order. They are sorted by `total_fitness`, then divided into groups
  of size `ratio_in_food_group * num_individuals`. Then for each food group in total fitness
  order: for each fitness function `fit_fn_i` in rank order, take `food_tournament_size`
  individuals at random, sort by `fit_fn_i`, and choose the nth best with probability
  `food_tournament_p * (1 - food_tournament_p)^n`
- individuals can store at most `max_food_storage` units of food
- if `food_stored` goes to 0, the individual dies
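
A minimal sketch of the total-fitness idea above (hypothetical module and names;
assumes `errors` holds one error value per defined fitness function):

    defmodule World.Fitness do
      @worst_fitness 1000

      # Total fitness = Euclidean distance from the origin in R^num_err_fn,
      # with each axis capped at @worst_fitness.
      def total_fitness(errors) when is_list(errors) do
        errors
        |> Enum.map(&min(&1, @worst_fitness))
        |> Enum.map(&(&1 * &1))
        |> Enum.sum()
        |> :math.sqrt()
      end
    end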
# IDEA: autoconstructive evolution
See https://www.lri.fr/~hansen/proceedings/2013/GECCO/companion/p1627.pdf
"""
end
# source file: lib/mulix.ex
defmodule Pummpcomm.PumpModel do
@moduledoc """
Model-specific options and information
"""
# Types
@typedoc """
Model number of a pump as an integer
"""
@type pump_model :: non_neg_integer
@typedoc """
Number of turns the pump motor takes to deliver 1U of insulin
"""
@type strokes_per_unit :: 10 | 40
@typedoc """
* `large_format` - history records use larger format
* `strokes_per_unit` - how many turns the pump motor takes to deliver 1U of insulin
* `supports_low_suspend` - if the pump will automatically suspend basal when the CGM reports a low blood glucose
"""
@type pump_options :: %{
large_format: boolean,
strokes_per_unit: strokes_per_unit,
supports_low_suspend: boolean
}
# Functions
@doc """
Return the model number of the pump from its model string
## Examples
iex> Pummpcomm.PumpModel.model_number("723")
{:ok, 723}
iex> Pummpcomm.PumpModel.model_number("522")
{:ok, 522}
iex> Pummpcomm.PumpModel.model_number("677")
{:error, "Bad Pump Model"}
iex> Pummpcomm.PumpModel.model_number("invalid")
{:error, "Bad Pump Model"}
"""
def model_number(model_string) when is_binary(model_string),
do: model_string |> Integer.parse() |> model_number
def model_number(:error), do: {:error, "Bad Pump Model"}
def model_number({model_number, _}) when div(model_number, 100) in [5, 7],
do: {:ok, model_number}
def model_number(_), do: {:error, "Bad Pump Model"}
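@doc """
Return the model-specific options for the given pump model
## Examples
iex> Pummpcomm.PumpModel.pump_options(723)
%{large_format: true, strokes_per_unit: 40, supports_low_suspend: false}
iex> Pummpcomm.PumpModel.pump_options(751)
%{large_format: true, strokes_per_unit: 40, supports_low_suspend: true}
"""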
@spec pump_options(pump_model) :: pump_options
def pump_options(pump_model) do
%{
large_format: large_format?(pump_model),
strokes_per_unit: strokes_per_unit(pump_model),
supports_low_suspend: supports_low_suspend?(pump_model)
}
end
@doc """
Return true if history records use larger format
## Examples
iex> Pummpcomm.PumpModel.large_format?(723)
true
iex> Pummpcomm.PumpModel.large_format?(522)
false
"""
def large_format?(model_number) when rem(model_number, 100) >= 23, do: true
def large_format?(_), do: false
@doc """
Return true if pump supports MySentry
## Examples
iex> Pummpcomm.PumpModel.supports_my_sentry?(723)
true
iex> Pummpcomm.PumpModel.supports_my_sentry?(522)
false
"""
def supports_my_sentry?(model_number) when rem(model_number, 100) >= 23, do: true
def supports_my_sentry?(_), do: false
@doc """
Return true if pump supports low suspend
## Examples
iex> Pummpcomm.PumpModel.supports_low_suspend?(723)
false
iex> Pummpcomm.PumpModel.supports_low_suspend?(751)
true
"""
def supports_low_suspend?(model_number) when rem(model_number, 100) >= 51, do: true
def supports_low_suspend?(_), do: false
@doc """
Return true if pump writes a square wave bolus to history as it starts,
then updates the same entry upon completion
## Examples
iex> Pummpcomm.PumpModel.records_square_wave_bolus_before_delivery?(723)
true
iex> Pummpcomm.PumpModel.records_square_wave_bolus_before_delivery?(522)
false
"""
def records_square_wave_bolus_before_delivery?(model_number) when rem(model_number, 100) >= 23,
do: true
def records_square_wave_bolus_before_delivery?(_), do: false
@doc """
Return how many turns the pump motor takes to deliver 1U of insulin
## Examples
iex> Pummpcomm.PumpModel.strokes_per_unit(722)
10
iex> Pummpcomm.PumpModel.strokes_per_unit(751)
40
"""
def strokes_per_unit(model_number) when rem(model_number, 100) >= 23, do: 40
def strokes_per_unit(_), do: 10
@doc """
Return how many units of insulin (U) the pump's reservoir can hold
## Examples
iex> Pummpcomm.PumpModel.reservoir_capacity(522)
176
iex> Pummpcomm.PumpModel.reservoir_capacity(751)
300
"""
def reservoir_capacity(model_number) when div(model_number, 100) == 5, do: 176
def reservoir_capacity(model_number) when div(model_number, 100) == 7, do: 300
end
# source file: lib/pummpcomm/pump_model.ex
defmodule Debounce do
@moduledoc """
A process-based debouncer for Elixir.
## What is a debouncer?
A debouncer is responsible for calling a function with a delay, but if that
function is applied again within the delay period, the timer is reset and the
delay is counted anew. In other words, the function will be called only after
a full delay period has elapsed since the last application.
Each time the debounced function is called, a new task is started.
## Example
iex> {:ok, pid} = Debounce.start_link({Kernel, :send, [self(), "Hello"]}, 100)
iex> Debounce.apply(pid) # Schedules call in 100 ms
iex> :timer.sleep(50)
iex> Debounce.apply(pid) # Resets timer back to 100 ms
iex> :timer.sleep(100)
iex> receive do msg -> msg end
"Hello" # Timer elapsed
iex> Debounce.apply(pid) # Schedules call in 100 ms
iex> Debounce.cancel(pid) # Cancels scheduled call
:ok
"""
@behaviour :gen_statem
@type mfargs :: {module, atom, [term]}
@type apply :: (() -> term) | mfargs
@type time :: non_neg_integer
@type debouncer :: :gen_statem.server_ref()
@type option :: {:name, GenServer.name()} | :gen_statem.start_opt()
defmacrop is_apply(apply) do
quote do
is_function(unquote(apply)) or
(is_atom(elem(unquote(apply), 0)) and
is_atom(elem(unquote(apply), 1)) and
is_list(elem(unquote(apply), 2)))
end
end
@doc """
Starts a `Debounce` process linked to the current process.
This can be used to start the `Debounce` as part of a supervision tree.
Delays invoking `apply` until after `timeout` milliseconds have elapsed
since the last time the `apply/2` function was called.
## Options
* `:name` - used for name registration, like in `GenServer.start_link/3`.
* all other options supported by `:gen_statem.start_link/4`
"""
@spec start_link(apply, time, [option]) :: :gen_statem.start_ret()
def start_link(apply, timeout, opts \\ []) do
do_start(:start_link, apply, timeout, opts)
end
@doc """
Starts a `Debounce` process without links (outside of a supervision tree).
See `start_link/3` for more information.
"""
@spec start(apply, time, [option]) :: :gen_statem.start_ret()
def start(apply, timeout, opts \\ []) do
do_start(:start, apply, timeout, opts)
end
@doc """
Synchronously stops the debouncer with the given `reason`.
"""
@spec stop(debouncer, reason :: term, timeout) :: :ok
def stop(debouncer, reason \\ :normal, timeout \\ :infinity) do
:gen_statem.stop(debouncer, reason, timeout)
end
@doc """
Schedules a call to the `debouncer`'s current function.
If the function is a fun, calls it with provided `args`.
If the function is an `t:mfargs/0` tuple, appends provided `args`
to the original ones.
If this function is called again within the `debouncer`'s current timeout
value, the timer will reset.
"""
@spec apply(debouncer, [term]) :: :ok
def apply(debouncer, args \\ []) do
call(debouncer, {:apply, args})
end
@doc """
Cancels any scheduled call to the `debouncer`'s current function.
"""
@spec cancel(debouncer) :: :ok
def cancel(debouncer) do
call(debouncer, :cancel)
end
@doc """
Immediately invokes the `debouncer`'s current function.
If the function is a fun, calls it with provided `args`.
If the function is an `t:mfargs/0` tuple, appends provided `args`
to the original ones.
"""
@spec flush(debouncer, [term]) :: :ok
def flush(debouncer, args) do
call(debouncer, {:flush, args})
end
@doc """
Changes the function the `debouncer` is applying.
Affects only future calls to `apply/2`.
"""
@spec change_function(debouncer, apply) :: :ok
def change_function(debouncer, new_function) do
call(debouncer, {:change_function, new_function})
end
@doc """
Changes the delay the `debouncer` operates with.
Affects only future calls to `apply/2`.
"""
@spec change_timeout(debouncer, time) :: :ok
def change_timeout(debouncer, new_timeout) when is_integer(new_timeout) do
call(debouncer, {:change_timeout, new_timeout})
end
## Callbacks
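
# State machine overview: the debouncer is either in `:waiting` (no call
# scheduled) or `:counting` (a call is pending via a `:state_timeout`).
# Calling `apply/2` while `:counting` re-arms the state timeout, which is
# what resets the delay; when the timeout fires, the function is applied in
# a new task (see `apply_function/2`) and the machine returns to `:waiting`.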
import Record
defrecordp :data, [:apply, :timeout]
@doc false
def callback_mode, do: :state_functions
@doc false
def init({apply, timeout}) do
{:ok, :waiting, data(apply: apply, timeout: timeout)}
end
@doc false
def waiting({:call, from}, {:apply, args}, data(apply: apply, timeout: timeout) = data) do
{:next_state, :counting, data,
[{:reply, from, :ok}, {:state_timeout, timeout, {apply, args}}]}
end
def waiting({:call, from}, :cancel, data) do
{:keep_state, data, {:reply, from, :ok}}
end
def waiting(event, event_content, data) do
handle_event(event, event_content, data)
end
@doc false
def counting({:call, from}, {:apply, args}, data(apply: apply, timeout: timeout) = data) do
{:keep_state, data, [{:reply, from, :ok}, {:state_timeout, timeout, {apply, args}}]}
end
def counting({:call, from}, :cancel, data) do
{:next_state, :waiting, data, {:reply, from, :ok}}
end
def counting(:state_timeout, {apply, args}, data) do
apply_function(apply, args)
{:next_state, :waiting, data}
end
def counting(event, event_content, data) do
handle_event(event, event_content, data)
end
defp handle_event({:call, from}, {:change_function, apply}, data) do
{:keep_state, data(data, apply: apply), {:reply, from, :ok}}
end
defp handle_event({:call, from}, {:change_timeout, timeout}, data) do
{:keep_state, data(data, timeout: timeout), {:reply, from, :ok}}
end
defp handle_event({:call, from}, {:flush, args}, data(apply: apply) = data) do
apply_function(apply, args)
{:next_state, :waiting, data, {:reply, from, :ok}}
end
defp handle_event({:call, _from}, msg, data) do
{:stop, {:bad_call, msg}, data}
end
defp handle_event(:cast, msg, data) do
{:stop, {:bad_cast, msg}, data}
end
defp handle_event(:info, msg, data) do
proc =
case Process.info(self(), :registered_name) do
{_, []} -> self()
{_, name} -> name
end
:error_logger.error_msg(
'~p ~p received unexpected message: ~p~n',
[__MODULE__, proc, msg]
)
{:keep_state, data}
end
@doc false
def terminate(_reason, _state, _data) do
:ok
end
@doc false
def code_change(_vsn, state, data, _extra) do
{:ok, state, data}
end
## Helpers
defp do_start(start, apply, timeout, opts) when is_apply(apply) and is_integer(timeout) do
if name = name(opts[:name]) do
apply(:gen_statem, start, [name, __MODULE__, {apply, timeout}, opts])
else
apply(:gen_statem, start, [__MODULE__, {apply, timeout}, opts])
end
end
defp name(nil), do: nil
defp name(atom) when is_atom(atom), do: {:local, atom}
defp name(other), do: other
defp call(debounce, request) do
:gen_statem.call(debounce, request, {:dirty_timeout, 5_000})
end
defp apply_function({m, f, a}, args) do
Task.Supervisor.start_child(Debounce.Supervisor, m, f, a ++ args)
end
defp apply_function(fun, args) do
Task.Supervisor.start_child(Debounce.Supervisor, :erlang, :apply, [fun, args])
end
end
# source file: lib/debounce.ex
defmodule Benchmarks.GoogleMessage3.Message22853 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field22869: Benchmarks.GoogleMessage3.Enum22854.t(),
field22870: [non_neg_integer],
field22871: [float | :infinity | :negative_infinity | :nan],
field22872: [float | :infinity | :negative_infinity | :nan],
field22873: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil
}
defstruct [:field22869, :field22870, :field22871, :field22872, :field22873]
field :field22869, 1, optional: true, type: Benchmarks.GoogleMessage3.Enum22854, enum: true
field :field22870, 2, repeated: true, type: :uint32, packed: true
field :field22871, 3, repeated: true, type: :float, packed: true
field :field22872, 5, repeated: true, type: :float, packed: true
field :field22873, 4, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
end
defmodule Benchmarks.GoogleMessage3.Message24345 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field24533: String.t(),
field24534: Benchmarks.GoogleMessage3.UnusedEnum.t(),
field24535: Benchmarks.GoogleMessage3.Message24346.t() | nil,
field24536: String.t(),
field24537: String.t(),
field24538: Benchmarks.GoogleMessage3.UnusedEnum.t(),
field24539: String.t(),
field24540: String.t(),
field24541: String.t(),
field24542: String.t(),
field24543: Benchmarks.GoogleMessage3.Message24316.t() | nil,
field24544: Benchmarks.GoogleMessage3.Message24376.t() | nil,
field24545: String.t(),
field24546: String.t(),
field24547: String.t(),
field24548: String.t(),
field24549: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field24550: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field24551: [String.t()],
field24552: String.t(),
field24553: integer,
field24554: Benchmarks.GoogleMessage3.Message24379.t() | nil,
field24555: String.t(),
field24556: [Benchmarks.GoogleMessage3.Message24356.t()],
field24557: [Benchmarks.GoogleMessage3.Message24366.t()]
}
defstruct [
:field24533,
:field24534,
:field24535,
:field24536,
:field24537,
:field24538,
:field24539,
:field24540,
:field24541,
:field24542,
:field24543,
:field24544,
:field24545,
:field24546,
:field24547,
:field24548,
:field24549,
:field24550,
:field24551,
:field24552,
:field24553,
:field24554,
:field24555,
:field24556,
:field24557
]
field :field24533, 1, optional: true, type: :string
field :field24534, 22, optional: true, type: Benchmarks.GoogleMessage3.UnusedEnum, enum: true
field :field24535, 2, optional: true, type: Benchmarks.GoogleMessage3.Message24346
field :field24536, 3, optional: true, type: :string
field :field24537, 4, optional: true, type: :string
field :field24538, 23, optional: true, type: Benchmarks.GoogleMessage3.UnusedEnum, enum: true
field :field24539, 5, optional: true, type: :string
field :field24540, 6, required: true, type: :string
field :field24541, 7, optional: true, type: :string
field :field24542, 8, optional: true, type: :string
field :field24543, 9, optional: true, type: Benchmarks.GoogleMessage3.Message24316
field :field24544, 10, optional: true, type: Benchmarks.GoogleMessage3.Message24376
field :field24545, 11, optional: true, type: :string
field :field24546, 19, optional: true, type: :string
field :field24547, 20, optional: true, type: :string
field :field24548, 21, optional: true, type: :string
field :field24549, 12, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field24550, 13, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field24551, 14, repeated: true, type: :string
field :field24552, 15, optional: true, type: :string
field :field24553, 18, optional: true, type: :int32
field :field24554, 16, optional: true, type: Benchmarks.GoogleMessage3.Message24379
field :field24555, 17, optional: true, type: :string
field :field24556, 24, repeated: true, type: Benchmarks.GoogleMessage3.Message24356
field :field24557, 25, repeated: true, type: Benchmarks.GoogleMessage3.Message24366
end
defmodule Benchmarks.GoogleMessage3.Message24403 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field24681: Benchmarks.GoogleMessage3.Message24401.t() | nil,
field24682: Benchmarks.GoogleMessage3.Message24402.t() | nil
}
defstruct [:field24681, :field24682]
field :field24681, 1, optional: true, type: Benchmarks.GoogleMessage3.Message24401
field :field24682, 2, optional: true, type: Benchmarks.GoogleMessage3.Message24402
end
defmodule Benchmarks.GoogleMessage3.Message24391 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field24631: String.t(),
field24632: String.t(),
field24633: [String.t()],
field24634: String.t(),
field24635: [String.t()],
field24636: [String.t()],
field24637: String.t(),
field24638: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field24639: String.t(),
field24640: String.t(),
field24641: String.t(),
field24642: String.t(),
field24643: integer,
field24644: Benchmarks.GoogleMessage3.Message24379.t() | nil,
field24645: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field24646: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field24647: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field24648: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field24649: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field24650: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field24651: String.t(),
field24652: integer,
field24653: integer,
field24654: [String.t()],
field24655: [String.t()]
}
defstruct [
:field24631,
:field24632,
:field24633,
:field24634,
:field24635,
:field24636,
:field24637,
:field24638,
:field24639,
:field24640,
:field24641,
:field24642,
:field24643,
:field24644,
:field24645,
:field24646,
:field24647,
:field24648,
:field24649,
:field24650,
:field24651,
:field24652,
:field24653,
:field24654,
:field24655
]
field :field24631, 1, optional: true, type: :string
field :field24632, 2, optional: true, type: :string
field :field24633, 3, repeated: true, type: :string
field :field24634, 4, optional: true, type: :string
field :field24635, 5, repeated: true, type: :string
field :field24636, 16, repeated: true, type: :string
field :field24637, 17, optional: true, type: :string
field :field24638, 25, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field24639, 7, optional: true, type: :string
field :field24640, 18, optional: true, type: :string
field :field24641, 19, optional: true, type: :string
field :field24642, 20, optional: true, type: :string
field :field24643, 24, optional: true, type: :int32
field :field24644, 8, optional: true, type: Benchmarks.GoogleMessage3.Message24379
field :field24645, 9, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field24646, 10, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field24647, 11, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field24648, 12, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field24649, 13, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field24650, 14, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field24651, 21, optional: true, type: :string
field :field24652, 22, optional: true, type: :int32
field :field24653, 23, optional: true, type: :int32
field :field24654, 15, repeated: true, type: :string
field :field24655, 6, repeated: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message27454 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{}
defstruct []
end
defmodule Benchmarks.GoogleMessage3.Message27357 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field27410: String.t(),
field27411: float | :infinity | :negative_infinity | :nan,
field27412: String.t(),
field27413: boolean,
field27414: boolean
}
defstruct [:field27410, :field27411, :field27412, :field27413, :field27414]
field :field27410, 1, optional: true, type: :string
field :field27411, 2, optional: true, type: :float
field :field27412, 3, optional: true, type: :string
field :field27413, 4, optional: true, type: :bool
field :field27414, 5, optional: true, type: :bool
end
defmodule Benchmarks.GoogleMessage3.Message27360 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field27426: Benchmarks.GoogleMessage3.Message27358.t() | nil,
field27427: Benchmarks.GoogleMessage3.Enum27361.t(),
field27428: Benchmarks.GoogleMessage3.Message27358.t() | nil,
field27429: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()]
}
defstruct [:field27426, :field27427, :field27428, :field27429]
field :field27426, 1, optional: true, type: Benchmarks.GoogleMessage3.Message27358
field :field27427, 2, optional: true, type: Benchmarks.GoogleMessage3.Enum27361, enum: true
field :field27428, 3, optional: true, type: Benchmarks.GoogleMessage3.Message27358
field :field27429, 4, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
end
defmodule Benchmarks.GoogleMessage3.Message34387 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field34446: String.t(),
field34447: [Benchmarks.GoogleMessage3.Message34381.t()],
field34448: Benchmarks.GoogleMessage3.UnusedEnum.t(),
field34449: Benchmarks.GoogleMessage3.Enum34388.t(),
field34450: integer
}
defstruct [:field34446, :field34447, :field34448, :field34449, :field34450]
field :field34446, 1, optional: true, type: :string
field :field34447, 2, repeated: true, type: Benchmarks.GoogleMessage3.Message34381
field :field34448, 3, optional: true, type: Benchmarks.GoogleMessage3.UnusedEnum, enum: true
field :field34449, 4, optional: true, type: Benchmarks.GoogleMessage3.Enum34388, enum: true
field :field34450, 5, optional: true, type: :int64
end
defmodule Benchmarks.GoogleMessage3.Message34621 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field34651: float | :infinity | :negative_infinity | :nan,
field34652: float | :infinity | :negative_infinity | :nan,
field34653: float | :infinity | :negative_infinity | :nan,
field34654: float | :infinity | :negative_infinity | :nan,
field34655: float | :infinity | :negative_infinity | :nan,
field34656: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field34657: Benchmarks.GoogleMessage3.Message34619.t() | nil,
field34658: String.t(),
field34659: String.t(),
field34660: float | :infinity | :negative_infinity | :nan,
field34661: binary,
field34662: String.t(),
field34663: String.t(),
field34664: String.t(),
field34665: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field34666: Benchmarks.GoogleMessage3.Message34621.t() | nil,
field34667: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field34668: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil
}
defstruct [
:field34651,
:field34652,
:field34653,
:field34654,
:field34655,
:field34656,
:field34657,
:field34658,
:field34659,
:field34660,
:field34661,
:field34662,
:field34663,
:field34664,
:field34665,
:field34666,
:field34667,
:field34668
]
field :field34651, 1, optional: true, type: :double
field :field34652, 2, optional: true, type: :double
field :field34653, 3, optional: true, type: :double
field :field34654, 4, optional: true, type: :double
field :field34655, 11, optional: true, type: :double
field :field34656, 13, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field34657, 14, optional: true, type: Benchmarks.GoogleMessage3.Message34619
field :field34658, 5, optional: true, type: :string
field :field34659, 9, optional: true, type: :string
field :field34660, 12, optional: true, type: :double
field :field34661, 19, optional: true, type: :bytes
field :field34662, 15, optional: true, type: :string
field :field34663, 16, optional: true, type: :string
field :field34664, 17, optional: true, type: :string
field :field34665, 18, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field34666, 20, optional: true, type: Benchmarks.GoogleMessage3.Message34621
field :field34667, 100, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field34668, 101, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
end
defmodule Benchmarks.GoogleMessage3.Message35476 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field35484: String.t(),
field35485: String.t(),
field35486: String.t(),
field35487: Benchmarks.GoogleMessage3.Enum35477.t(),
field35488: float | :infinity | :negative_infinity | :nan,
field35489: float | :infinity | :negative_infinity | :nan,
field35490: float | :infinity | :negative_infinity | :nan,
field35491: float | :infinity | :negative_infinity | :nan,
field35492: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field35493: integer,
field35494: integer,
field35495: integer,
field35496: String.t(),
field35497: String.t()
}
defstruct [
:field35484,
:field35485,
:field35486,
:field35487,
:field35488,
:field35489,
:field35490,
:field35491,
:field35492,
:field35493,
:field35494,
:field35495,
:field35496,
:field35497
]
field :field35484, 1, optional: true, type: :string
field :field35485, 2, optional: true, type: :string
field :field35486, 3, optional: true, type: :string
field :field35487, 4, optional: true, type: Benchmarks.GoogleMessage3.Enum35477, enum: true
field :field35488, 5, optional: true, type: :float
field :field35489, 6, optional: true, type: :float
field :field35490, 7, optional: true, type: :float
field :field35491, 8, optional: true, type: :float
field :field35492, 9, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field35493, 10, optional: true, type: :int32
field :field35494, 11, optional: true, type: :int32
field :field35495, 12, optional: true, type: :int32
field :field35496, 13, optional: true, type: :string
field :field35497, 14, optional: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message949 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field955: String.t(),
field956: integer,
field957: integer,
field958: Benchmarks.GoogleMessage3.Message730.t() | nil,
field959: [String.t()],
field960: String.t(),
field961: boolean
}
defstruct [:field955, :field956, :field957, :field958, :field959, :field960, :field961]
field :field955, 1, optional: true, type: :string
field :field956, 2, optional: true, type: :int64
field :field957, 3, optional: true, type: :int64
field :field958, 4, optional: true, type: Benchmarks.GoogleMessage3.Message730
field :field959, 5, repeated: true, type: :string
field :field960, 6, optional: true, type: :string
field :field961, 7, optional: true, type: :bool
end
defmodule Benchmarks.GoogleMessage3.Message36869 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field36970: integer,
field36971: integer
}
defstruct [:field36970, :field36971]
field :field36970, 1, optional: true, type: :int32
field :field36971, 2, optional: true, type: :int32
end
defmodule Benchmarks.GoogleMessage3.Message33968.Message33969 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{}
defstruct []
end
defmodule Benchmarks.GoogleMessage3.Message33968 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
message33969: [any],
field33989: [Benchmarks.GoogleMessage3.Message33958.t()],
field33990: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field33991: boolean,
field33992: Benchmarks.GoogleMessage3.UnusedEnum.t()
}
defstruct [:message33969, :field33989, :field33990, :field33991, :field33992]
field :message33969, 1, repeated: true, type: :group
field :field33989, 3, repeated: true, type: Benchmarks.GoogleMessage3.Message33958
field :field33990, 106, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field33991, 108, optional: true, type: :bool
field :field33992, 107, optional: true, type: Benchmarks.GoogleMessage3.UnusedEnum, enum: true
end
defmodule Benchmarks.GoogleMessage3.Message6644 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field6701: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field6702: String.t(),
field6703: float | :infinity | :negative_infinity | :nan,
field6704: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field6705: binary,
field6706: binary,
field6707: Benchmarks.GoogleMessage3.Message6637.t() | nil,
field6708: [Benchmarks.GoogleMessage3.Message6126.t()],
field6709: boolean,
field6710: Benchmarks.GoogleMessage3.Message6643.t() | nil,
field6711: String.t(),
field6712: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field6713: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field6714: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field6715: integer,
field6716: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil
}
defstruct [
:field6701,
:field6702,
:field6703,
:field6704,
:field6705,
:field6706,
:field6707,
:field6708,
:field6709,
:field6710,
:field6711,
:field6712,
:field6713,
:field6714,
:field6715,
:field6716
]
field :field6701, 8, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field6702, 1, optional: true, type: :string
field :field6703, 2, optional: true, type: :double
field :field6704, 9, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field6705, 3, optional: true, type: :bytes
field :field6706, 19, optional: true, type: :bytes
field :field6707, 4, optional: true, type: Benchmarks.GoogleMessage3.Message6637
field :field6708, 18, repeated: true, type: Benchmarks.GoogleMessage3.Message6126
field :field6709, 6, optional: true, type: :bool
field :field6710, 10, optional: true, type: Benchmarks.GoogleMessage3.Message6643
field :field6711, 12, optional: true, type: :string
field :field6712, 14, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field6713, 15, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field6714, 16, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field6715, 17, optional: true, type: :int32
field :field6716, 20, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
end
defmodule Benchmarks.GoogleMessage3.Message18831.Message18832.Message18833 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field18843: non_neg_integer,
field18844: String.t(),
field18845: float | :infinity | :negative_infinity | :nan,
field18846: integer,
field18847: boolean
}
defstruct [:field18843, :field18844, :field18845, :field18846, :field18847]
field :field18843, 7, required: true, type: :uint64
field :field18844, 8, optional: true, type: :string
field :field18845, 10, optional: true, type: :float
field :field18846, 12, optional: true, type: :int32
field :field18847, 13, optional: true, type: :bool
end
defmodule Benchmarks.GoogleMessage3.Message18831.Message18832 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field18836: integer,
field18837: String.t(),
field18838: float | :infinity | :negative_infinity | :nan,
field18839: float | :infinity | :negative_infinity | :nan,
field18840: integer,
field18841: [non_neg_integer],
message18833: [any]
}
defstruct [
:field18836,
:field18837,
:field18838,
:field18839,
:field18840,
:field18841,
:message18833
]
field :field18836, 2, optional: true, type: :int32
field :field18837, 5, optional: true, type: :string
field :field18838, 3, optional: true, type: :float
field :field18839, 9, optional: true, type: :float
field :field18840, 11, optional: true, type: :int32
field :field18841, 4, repeated: true, type: :uint64
field :message18833, 6, repeated: true, type: :group
end
defmodule Benchmarks.GoogleMessage3.Message18831 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
message18832: [any]
}
defstruct [:message18832]
field :message18832, 1, repeated: true, type: :group
end
defmodule Benchmarks.GoogleMessage3.Message13090 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field13141: Benchmarks.GoogleMessage3.Message13083.t() | nil,
field13142: Benchmarks.GoogleMessage3.Message13088.t() | nil
}
defstruct [:field13141, :field13142]
field :field13141, 1, optional: true, type: Benchmarks.GoogleMessage3.Message13083
field :field13142, 2, optional: true, type: Benchmarks.GoogleMessage3.Message13088
end
defmodule Benchmarks.GoogleMessage3.Message11874 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field11888: Benchmarks.GoogleMessage3.Message10391.t() | nil,
field11889: String.t(),
field11890: Benchmarks.GoogleMessage3.Message11873.t() | nil,
field11891: boolean,
__pb_extensions__: map
}
defstruct [:field11888, :field11889, :field11890, :field11891, :__pb_extensions__]
field :field11888, 3, optional: true, type: Benchmarks.GoogleMessage3.Message10391
field :field11889, 4, optional: true, type: :string
field :field11890, 6, optional: true, type: Benchmarks.GoogleMessage3.Message11873
field :field11891, 7, optional: true, type: :bool
extensions [{1, 2}, {2, 3}, {5, 6}]
end
defmodule Benchmarks.GoogleMessage3.Message4144.Message4145 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field4165: Benchmarks.GoogleMessage3.Enum4146.t(),
field4166: integer,
field4167: Benchmarks.GoogleMessage3.Enum4160.t(),
field4168: binary,
field4169: Benchmarks.GoogleMessage3.Enum4152.t(),
field4170: String.t()
}
defstruct [:field4165, :field4166, :field4167, :field4168, :field4169, :field4170]
field :field4165, 2, required: true, type: Benchmarks.GoogleMessage3.Enum4146, enum: true
field :field4166, 3, required: true, type: :int32
field :field4167, 9, optional: true, type: Benchmarks.GoogleMessage3.Enum4160, enum: true
field :field4168, 4, optional: true, type: :bytes
field :field4169, 5, optional: true, type: Benchmarks.GoogleMessage3.Enum4152, enum: true
field :field4170, 6, optional: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message4144 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
message4145: [any]
}
defstruct [:message4145]
field :message4145, 1, repeated: true, type: :group
end
defmodule Benchmarks.GoogleMessage3.Message35573.Message35574 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{}
defstruct []
end
defmodule Benchmarks.GoogleMessage3.Message35573.Message35575.Message35576 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field35747: non_neg_integer,
field35748: integer,
field35749: integer,
field35750: integer,
field35751: non_neg_integer,
field35752: integer,
field35753: integer,
field35754: integer,
field35755: binary,
field35756: integer,
field35757: String.t(),
field35758: non_neg_integer,
field35759: integer,
field35760: integer,
field35761: integer,
field35762: integer,
field35763: integer,
field35764: integer,
field35765: binary,
field35766: String.t(),
field35767: integer,
field35768: [integer],
field35769: [integer],
field35770: integer,
field35771: Benchmarks.GoogleMessage3.Message0.t() | nil
}
defstruct [
:field35747,
:field35748,
:field35749,
:field35750,
:field35751,
:field35752,
:field35753,
:field35754,
:field35755,
:field35756,
:field35757,
:field35758,
:field35759,
:field35760,
:field35761,
:field35762,
:field35763,
:field35764,
:field35765,
:field35766,
:field35767,
:field35768,
:field35769,
:field35770,
:field35771
]
field :field35747, 5, optional: true, type: :fixed64
field :field35748, 6, optional: true, type: :int32
field :field35749, 49, optional: true, type: :int32
field :field35750, 7, optional: true, type: :int32
field :field35751, 59, optional: true, type: :uint32
field :field35752, 14, optional: true, type: :int32
field :field35753, 15, optional: true, type: :int32
field :field35754, 35, optional: true, type: :int32
field :field35755, 53, optional: true, type: :bytes
field :field35756, 8, optional: true, type: :int32
field :field35757, 9, optional: true, type: :string
field :field35758, 10, optional: true, type: :fixed64
field :field35759, 11, optional: true, type: :int32
field :field35760, 12, optional: true, type: :int32
field :field35761, 41, optional: true, type: :int32
field :field35762, 30, optional: true, type: :int32
field :field35763, 31, optional: true, type: :int32
field :field35764, 13, optional: true, type: :int32
field :field35765, 39, optional: true, type: :bytes
field :field35766, 29, optional: true, type: :string
field :field35767, 42, optional: true, type: :int32
field :field35768, 32, repeated: true, type: :int32
field :field35769, 51, repeated: true, type: :int32
field :field35770, 54, optional: true, type: :int64
field :field35771, 55, optional: true, type: Benchmarks.GoogleMessage3.Message0
end
defmodule Benchmarks.GoogleMessage3.Message35573.Message35575 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field35709: integer,
field35710: String.t(),
field35711: String.t(),
field35712: integer,
field35713: integer,
field35714: integer,
field35715: boolean,
field35716: integer,
field35717: integer,
field35718: boolean,
field35719: non_neg_integer,
field35720: binary,
field35721: integer,
field35722: non_neg_integer,
field35723: boolean,
field35724: integer,
field35725: integer,
field35726: boolean,
field35727: [integer],
field35728: [integer],
field35729: float | :infinity | :negative_infinity | :nan,
field35730: float | :infinity | :negative_infinity | :nan,
field35731: integer,
field35732: [non_neg_integer],
field35733: [non_neg_integer],
field35734: integer,
field35735: integer,
field35736: integer,
field35737: integer,
field35738: boolean,
field35739: boolean,
field35740: integer,
field35741: integer,
field35742: String.t(),
field35743: non_neg_integer,
field35744: [binary],
field35745: Benchmarks.GoogleMessage3.Message0.t() | nil,
message35576: any
}
defstruct [
:field35709,
:field35710,
:field35711,
:field35712,
:field35713,
:field35714,
:field35715,
:field35716,
:field35717,
:field35718,
:field35719,
:field35720,
:field35721,
:field35722,
:field35723,
:field35724,
:field35725,
:field35726,
:field35727,
:field35728,
:field35729,
:field35730,
:field35731,
:field35732,
:field35733,
:field35734,
:field35735,
:field35736,
:field35737,
:field35738,
:field35739,
:field35740,
:field35741,
:field35742,
:field35743,
:field35744,
:field35745,
:message35576
]
field :field35709, 2, optional: true, type: :int64
field :field35710, 3, optional: true, type: :string
field :field35711, 19, optional: true, type: :string
field :field35712, 20, optional: true, type: :int32
field :field35713, 21, optional: true, type: :int32
field :field35714, 22, optional: true, type: :int32
field :field35715, 23, optional: true, type: :bool
field :field35716, 47, optional: true, type: :int32
field :field35717, 48, optional: true, type: :int32
field :field35718, 24, optional: true, type: :bool
field :field35719, 25, optional: true, type: :fixed64
field :field35720, 52, optional: true, type: :bytes
field :field35721, 18, optional: true, type: :int32
field :field35722, 43, optional: true, type: :fixed32
field :field35723, 26, optional: true, type: :bool
field :field35724, 27, optional: true, type: :int32
field :field35725, 17, optional: true, type: :int32
field :field35726, 45, optional: true, type: :bool
field :field35727, 33, repeated: true, type: :int32
field :field35728, 58, repeated: true, type: :int32
field :field35729, 34, optional: true, type: :float
field :field35730, 1009, optional: true, type: :float
field :field35731, 28, optional: true, type: :int32
field :field35732, 1001, repeated: true, type: :fixed64
field :field35733, 1002, repeated: true, type: :fixed64
field :field35734, 44, optional: true, type: :int32
field :field35735, 50, optional: true, type: :int32
field :field35736, 36, optional: true, type: :int32
field :field35737, 40, optional: true, type: :int32
field :field35738, 1016, optional: true, type: :bool
field :field35739, 1010, optional: true, type: :bool
field :field35740, 37, optional: true, type: :int32
field :field35741, 38, optional: true, type: :int32
field :field35742, 46, optional: true, type: :string
field :field35743, 60, optional: true, type: :uint32
field :field35744, 56, repeated: true, type: :bytes
field :field35745, 57, optional: true, type: Benchmarks.GoogleMessage3.Message0
field :message35576, 4, required: true, type: :group
end
defmodule Benchmarks.GoogleMessage3.Message35573 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field35695: non_neg_integer,
field35696: String.t(),
field35697: String.t(),
field35698: integer,
message35574: [any],
field35700: integer,
field35701: integer,
field35702: integer,
field35703: integer,
field35704: integer,
message35575: [any]
}
defstruct [
:field35695,
:field35696,
:field35697,
:field35698,
:message35574,
:field35700,
:field35701,
:field35702,
:field35703,
:field35704,
:message35575
]
field :field35695, 16, optional: true, type: :fixed64
field :field35696, 1000, optional: true, type: :string
field :field35697, 1004, optional: true, type: :string
field :field35698, 1003, optional: true, type: :int32
field :message35574, 1012, repeated: true, type: :group
field :field35700, 1011, optional: true, type: :int64
field :field35701, 1005, optional: true, type: :int64
field :field35702, 1006, optional: true, type: :int64
field :field35703, 1007, optional: true, type: :int64
field :field35704, 1008, optional: true, type: :int64
field :message35575, 1, repeated: true, type: :group
end
defmodule Benchmarks.GoogleMessage3.Message36858.Message36859 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field36968: Benchmarks.GoogleMessage3.Enum36860.t(),
field36969: float | :infinity | :negative_infinity | :nan
}
defstruct [:field36968, :field36969]
field :field36968, 9, required: true, type: Benchmarks.GoogleMessage3.Enum36860, enum: true
field :field36969, 10, optional: true, type: :float
end
defmodule Benchmarks.GoogleMessage3.Message36858 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field36956: [integer],
field36957: [String.t()],
field36958: [String.t()],
field36959: integer,
field36960: integer,
field36961: integer,
field36962: String.t(),
field36963: boolean,
field36964: boolean,
field36965: integer,
field36966: Benchmarks.GoogleMessage3.Message35506.t() | nil,
message36859: [any]
}
defstruct [
:field36956,
:field36957,
:field36958,
:field36959,
:field36960,
:field36961,
:field36962,
:field36963,
:field36964,
:field36965,
:field36966,
:message36859
]
field :field36956, 1, repeated: true, type: :int32
field :field36957, 2, repeated: true, type: :string
field :field36958, 12, repeated: true, type: :string
field :field36959, 3, optional: true, type: :int32
field :field36960, 4, optional: true, type: :int32
field :field36961, 14, optional: true, type: :int32
field :field36962, 11, optional: true, type: :string
field :field36963, 5, optional: true, type: :bool
field :field36964, 13, optional: true, type: :bool
field :field36965, 6, optional: true, type: :int64
field :field36966, 7, optional: true, type: Benchmarks.GoogleMessage3.Message35506
field :message36859, 8, repeated: true, type: :group
end
defmodule Benchmarks.GoogleMessage3.Message13174 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field13237: integer,
field13238: integer,
field13239: integer,
field13240: integer,
field13241: float | :infinity | :negative_infinity | :nan,
field13242: float | :infinity | :negative_infinity | :nan,
field13243: integer,
field13244: integer,
field13245: float | :infinity | :negative_infinity | :nan,
field13246: integer,
field13247: float | :infinity | :negative_infinity | :nan,
field13248: integer,
field13249: Benchmarks.GoogleMessage3.Message13151.t() | nil,
field13250: integer,
field13251: float | :infinity | :negative_infinity | :nan,
field13252: float | :infinity | :negative_infinity | :nan,
field13253: float | :infinity | :negative_infinity | :nan,
field13254: float | :infinity | :negative_infinity | :nan,
field13255: float | :infinity | :negative_infinity | :nan,
field13256: float | :infinity | :negative_infinity | :nan,
field13257: integer
}
defstruct [
:field13237,
:field13238,
:field13239,
:field13240,
:field13241,
:field13242,
:field13243,
:field13244,
:field13245,
:field13246,
:field13247,
:field13248,
:field13249,
:field13250,
:field13251,
:field13252,
:field13253,
:field13254,
:field13255,
:field13256,
:field13257
]
field :field13237, 6, required: true, type: :int32
field :field13238, 3, optional: true, type: :int32
field :field13239, 4, required: true, type: :int32
field :field13240, 8, optional: true, type: :int32
field :field13241, 5, optional: true, type: :double
field :field13242, 7, optional: true, type: :double
field :field13243, 17, optional: true, type: :int32
field :field13244, 19, optional: true, type: :int32
field :field13245, 20, optional: true, type: :double
field :field13246, 9, optional: true, type: :int32
field :field13247, 10, optional: true, type: :double
field :field13248, 11, optional: true, type: :int32
field :field13249, 21, optional: true, type: Benchmarks.GoogleMessage3.Message13151
field :field13250, 1, optional: true, type: :int32
field :field13251, 2, optional: true, type: :double
field :field13252, 15, optional: true, type: :double
field :field13253, 16, optional: true, type: :double
field :field13254, 12, optional: true, type: :double
field :field13255, 13, optional: true, type: :double
field :field13256, 14, optional: true, type: :double
field :field13257, 18, optional: true, type: :int32
end
defmodule Benchmarks.GoogleMessage3.Message18283 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field18478: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18479: integer,
field18480: integer,
field18481: integer,
field18482: integer,
field18483: integer,
field18484: integer,
field18485: integer,
field18486: integer,
field18487: integer,
field18488: integer,
field18489: integer,
field18490: integer,
field18491: boolean,
field18492: boolean,
field18493: integer,
field18494: integer,
field18495: integer,
field18496: integer,
field18497: float | :infinity | :negative_infinity | :nan,
field18498: integer,
field18499: String.t(),
field18500: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18501: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18502: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18503: Benchmarks.GoogleMessage3.Message18253.t() | nil,
field18504: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18505: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18506: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18507: [integer],
field18508: [integer],
field18509: [String.t()],
field18510: binary,
field18511: integer,
field18512: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18513: String.t(),
field18514: float | :infinity | :negative_infinity | :nan,
field18515: float | :infinity | :negative_infinity | :nan,
field18516: float | :infinity | :negative_infinity | :nan,
field18517: float | :infinity | :negative_infinity | :nan,
field18518: integer,
field18519: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field18520: integer,
field18521: integer,
field18522: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18523: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18524: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18525: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18526: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18527: integer,
field18528: integer,
field18529: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18530: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18531: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18532: non_neg_integer,
field18533: integer,
field18534: integer,
field18535: integer,
field18536: non_neg_integer,
field18537: non_neg_integer,
field18538: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18539: integer,
field18540: integer,
field18541: Benchmarks.GoogleMessage3.Message16816.t() | nil,
field18542: Benchmarks.GoogleMessage3.Message16685.t() | nil,
field18543: integer,
field18544: integer,
field18545: integer,
field18546: integer,
field18547: integer,
field18548: integer,
field18549: float | :infinity | :negative_infinity | :nan,
field18550: Benchmarks.GoogleMessage3.Message0.t() | nil,
field18551: [integer],
field18552: integer,
field18553: [non_neg_integer],
field18554: integer,
field18555: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18556: boolean,
field18557: non_neg_integer,
field18558: integer,
field18559: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18560: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18561: integer,
field18562: [non_neg_integer],
field18563: [String.t()],
field18564: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18565: integer,
field18566: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18567: integer,
field18568: non_neg_integer,
field18569: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18570: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18571: non_neg_integer,
field18572: non_neg_integer,
field18573: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18574: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18575: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18576: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18577: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18578: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field18579: integer,
field18580: float | :infinity | :negative_infinity | :nan,
field18581: boolean,
__pb_extensions__: map
}
defstruct [
:field18478,
:field18479,
:field18480,
:field18481,
:field18482,
:field18483,
:field18484,
:field18485,
:field18486,
:field18487,
:field18488,
:field18489,
:field18490,
:field18491,
:field18492,
:field18493,
:field18494,
:field18495,
:field18496,
:field18497,
:field18498,
:field18499,
:field18500,
:field18501,
:field18502,
:field18503,
:field18504,
:field18505,
:field18506,
:field18507,
:field18508,
:field18509,
:field18510,
:field18511,
:field18512,
:field18513,
:field18514,
:field18515,
:field18516,
:field18517,
:field18518,
:field18519,
:field18520,
:field18521,
:field18522,
:field18523,
:field18524,
:field18525,
:field18526,
:field18527,
:field18528,
:field18529,
:field18530,
:field18531,
:field18532,
:field18533,
:field18534,
:field18535,
:field18536,
:field18537,
:field18538,
:field18539,
:field18540,
:field18541,
:field18542,
:field18543,
:field18544,
:field18545,
:field18546,
:field18547,
:field18548,
:field18549,
:field18550,
:field18551,
:field18552,
:field18553,
:field18554,
:field18555,
:field18556,
:field18557,
:field18558,
:field18559,
:field18560,
:field18561,
:field18562,
:field18563,
:field18564,
:field18565,
:field18566,
:field18567,
:field18568,
:field18569,
:field18570,
:field18571,
:field18572,
:field18573,
:field18574,
:field18575,
:field18576,
:field18577,
:field18578,
:field18579,
:field18580,
:field18581,
:__pb_extensions__
]
field :field18478, 1, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18479, 4, optional: true, type: :int32
field :field18480, 106, optional: true, type: :int32
field :field18481, 107, optional: true, type: :int32
field :field18482, 108, optional: true, type: :int32
field :field18483, 109, optional: true, type: :int32
field :field18484, 105, optional: true, type: :int32
field :field18485, 113, optional: true, type: :int32
field :field18486, 114, optional: true, type: :int32
field :field18487, 124, optional: true, type: :int32
field :field18488, 125, optional: true, type: :int32
field :field18489, 128, optional: true, type: :int32
field :field18490, 135, optional: true, type: :int32
field :field18491, 166, optional: true, type: :bool
field :field18492, 136, optional: true, type: :bool
field :field18493, 140, optional: true, type: :int32
field :field18494, 171, optional: true, type: :int32
field :field18495, 148, optional: true, type: :int32
field :field18496, 145, optional: true, type: :int32
field :field18497, 117, optional: true, type: :float
field :field18498, 146, optional: true, type: :int32
field :field18499, 3, optional: true, type: :string
field :field18500, 5, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18501, 6, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18502, 9, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18503, 155, optional: true, type: Benchmarks.GoogleMessage3.Message18253
field :field18504, 184, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18505, 163, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18506, 16, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18507, 20, repeated: true, type: :int32
field :field18508, 7, repeated: true, type: :int32
field :field18509, 194, repeated: true, type: :string
field :field18510, 30, optional: true, type: :bytes
field :field18511, 31, optional: true, type: :int32
field :field18512, 178, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18513, 8, optional: true, type: :string
field :field18514, 2, optional: true, type: :float
field :field18515, 100, optional: true, type: :float
field :field18516, 101, optional: true, type: :float
field :field18517, 102, optional: true, type: :float
field :field18518, 103, optional: true, type: :int32
field :field18519, 104, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18520, 110, optional: true, type: :int32
field :field18521, 112, optional: true, type: :int32
field :field18522, 111, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18523, 115, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18524, 119, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18525, 127, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18526, 185, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18527, 120, optional: true, type: :int32
field :field18528, 132, optional: true, type: :int32
field :field18529, 126, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18530, 129, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18531, 131, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18532, 150, optional: true, type: :fixed64
field :field18533, 133, optional: true, type: :int32
field :field18534, 134, optional: true, type: :int32
field :field18535, 139, optional: true, type: :int32
field :field18536, 137, optional: true, type: :fixed64
field :field18537, 138, optional: true, type: :fixed64
field :field18538, 141, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18539, 142, optional: true, type: :int32
field :field18540, 181, optional: true, type: :int32
field :field18541, 143, optional: true, type: Benchmarks.GoogleMessage3.Message16816
field :field18542, 154, optional: true, type: Benchmarks.GoogleMessage3.Message16685
field :field18543, 144, optional: true, type: :int32
field :field18544, 147, optional: true, type: :int64
field :field18545, 149, optional: true, type: :int64
field :field18546, 151, optional: true, type: :int32
field :field18547, 152, optional: true, type: :int32
field :field18548, 153, optional: true, type: :int32
field :field18549, 161, optional: true, type: :float
field :field18550, 123, optional: true, type: Benchmarks.GoogleMessage3.Message0
field :field18551, 156, repeated: true, type: :int64
field :field18552, 157, optional: true, type: :int32
field :field18553, 188, repeated: true, type: :fixed64
field :field18554, 158, optional: true, type: :int32
field :field18555, 159, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18556, 160, optional: true, type: :bool
field :field18557, 162, optional: true, type: :uint64
field :field18558, 164, optional: true, type: :int32
field :field18559, 10, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18560, 167, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18561, 168, optional: true, type: :int32
field :field18562, 169, repeated: true, type: :fixed64
field :field18563, 170, repeated: true, type: :string
field :field18564, 172, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18565, 173, optional: true, type: :int64
field :field18566, 174, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18567, 175, optional: true, type: :int64
field :field18568, 189, optional: true, type: :uint32
field :field18569, 176, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18570, 177, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18571, 179, optional: true, type: :uint32
field :field18572, 180, optional: true, type: :uint32
field :field18573, 182, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18574, 183, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18575, 121, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18576, 186, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18577, 187, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18578, 190, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18579, 191, optional: true, type: :int32
field :field18580, 192, optional: true, type: :float
field :field18581, 193, optional: true, type: :bool
extensions [{116, 117}, {118, 119}, {130, 131}, {165, 166}]
end
defmodule Benchmarks.GoogleMessage3.Message13169 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field13223: [Benchmarks.GoogleMessage3.Message13168.t()],
field13224: Benchmarks.GoogleMessage3.Message13167.t() | nil,
field13225: String.t()
}
defstruct [:field13223, :field13224, :field13225]
field :field13223, 1, repeated: true, type: Benchmarks.GoogleMessage3.Message13168
field :field13224, 2, required: true, type: Benchmarks.GoogleMessage3.Message13167
field :field13225, 3, optional: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message19255 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field19257: String.t()
}
defstruct [:field19257]
field :field19257, 1, optional: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message35542 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field35543: boolean,
field35544: boolean,
field35545: boolean
}
defstruct [:field35543, :field35544, :field35545]
field :field35543, 1, optional: true, type: :bool
field :field35544, 2, optional: true, type: :bool
field :field35545, 3, optional: true, type: :bool
end
defmodule Benchmarks.GoogleMessage3.Message3901 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field3990: integer,
field3991: integer,
field3992: integer,
field3993: integer,
field3994: integer,
field3995: integer,
field3996: integer,
field3997: integer,
field3998: integer,
field3999: integer,
field4000: Benchmarks.GoogleMessage3.UnusedEnum.t(),
field4001: integer
}
defstruct [
:field3990,
:field3991,
:field3992,
:field3993,
:field3994,
:field3995,
:field3996,
:field3997,
:field3998,
:field3999,
:field4000,
:field4001
]
field :field3990, 1, optional: true, type: :int32
field :field3991, 2, optional: true, type: :int32
field :field3992, 3, optional: true, type: :int32
field :field3993, 4, optional: true, type: :int32
field :field3994, 7, optional: true, type: :int32
field :field3995, 8, optional: true, type: :int32
field :field3996, 9, optional: true, type: :int32
field :field3997, 10, optional: true, type: :int32
field :field3998, 11, optional: true, type: :int32
field :field3999, 12, optional: true, type: :int32
field :field4000, 6, optional: true, type: Benchmarks.GoogleMessage3.UnusedEnum, enum: true
field :field4001, 5, optional: true, type: :int32
end
defmodule Benchmarks.GoogleMessage3.PbExtension do
@moduledoc false
use Protobuf, syntax: :proto2
extend Benchmarks.GoogleMessage3.Message0, :"Message34621.field34669", 17_562_023,
optional: true,
type: Benchmarks.GoogleMessage3.Message34621
end
|
bench/lib/datasets/google_message3/benchmark_message3_2.pb.ex
| 0.768081 | 0.481881 |
benchmark_message3_2.pb.ex
|
starcoder
|
defmodule DefUnitExample do
@moduledoc """
SI Units using watts and watt hours for energy
Includes constants useful for aerodynamics
This is an example of the use of DefUnit
"""
use DefUnit
@doc_from_operator ~S"""
Convert from other units to the core units used for calculations.
If a core unit is specified, this does nothing; however, Dialyzer can
still use it as a type hint.
## Example
```
iex> import DefUnitExample
iex> 12 <~ :feet
3.6576000000000004
iex> 12 <~ :feet2
1.114836
iex> 65 <~ :knots
33.43886
iex> 60 <~ :mph
26.8224
iex> 100 <~ :kmh
27.777
iex> 100 <~ :m
100
iex> 0 <~ :f
-17.77777777777778
iex> 100 <~ :kmh ~> :mph
62.13537938439514
```
"""
@doc_to_operator ~S"""
Convert from the core units used for calculations to other units.
If a core unit is specified, this does nothing; however, Dialyzer can
still use it as a type hint.
## Example
```
iex> import DefUnitExample
iex> 3.6576000000000004 ~> :feet
12.0
iex> 1.114836 ~> :feet2
12.0
iex> 33.43886 ~> :knots
65.0
iex> 10 ~> :mph
22.369362920544024
iex> 10 ~> :kmh
36.00100802822479
iex> 10 ~> :m
10
iex> -17.77777777777778 ~> :f
0.0
iex> 100 <~ :kmh ~> :mph
62.13537938439514
```
"""
# Units we do our calculations in
DefUnit.core "m", :m, "SI length"
DefUnit.core "kg", :kg, "SI mass"
DefUnit.core "s", :s, "Time"
DefUnit.core "m^2", :m2, "SI area"
DefUnit.core "m^3", :m3, "SI volume"
DefUnit.core "kgm^{-3}", :kgm3, "SI density"
DefUnit.core "ms^{-1}", :ms, "SI velocity"
DefUnit.core "ms^{-2}", :ms2, "SI acceleration"
DefUnit.core "kgms^{-2}",:n, "SI force (Newtons)"
DefUnit.core "Nm^{-2}", :nm2, "SI pressure"
DefUnit.core "W", :w, "SI energy use rate (Watts, $Js^{-1}$)"
DefUnit.core "Wh", :wh, "SI energy (Watt-hours)"
DefUnit.core "C", :c, "Temperature in Celsius"
# Dimensionless coefficients, still treated as core units
DefUnit.core "C_l", :cl, "Coefficient of lift"
DefUnit.core "C_d", :cd, "Coefficient of drag"
DefUnit.core "RN", :rn, "Reynold's Number"
DefUnit.core "E", :e, "Efficiency"
# Units we convert to and from core units
DefUnit.other "feet", :feet, 0.3048, :m, "FPS length and altitude"
DefUnit.other "lbs", :lbs, 0.453592, :kg, "FPS mass"
DefUnit.other "feet^2", :feet2, 0.092903, :m2, "FPS area"
DefUnit.other "feet^3", :feet3, 0.0283168,:m3, "FPS volume"
DefUnit.other "L", :l, 0.001, :m3, "SI litre"
DefUnit.other "kmh^{-1}", :kmh, 0.27777, :ms, "SI velocity"
DefUnit.other "mph", :mph, 0.44704, :ms, "FPS velocity"
DefUnit.other "knots", :knots, 0.514444, :ms, "Nautical miles per hour"
DefUnit.other "minutes", :min, 60, :s, "Minute"
DefUnit.other "hours", :hours, 3_600, :s, "Hour"
DefUnit.other "G_{earth}",:gearth, 9.81, :ms2, "Earth acc. due to gravity"
DefUnit.other "hp", :hp, 745.7, :w, "Horsepower"
# Units with more complex from/to conversion calculations
DefUnit.other "F", :f,
{
&((&1 - 32.0) * (5.0 / 9.0)),
&((&1 * (9.0 / 5.0)) + 32.0)
},
:c, "Temperature in Fahrenheit"
end
|
test/support/def_unit_example.ex
| 0.881251 | 0.844216 |
def_unit_example.ex
|
starcoder
|
defmodule Premailex.Util do
@moduledoc """
Module that contains utility functions.
"""
@type html_tree :: Premailex.HTMLParser.html_tree()
@type needle :: binary | html_tree
@doc """
Traverses the tree searching for the needle, calling the provided function
on any occurrences.
If the function returns `{:halt, any}`, traversal stops and the result will
be `{:halt, html_tree}`.
## Examples
iex> Premailex.Util.traverse({"div", [], [{"p", [], ["First paragraph"]}, {"p", [], ["Second paragraph"]}]}, "p", fn {name, attrs, _children} -> {name, attrs, ["Updated"]} end)
{"div", [], [{"p", [], ["Updated"]}, {"p", [], ["Updated"]}]}
iex> Premailex.Util.traverse({"div", [], [{"p", [], ["First paragraph"]}, {"p", [], ["Second paragraph"]}]}, {"p", [], ["Second paragraph"]}, fn {name, attrs, _children} -> {name, attrs, ["Updated"]} end)
{"div", [], [{"p", [], ["First paragraph"]}, {"p", [], ["Updated"]}]}
"""
@spec traverse(html_tree, needle, function) :: html_tree | {:halt, html_tree}
def traverse(html, needles, fun) when is_list(needles),
do: Enum.reduce(needles, html, &traverse(&2, &1, fun))
def traverse(children, needle, fun) when is_list(children) do
children
|> Enum.map_reduce(:ok, &maybe_traverse({&1, needle, fun}, &2))
|> case do
{children, :halt} -> {:halt, children}
{children, :ok} -> children
end
end
def traverse(text, _, _) when is_binary(text), do: text
def traverse({name, attrs, children} = element, needle, fun) do
cond do
needle == name -> fun.(element)
needle == element -> fun.(element)
true -> handle_traversed({name, attrs, children}, needle, fun)
end
end
def traverse({:comment, "[if " <> _rest} = comment, _, _), do: comment
def traverse({:comment, "<![endif]" <> _rest} = comment, _, _), do: comment
def traverse({:comment, _}, _, _), do: ""
def traverse(element, _, _), do: element
defp maybe_traverse({element, needle, fun}, :ok) do
case traverse(element, needle, fun) do
{:halt, children} -> {children, :halt}
children -> {children, :ok}
end
end
defp maybe_traverse({element, _needle, _fun}, :halt), do: {element, :halt}
defp handle_traversed({name, attrs, children}, needle, fun) do
case traverse(children, needle, fun) do
{:halt, children} -> {:halt, {name, attrs, children}}
children -> {name, attrs, children}
end
end
@doc """
Traverses all trees in the list searching for the needle, calling the function
with the element and the number of times the needle has been found so far.
## Examples
iex> Premailex.Util.traverse_reduce([{"p", [], ["First paragraph"]}, {"p", [], ["Second paragraph"]}], "p", fn({name, attrs, _children}, acc) -> {name, attrs, ["Updated " <> to_string(acc)]} end)
{[{"p", [], ["Updated 0"]}, {"p", [], ["Updated 1"]}], 2}
"""
@spec traverse_reduce(list, needle, function) :: {html_tree, integer}
def traverse_reduce(children, needle, fun) when is_list(children),
do:
Enum.map_reduce(
children,
0,
&{traverse(&1, needle, fn element -> fun.(element, &2) end), &2 + 1}
)
@doc """
Traverses tree until first match for needle.
## Examples
iex> Premailex.Util.traverse_until_first({"div", [], [{"p", [], ["First paragraph"]}, {"p", [], ["Second paragraph"]}]}, "p", fn {name, attrs, _children} -> {name, attrs, ["Updated"]} end)
{"div", [], [{"p", [], ["Updated"]}, {"p", [], ["Second paragraph"]}]}
"""
@spec traverse_until_first(html_tree, needle, function) :: html_tree
def traverse_until_first(html, needle, fun) do
case traverse(html, needle, &{:halt, fun.(&1)}) do
{:halt, html} -> html
html -> html
end
end
end
|
lib/premailex/util.ex
| 0.797083 | 0.563948 |
util.ex
|
starcoder
|
defmodule Dawdle do
@moduledoc """
API for the Dawdle messaging system.
"""
@type argument :: any()
@type callback :: (argument() -> any())
@type duration :: non_neg_integer()
@type event :: struct()
@type handler :: module()
@doc """
Signals an event.
The event is encoded and enqueued and will be processed by a handler running
on a node running the Dawdle listener. See `Dawdle.Handler` for information
on creating event handlers.
Use the `:delay` option to delay the signaling of the event.
Returns `:ok` when the event is successfully enqueued. Otherwise, returns
an error tuple.
## Examples
```
defmodule MyApp.TestEvent do
defstruct :foo, :bar
end
Dawdle.signal(%MyApp.TestEvent{foo: 1, bar: 2})
Dawdle.signal(%MyApp.TestEvent{foo: 1, bar: 2}, delay: 5)
```
"""
@spec signal(event(), Keyword.t()) :: :ok | {:error, term()}
defdelegate signal(event, opts \\ []), to: Dawdle.Client
@doc """
Registers all known event handlers.
Dawdle searches through all loaded modules for any that implement the
`Dawdle.Handler` behaviour and registers them. This is automatically called
when the `:dawdle` application starts.
"""
@spec register_all_handlers() :: :ok
defdelegate register_all_handlers, to: Dawdle.Client
@doc """
Registers an event handler.
After calling this function, the next time the specified event occurs, the
handler function will be called with the data from that event.
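## Examples

A sketch of a typical handler registration. `MyApp.TestEventHandler` is
illustrative, and the `use Dawdle.Handler` / `handle_event/1` contract it
assumes is described in `Dawdle.Handler`.

```
defmodule MyApp.TestEventHandler do
  use Dawdle.Handler, only: [MyApp.TestEvent]

  def handle_event(%MyApp.TestEvent{} = event) do
    # React to the event here.
    IO.inspect(event)
  end
end

Dawdle.register_handler(MyApp.TestEventHandler)
```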
"""
@spec register_handler(handler(), Keyword.t()) :: :ok | {:error, term()}
defdelegate register_handler(handler, opts \\ []), to: Dawdle.Client
@doc """
Unregisters an event handler.
"""
@spec unregister_handler(handler()) :: :ok
defdelegate unregister_handler(handler), to: Dawdle.Client
@doc """
Returns the total number of subscribers.
"""
@spec handler_count :: non_neg_integer()
defdelegate handler_count, to: Dawdle.Client
@doc """
Returns the number of subscribers to a specific event.
"""
@spec handler_count(event()) :: non_neg_integer()
defdelegate handler_count(event), to: Dawdle.Client
@doc """
Starts the pollers if they were not started automatically at application
startup.
"""
@spec start_pollers :: :ok
defdelegate start_pollers, to: Dawdle.Client
@doc """
Stops any running pollers.
"""
@spec stop_pollers :: :ok
defdelegate stop_pollers, to: Dawdle.Client
# Experimental API
# This may be extracted into a separate library in the future
@doc """
Send a function to be executed.
Note that this is an experimental API.
The function is encoded and enqueued and will be executed on a node running
the Dawdle listener. Even if the listener is running on the current node,
the function may still be executed on another node.
The passed function is evaluated for its side effects and any return value
is ignored.
Returns `:ok` when the function is successfully enqueued. Otherwise, returns
an error tuple.
## Examples
```
iex> Dawdle.call(fn ->
...> # Do something expensive...
...> :ok
...> end)
:ok
```
"""
@spec call(fun()) :: :ok | {:error, term()}
defdelegate call(fun), to: Dawdle.Delay.Handler
@doc """
Send a function to be executed after a delay.
Note that this is an experimental API.
The function is encoded and enqueued and will be executed on a node running
the Dawdle listener after the specified delay. Even if the listener is
running on the current node, the function may still be executed on another
node.
The passed function is evaluated for its side effects and any return value
is ignored.
Returns `:ok` when the function is successfully enqueued. Otherwise, returns
an error tuple.
## Examples
```
iex> Dawdle.call_after(5, fn ->
...> # Do something later...
...> :ok
...> end)
:ok
```
"""
@spec call_after(duration(), fun()) :: :ok | {:error, term()}
defdelegate call_after(delay, fun), to: Dawdle.Delay.Handler
end
|
lib/dawdle.ex
| 0.886608 | 0.802826 |
dawdle.ex
|
starcoder
|
defmodule TelemetryMetricsTelegraf.Telegraf.ConfigAdviser do
@moduledoc """
Generates telegraf aggregator config from `Telemetry.Metrics` definitions.
* `Telemetry.Metrics.Distribution` - [histogram](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/histogram).
* `Telemetry.Metrics.LastValue` - [final](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/final).
* `Telemetry.Metrics.Summary` - [basicstats](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/basicstats). A list of stats can be configured via `:summary_stats` option.
* `Telemetry.Metrics.Sum` - [basicstats](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/basicstats) with `stats = ["sum"]`.
* `Telemetry.Metrics.Counter` - [basicstats](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/basicstats) with `stats = ["count"]`.
## Usage
TelemetryMetricsTelegraf.Telegraf.ConfigAdviser.render(MyAppWeb.Telemetry.metrics(), [])
"""
alias TelemetryMetricsTelegraf.Telegraf.ConfigTemplates
import TelemetryMetricsTelegraf.AppConfig, only: [app_config: 0]
import TelemetryMetricsTelegraf.Utils,
only: [fetch_option!: 2, fetch_options!: 2, measurement_name: 1]
@spec render([Telemetry.Metrics.t()], keyword()) :: String.t()
@doc """
Renders telegraf aggregator config from a list of `Telemetry.Metrics` definitions.
TelemetryMetricsTelegraf.Telegraf.ConfigAdviser.render(MyAppWeb.Telemetry.metrics(), [])
See `TelemetryMetricsTelegraf.AppConfig` for a list of supported options.
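A sketch with inline metric definitions. The `period` and `summary_stats`
values here are illustrative; see `TelemetryMetricsTelegraf.AppConfig` for
what is actually supported.

    metrics = [
      Telemetry.Metrics.summary("phoenix.endpoint.stop.duration",
        reporter_options: [period: "30s"]
      ),
      Telemetry.Metrics.counter("my_app.repo.query.count",
        reporter_options: [period: "30s"]
      )
    ]

    TelemetryMetricsTelegraf.Telegraf.ConfigAdviser.render(metrics,
      summary_stats: [:mean, :max]
    )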
"""
def render(metrics, opts) do
metrics
|> Enum.group_by(fn m -> m.__struct__ end)
|> Enum.flat_map(fn {metric_type, metrics} -> render_group(metric_type, metrics, opts) end)
|> Enum.join("\n")
end
defp render_group(Telemetry.Metrics.Summary, metrics, opts) do
for {period, measurements} <- group_measurements_by_period(metrics, opts) do
ConfigTemplates.basicstats_aggeregator(measurements,
period: period,
stats: fetch_option!(:summary_stats, [opts, app_config()])
)
end
end
defp render_group(Telemetry.Metrics.Counter, metrics, opts) do
for {period, measurements} <- group_measurements_by_period(metrics, opts) do
ConfigTemplates.basicstats_aggeregator(measurements, period: period, stats: [:count])
end
end
defp render_group(Telemetry.Metrics.Sum, metrics, opts) do
for {period, measurements} <- group_measurements_by_period(metrics, opts) do
ConfigTemplates.basicstats_aggeregator(measurements, period: period, stats: [:sum])
end
end
defp render_group(Telemetry.Metrics.LastValue, metrics, opts) do
for {period, measurements} <- group_measurements_by_period(metrics, opts) do
ConfigTemplates.final_aggeregator(measurements, period: period)
end
end
defp render_group(Telemetry.Metrics.Distribution, metrics, global_opts) do
options_keys = [:period, :histogram_reset, :histogram_cumulative]
metrics
|> Enum.group_by(&Keyword.take(&1.reporter_options, options_keys))
|> Enum.map(fn {repoter_opts, metrics} ->
histogram_opts = fetch_options!(options_keys, [repoter_opts, global_opts, app_config()])
metrics
|> Enum.map(&{measurement_name(&1), &1.buckets})
|> Enum.uniq()
|> ConfigTemplates.histogram_aggregator(histogram_opts)
end)
end
defp render_group(metric_type, metrics, opts) do
ConfigTemplates.unknown_metric_type(metric_type, metrics, opts)
end
defp group_measurements_by_period(metrics, opts) do
metrics
|> Enum.group_by(&fetch_option!(:period, [&1.reporter_options, opts, app_config()]))
|> Enum.into(%{}, fn {period, metrics} ->
measurements = metrics |> Enum.map(&measurement_name/1) |> Enum.uniq()
{period, measurements}
end)
end
end
|
lib/telemetry_metrics_telegraf/telegraf/config_adviser.ex
| 0.880727 | 0.64961 |
config_adviser.ex
|
starcoder
|
defmodule Mix.Tasks.Compile.Elixir do
use Mix.Task.Compiler
@recursive true
@manifest "compile.elixir"
@moduledoc """
Compiles Elixir source files.
Elixir is smart enough to recompile only files that have changed
and their dependencies. This means if `lib/a.ex` is invoking
a function defined over `lib/b.ex`, whenever `lib/b.ex` changes,
`lib/a.ex` is also recompiled.
Note it is important to recompile a file's dependencies, as
there are often compile-time dependencies between them.
## `__mix_recompile__?/0`
A module may export a `__mix_recompile__?/0` function that can
cause the module to be recompiled using custom rules. For example,
`@external_resource` already adds a compile-time dependency on an
external file, however to depend on a _dynamic_ list of files we
can do:
defmodule MyModule do
paths = Path.wildcard("*.txt")
paths_hash = :erlang.md5(paths)
for path <- paths do
@external_resource path
end
def __mix_recompile__?() do
Path.wildcard("*.txt") |> :erlang.md5() != unquote(paths_hash)
end
end
The compiler calls `__mix_recompile__?/0` for every module being
compiled (or previously compiled), so it is important to do as little
work there as possible in order not to slow down compilation.
If a module has `@compile {:autoload, false}`, `__mix_recompile__?/0` will
not be used.
## Command line options
* `--verbose` - prints each file being compiled
* `--force` - forces compilation regardless of modification times
* `--docs` (`--no-docs`) - attaches (or not) documentation to compiled modules
* `--debug-info` (`--no-debug-info`) - attaches (or not) debug info to compiled modules
* `--ignore-module-conflict` - does not emit warnings if a module was previously defined
* `--warnings-as-errors` - treats warnings in the current project as errors and
return a non-zero exit code
* `--long-compilation-threshold N` - sets the "long compilation" threshold
(in seconds) to `N` (see the docs for `Kernel.ParallelCompiler.compile/2`)
* `--profile` - if set to `time`, outputs timing information of compilation steps
* `--all-warnings` - prints warnings even from files that do not need to be recompiled
* `--tracer` - adds a compiler tracer in addition to any specified in the `mix.exs` file
## Configuration
* `:elixirc_paths` - directories to find source files.
Defaults to `["lib"]`.
* `:elixirc_options` - compilation options that apply to Elixir's compiler.
See `Code.put_compiler_option/2` for a complete list of options. These
options are often overridable from the command line using the switches
above.
* `[xref: [exclude: ...]]` - a list of `module` or `{module, function, arity}`
that should not be warned on in case on undefined modules or undefined
application warnings.
"""
@switches [
force: :boolean,
docs: :boolean,
warnings_as_errors: :boolean,
ignore_module_conflict: :boolean,
debug_info: :boolean,
verbose: :boolean,
long_compilation_threshold: :integer,
profile: :string,
all_warnings: :boolean,
tracer: :keep
]
@impl true
def run(args) do
{opts, _, _} = OptionParser.parse(args, switches: @switches)
project = Mix.Project.config()
dest = Mix.Project.compile_path(project)
srcs = project[:elixirc_paths]
unless is_list(srcs) do
Mix.raise(":elixirc_paths should be a list of paths, got: #{inspect(srcs)}")
end
manifest = manifest()
configs = [Mix.Project.config_mtime() | Mix.Tasks.Compile.Erlang.manifests()]
force = opts[:force] || Mix.Utils.stale?(configs, [manifest])
opts =
(project[:elixirc_options] || [])
|> Keyword.merge(opts)
|> xref_exclude_opts(project)
|> tracers_opts()
|> profile_opts()
Mix.Compilers.Elixir.compile(manifest, srcs, dest, [:ex], force, opts)
end
@impl true
def manifests, do: [manifest()]
defp manifest, do: Path.join(Mix.Project.manifest_path(), @manifest)
@impl true
def clean do
dest = Mix.Project.compile_path()
Mix.Compilers.Elixir.clean(manifest(), dest)
end
defp xref_exclude_opts(opts, project) do
exclude = List.wrap(project[:xref][:exclude])
if exclude == [] do
opts
else
Keyword.update(opts, :no_warn_undefined, exclude, &(List.wrap(&1) ++ exclude))
end
end
defp tracers_opts(opts) do
case Keyword.pop_values(opts, :tracer) do
{[], opts} ->
opts
{tracers, opts} ->
tracers = Enum.map(tracers, &Module.concat([&1]))
Keyword.update(opts, :tracers, tracers, &(tracers ++ &1))
end
end
defp profile_opts(opts) do
case Keyword.fetch(opts, :profile) do
{:ok, "time"} -> Keyword.put(opts, :profile, :time)
{:ok, _} -> Keyword.delete(opts, :profile)
:error -> opts
end
end
end
|
lib/mix/lib/mix/tasks/compile.elixir.ex
| 0.814385 | 0.400339 |
compile.elixir.ex
|
starcoder
|
defmodule AWS.Kinesis do
@moduledoc """
Amazon Kinesis Data Streams Service API Reference
Amazon Kinesis Data Streams is a managed service that scales elastically for
real-time processing of streaming big data.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "Kinesis",
api_version: "2013-12-02",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "kinesis",
global?: false,
protocol: "json",
service_id: "Kinesis",
signature_version: "v4",
signing_name: "kinesis",
target_prefix: "Kinesis_20131202"
}
end
@doc """
Adds or updates tags for the specified Kinesis data stream.
Each time you invoke this operation, you can specify up to 10 tags. If you want
to add more than 10 tags to your stream, you can invoke this operation multiple
times. In total, each stream can have up to 50 tags.
If tags have already been assigned to the stream, `AddTagsToStream` overwrites
any existing tags that correspond to the specified tag keys.
`AddTagsToStream` has a limit of five transactions per second per account.
"""
def add_tags_to_stream(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AddTagsToStream", input, options)
end
@doc """
Creates a Kinesis data stream.
A stream captures and transports data records that are continuously emitted from
different data sources or *producers*. Scale-out within a stream is explicitly
supported by means of shards, which are uniquely identified groups of data
records in a stream.
You specify and control the number of shards that a stream is composed of. Each
shard can support reads up to five transactions per second, up to a maximum data
read total of 2 MiB per second. Each shard can support writes up to 1,000
records per second, up to a maximum data write total of 1 MiB per second. If the
amount of data input increases or decreases, you can add or remove shards.
The stream name identifies the stream. The name is scoped to the AWS account
used by the application. It is also scoped by AWS Region. That is, two streams
in two different accounts can have the same name, and two streams in the same
account, but in two different Regions, can have the same name.
`CreateStream` is an asynchronous operation. Upon receiving a `CreateStream`
request, Kinesis Data Streams immediately returns and sets the stream status to
`CREATING`. After the stream is created, Kinesis Data Streams sets the stream
status to `ACTIVE`. You should perform read and write operations only on an
`ACTIVE` stream.
You receive a `LimitExceededException` when making a `CreateStream` request when
you try to do one of the following:
* Have more than five streams in the `CREATING` state at any point
in time.
* Create more shards than are authorized for your account.
For the default shard limit for an AWS account, see [Amazon Kinesis Data Streams Limits](https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
in the *Amazon Kinesis Data Streams Developer Guide*. To increase this limit,
[contact AWS Support](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html).
You can use `DescribeStream` to check the stream status, which is returned in
`StreamStatus`.
`CreateStream` has a limit of five transactions per second per account.
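A minimal call sketch, assuming a client built with `AWS.Client.create/3`
from `aws-elixir` (credentials, region, and names are placeholders):

    client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")

    {:ok, _body, _response} =
      AWS.Kinesis.create_stream(client, %{
        "StreamName" => "my-stream",
        "ShardCount" => 2
      })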
"""
def create_stream(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateStream", input, options)
end
@doc """
Decreases the Kinesis data stream's retention period, which is the length of
time data records are accessible after they are added to the stream.
The minimum value of a stream's retention period is 24 hours.
This operation may result in lost data. For example, if the stream's retention
period is 48 hours and is decreased to 24 hours, any data already in the stream
that is older than 24 hours is inaccessible.
"""
def decrease_stream_retention_period(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DecreaseStreamRetentionPeriod", input, options)
end
@doc """
Deletes a Kinesis data stream and all its shards and data.
You must shut down any applications that are operating on the stream before you
delete the stream. If an application attempts to operate on a deleted stream, it
receives the exception `ResourceNotFoundException`.
If the stream is in the `ACTIVE` state, you can delete it. After a
`DeleteStream` request, the specified stream is in the `DELETING` state until
Kinesis Data Streams completes the deletion.
**Note:** Kinesis Data Streams might continue to accept data read and write
operations, such as `PutRecord`, `PutRecords`, and `GetRecords`, on a stream in
the `DELETING` state until the stream deletion is complete.
When you delete a stream, any shards in that stream are also deleted, and any
tags are dissociated from the stream.
You can use the `DescribeStream` operation to check the state of the stream,
which is returned in `StreamStatus`.
`DeleteStream` has a limit of five transactions per second per account.
"""
def delete_stream(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteStream", input, options)
end
@doc """
To deregister a consumer, provide its ARN.
Alternatively, you can provide the ARN of the data stream and the name you gave
the consumer when you registered it. You may also provide all three parameters,
as long as they don't conflict with each other. If you don't know the name or
ARN of the consumer that you want to deregister, you can use the
`ListStreamConsumers` operation to get a list of the descriptions of all the
consumers that are currently registered with a given data stream. The
description of a consumer contains its name and ARN.
This operation has a limit of five transactions per second per stream.
"""
def deregister_stream_consumer(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeregisterStreamConsumer", input, options)
end
@doc """
Describes the shard limits and usage for the account.
If you update your account limits, the old limits might be returned for a few
minutes.
This operation has a limit of one transaction per second per account.
"""
def describe_limits(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLimits", input, options)
end
@doc """
Describes the specified Kinesis data stream.
The information returned includes the stream name, Amazon Resource Name (ARN),
creation time, enhanced metric configuration, and shard map. The shard map is an
array of shard objects. For each shard object, there is the hash key and
sequence number ranges that the shard spans, and the IDs of any earlier shards
that played a role in creating the shard. Every record ingested in the stream
is identified by a sequence number, which is assigned when the record is put
into the stream.
You can limit the number of shards returned by each call. For more information,
see [Retrieving Shards from a Stream](https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-retrieve-shards.html)
in the *Amazon Kinesis Data Streams Developer Guide*.
There are no guarantees about the chronological order of the shards returned. To
process shards in chronological order, use the ID of the parent shard to track
the lineage to the oldest shard.
This operation has a limit of 10 transactions per second per account.
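A minimal call sketch (`client` as built with `AWS.Client.create/3`; the
response carries the documented `StreamDescription` structure):

    {:ok, %{"StreamDescription" => description}, _response} =
      AWS.Kinesis.describe_stream(client, %{"StreamName" => "my-stream"})

    description["StreamStatus"]
    #=> "ACTIVE"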
"""
def describe_stream(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeStream", input, options)
end
@doc """
To get the description of a registered consumer, provide the ARN of the
consumer.
Alternatively, you can provide the ARN of the data stream and the name you gave
the consumer when you registered it. You may also provide all three parameters,
as long as they don't conflict with each other. If you don't know the name or
ARN of the consumer that you want to describe, you can use the
`ListStreamConsumers` operation to get a list of the descriptions of all the
consumers that are currently registered with a given data stream.
This operation has a limit of 20 transactions per second per stream.
"""
def describe_stream_consumer(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeStreamConsumer", input, options)
end
@doc """
Provides a summarized description of the specified Kinesis data stream without
the shard list.
The information returned includes the stream name, Amazon Resource Name (ARN),
status, record retention period, approximate creation time, monitoring,
encryption details, and open shard count.
`DescribeStreamSummary` has a limit of 20 transactions per second per account.
"""
def describe_stream_summary(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeStreamSummary", input, options)
end
@doc """
Disables enhanced monitoring.
"""
def disable_enhanced_monitoring(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisableEnhancedMonitoring", input, options)
end
@doc """
Enables enhanced Kinesis data stream monitoring for shard-level metrics.
"""
def enable_enhanced_monitoring(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "EnableEnhancedMonitoring", input, options)
end
@doc """
Gets data records from a Kinesis data stream's shard.
Specify a shard iterator using the `ShardIterator` parameter. The shard iterator
specifies the position in the shard from which you want to start reading data
records sequentially. If there are no records available in the portion of the
shard that the iterator points to, `GetRecords` returns an empty list. It might
take multiple calls to get to a portion of the shard that contains records.
You can scale by provisioning multiple shards per stream while considering
service limits (for more information, see [Amazon Kinesis Data Streams Limits](https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
in the *Amazon Kinesis Data Streams Developer Guide*). Your application should
have one thread per shard, each reading continuously from its stream. To read
from a stream continually, call `GetRecords` in a loop. Use `GetShardIterator`
to get the shard iterator to specify in the first `GetRecords` call.
`GetRecords` returns a new shard iterator in `NextShardIterator`. Specify the
shard iterator returned in `NextShardIterator` in subsequent calls to
`GetRecords`. If the shard has been closed, the shard iterator can't return more
data and `GetRecords` returns `null` in `NextShardIterator`. You can terminate
the loop when the shard is closed, or when the shard iterator reaches the record
with the sequence number or other attribute that marks it as the last record to
process.
Each data record can be up to 1 MiB in size, and each shard can read up to 2 MiB
per second. You can ensure that your calls don't exceed the maximum supported
size or throughput by using the `Limit` parameter to specify the maximum number
of records that `GetRecords` can return. Consider your average record size when
determining this limit. The maximum number of records that can be returned per
call is 10,000.
The size of the data returned by `GetRecords` varies depending on the
utilization of the shard. The maximum size of data that `GetRecords` can return
is 10 MiB. If a call returns this amount of data, subsequent calls made within
the next 5 seconds throw `ProvisionedThroughputExceededException`. If there is
insufficient provisioned throughput on the stream, subsequent calls made within
the next 1 second throw `ProvisionedThroughputExceededException`. `GetRecords`
doesn't return any data when it throws an exception. For this reason, we
recommend that you wait 1 second between calls to `GetRecords`. However, it's
possible that the application will get exceptions for longer than 1 second.
To detect whether the application is falling behind in processing, you can use
the `MillisBehindLatest` response attribute. You can also monitor the stream
using CloudWatch metrics and other mechanisms (see
[Monitoring](https://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html) in
the *Amazon Kinesis Data Streams Developer Guide*).
Each Amazon Kinesis record includes a value, `ApproximateArrivalTimestamp`, that
is set when a stream successfully receives and stores a record. This is commonly
referred to as a server-side time stamp, whereas a client-side time stamp is set
when a data producer creates or sends the record to a stream (a data producer is
any data source putting data records into a stream, for example with
`PutRecords`). The time stamp has millisecond precision. There are no guarantees
about the time stamp accuracy, or that the time stamp is always increasing. For
example, records in a shard or across a stream might have time stamps that are
out of order.
This operation has a limit of five transactions per second per shard.
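A rough polling sketch (`client` as built with `AWS.Client.create/3`;
error handling, backoff, and shard discovery are omitted):

    {:ok, %{"ShardIterator" => iterator}, _} =
      AWS.Kinesis.get_shard_iterator(client, %{
        "StreamName" => "my-stream",
        "ShardId" => "shardId-000000000000",
        "ShardIteratorType" => "TRIM_HORIZON"
      })

    defmodule ShardReader do
      # Follow NextShardIterator until the shard is closed (iterator is nil),
      # waiting the recommended one second between calls.
      def read(_client, nil), do: :done

      def read(client, iterator) do
        {:ok, body, _} =
          AWS.Kinesis.get_records(client, %{"ShardIterator" => iterator})

        Enum.each(body["Records"], &IO.inspect/1)
        Process.sleep(1_000)
        read(client, body["NextShardIterator"])
      end
    end

    ShardReader.read(client, iterator)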
"""
def get_records(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetRecords", input, options)
end
@doc """
Gets an Amazon Kinesis shard iterator.
A shard iterator expires 5 minutes after it is returned to the requester.
A shard iterator specifies the shard position from which to start reading data
records sequentially. The position is specified using the sequence number of a
data record in a shard. A sequence number is the identifier associated with
every record ingested in the stream, and is assigned when a record is put into
the stream. Each stream has one or more shards.
You must specify the shard iterator type. For example, you can set the
`ShardIteratorType` parameter to read exactly from the position denoted by a
specific sequence number by using the `AT_SEQUENCE_NUMBER` shard iterator type.
Alternatively, the parameter can read right after the sequence number by using
the `AFTER_SEQUENCE_NUMBER` shard iterator type, using sequence numbers returned
by earlier calls to `PutRecord`, `PutRecords`, `GetRecords`, or
`DescribeStream`. In the request, you can specify the shard iterator type
`AT_TIMESTAMP` to read records from an arbitrary point in time, `TRIM_HORIZON`
to cause `ShardIterator` to point to the last untrimmed record in the shard in
the system (the oldest data record in the shard), or `LATEST` so that you always
read the most recent data in the shard.
When you read repeatedly from a stream, use a `GetShardIterator` request to get
the first shard iterator for use in your first `GetRecords` request and for
subsequent reads use the shard iterator returned by the `GetRecords` request in
`NextShardIterator`. A new shard iterator is returned by every `GetRecords`
request in `NextShardIterator`, which you use in the `ShardIterator` parameter
of the next `GetRecords` request.
If a `GetShardIterator` request is made too often, you receive a
`ProvisionedThroughputExceededException`. For more information about throughput
limits, see `GetRecords`, and [Streams Limits](https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
in the *Amazon Kinesis Data Streams Developer Guide*.
If the shard is closed, `GetShardIterator` returns a valid iterator for the last
sequence number of the shard. A shard can be closed as a result of using
`SplitShard` or `MergeShards`.
`GetShardIterator` has a limit of five transactions per second per account per
open shard.
"""
def get_shard_iterator(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetShardIterator", input, options)
end
@doc """
Increases the Kinesis data stream's retention period, which is the length of
time data records are accessible after they are added to the stream.
The maximum value of a stream's retention period is 168 hours (7 days).
If you choose a longer stream retention period, this operation increases the
time period during which records that have not yet expired are accessible.
However, it does not make previous, expired data (older than the stream's
previous retention period) accessible after the operation has been called. For
example, if a stream's retention period is set to 24 hours and is increased to
168 hours, any data that is older than 24 hours remains inaccessible to consumer
applications.
"""
def increase_stream_retention_period(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "IncreaseStreamRetentionPeriod", input, options)
end
@doc """
Lists the shards in a stream and provides information about each shard.
This operation has a limit of 100 transactions per second per data stream.
This API is a new operation that is used by the Amazon Kinesis Client Library
(KCL). If you have a fine-grained IAM policy that only allows specific
operations, you must update your policy to allow calls to this API. For more
information, see [Controlling Access to Amazon Kinesis Data Streams Resources Using
IAM](https://docs.aws.amazon.com/streams/latest/dev/controlling-access.html).
"""
def list_shards(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListShards", input, options)
end
@doc """
Lists the consumers registered to receive data from a stream using enhanced
fan-out, and provides information about each consumer.
This operation has a limit of 5 transactions per second per stream.
"""
def list_stream_consumers(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListStreamConsumers", input, options)
end
@doc """
Lists your Kinesis data streams.
The number of streams may be too large to return from a single call to
`ListStreams`. You can limit the number of returned streams using the `Limit`
parameter. If you do not specify a value for the `Limit` parameter, Kinesis Data
Streams uses the default limit, which is currently 10.
You can detect if there are more streams available to list by using the
`HasMoreStreams` flag from the returned output. If there are more streams
available, you can request more streams by using the name of the last stream
returned by the `ListStreams` request in the `ExclusiveStartStreamName`
parameter in a subsequent request to `ListStreams`. The group of stream names
returned by the subsequent request is then added to the list. You can continue
this process until all the stream names have been collected in the list.
`ListStreams` has a limit of five transactions per second per account.
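A pagination sketch following `HasMoreStreams` and `ExclusiveStartStreamName`
(`client` as built with `AWS.Client.create/3`):

    def list_all_streams(client, input \\ %{}) do
      {:ok, body, _} = AWS.Kinesis.list_streams(client, input)
      names = body["StreamNames"]

      if body["HasMoreStreams"] do
        next = %{"ExclusiveStartStreamName" => List.last(names)}
        names ++ list_all_streams(client, next)
      else
        names
      end
    end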
"""
def list_streams(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListStreams", input, options)
end
@doc """
Lists the tags for the specified Kinesis data stream.
This operation has a limit of five transactions per second per account.
"""
def list_tags_for_stream(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForStream", input, options)
end
@doc """
Merges two adjacent shards in a Kinesis data stream and combines them into a
single shard to reduce the stream's capacity to ingest and transport data.
Two shards are considered adjacent if the union of the hash key ranges for the
two shards form a contiguous set with no gaps. For example, if you have two
shards, one with a hash key range of 276...381 and the other with a hash key
range of 382...454, then you could merge these two shards into a single shard
that would have a hash key range of 276...454. After the merge, the single child
shard receives data for all hash key values covered by the two parent shards.
`MergeShards` is called when there is a need to reduce the overall capacity of a
stream because of excess capacity that is not being used. You must specify the
shard to be merged and the adjacent shard for a stream. For more information
about merging shards, see [Merge Two Shards](https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html)
in the *Amazon Kinesis Data Streams Developer Guide*.
If the stream is in the `ACTIVE` state, you can call `MergeShards`. If a stream
is in the `CREATING`, `UPDATING`, or `DELETING` state, `MergeShards` returns a
`ResourceInUseException`. If the specified stream does not exist, `MergeShards`
returns a `ResourceNotFoundException`.
You can use `DescribeStream` to check the state of the stream, which is returned
in `StreamStatus`.
`MergeShards` is an asynchronous operation. Upon receiving a `MergeShards`
request, Amazon Kinesis Data Streams immediately returns a response and sets the
`StreamStatus` to `UPDATING`. After the operation is completed, Kinesis Data
Streams sets the `StreamStatus` to `ACTIVE`. Read and write operations continue
to work while the stream is in the `UPDATING` state.
You use `DescribeStream` to determine the shard IDs that are specified in the
`MergeShards` request.
If you try to operate on too many streams in parallel using `CreateStream`,
`DeleteStream`, `MergeShards`, or `SplitShard`, you receive a
`LimitExceededException`.
`MergeShards` has a limit of five transactions per second per account.
"""
def merge_shards(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "MergeShards", input, options)
end
@doc """
Writes a single data record into an Amazon Kinesis data stream.
Call `PutRecord` to send data into the stream for real-time ingestion and
subsequent processing, one record at a time. Each shard can support writes up to
1,000 records per second, up to a maximum data write total of 1 MiB per second.
You must specify the name of the stream that captures, stores, and transports
the data; a partition key; and the data blob itself.
The data blob can be any type of data; for example, a segment from a log file,
geographic/location data, website clickstream data, and so on.
The partition key is used by Kinesis Data Streams to distribute data across
shards. Kinesis Data Streams segregates the data records that belong to a stream
into multiple shards, using the partition key associated with each data record
to determine the shard to which a given data record belongs.
Partition keys are Unicode strings, with a maximum length limit of 256
characters for each key. An MD5 hash function is used to map partition keys to
128-bit integer values and to map associated data records to shards using the
hash key ranges of the shards. You can override hashing the partition key to
determine the shard by explicitly specifying a hash value using the
`ExplicitHashKey` parameter. For more information, see [Adding Data to a Stream](https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream)
in the *Amazon Kinesis Data Streams Developer Guide*.
`PutRecord` returns the shard ID of where the data record was placed and the
sequence number that was assigned to the data record.
Sequence numbers increase over time and are specific to a shard within a stream,
not across all shards within a stream. To guarantee strictly increasing
ordering, write serially to a shard and use the `SequenceNumberForOrdering`
parameter. For more information, see [Adding Data to a Stream](https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream)
in the *Amazon Kinesis Data Streams Developer Guide*.
After you write a record to a stream, you cannot modify that record or its order
within the stream.
If a `PutRecord` request cannot be processed because of insufficient provisioned
throughput on the shard involved in the request, `PutRecord` throws
`ProvisionedThroughputExceededException`.
By default, data records are accessible for 24 hours from the time that they are
added to a stream. You can use `IncreaseStreamRetentionPeriod` or
`DecreaseStreamRetentionPeriod` to modify this retention period.
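A minimal call sketch. The data blob is Base64-encoded here on the
assumption that the JSON API expects it; `client` as built with
`AWS.Client.create/3`:

    AWS.Kinesis.put_record(client, %{
      "StreamName" => "my-stream",
      "PartitionKey" => "user-123",
      "Data" => Base.encode64("hello, kinesis")
    })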
"""
def put_record(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutRecord", input, options)
end
@doc """
Writes multiple data records into a Kinesis data stream in a single call (also
referred to as a `PutRecords` request).
Use this operation to send data into the stream for data ingestion and
processing.
Each `PutRecords` request can support up to 500 records. Each record in the
request can be as large as 1 MiB, up to a limit of 5 MiB for the entire request,
including partition keys. Each shard can support writes up to 1,000 records per
second, up to a maximum data write total of 1 MiB per second.
You must specify the name of the stream that captures, stores, and transports
the data; and an array of request `Records`, with each record in the array
requiring a partition key and data blob. The record size limit applies to the
total size of the partition key and data blob.
The data blob can be any type of data; for example, a segment from a log file,
geographic/location data, website clickstream data, and so on.
The partition key is used by Kinesis Data Streams as input to a hash function
that maps the partition key and associated data to a specific shard. An MD5 hash
function is used to map partition keys to 128-bit integer values and to map
associated data records to shards. As a result of this hashing mechanism, all
data records with the same partition key map to the same shard within the
stream. For more information, see [Adding Data to a Stream](https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream)
in the *Amazon Kinesis Data Streams Developer Guide*.
Each record in the `Records` array may include an optional parameter,
`ExplicitHashKey`, which overrides the partition key to shard mapping. This
parameter allows a data producer to determine explicitly the shard where the
record is stored. For more information, see [Adding Multiple Records with PutRecords](https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords)
in the *Amazon Kinesis Data Streams Developer Guide*.
The `PutRecords` response includes an array of response `Records`. Each record
in the response array directly correlates with a record in the request array
using natural ordering, from the top to the bottom of the request and response.
The response `Records` array always includes the same number of records as the
request array.
The response `Records` array includes both successfully and unsuccessfully
processed records. Kinesis Data Streams attempts to process all records in each
`PutRecords` request. A single record failure does not stop the processing of
subsequent records. As a result, `PutRecords` doesn't guarantee the ordering of
records. If you need to read records in the same order they are written to the
stream, use `PutRecord` instead of `PutRecords`, and write to the same shard.
A successfully processed record includes `ShardId` and `SequenceNumber` values.
The `ShardId` parameter identifies the shard in the stream where the record is
stored. The `SequenceNumber` parameter is an identifier assigned to the put
record, unique to all records in the stream.
An unsuccessfully processed record includes `ErrorCode` and `ErrorMessage`
values. `ErrorCode` reflects the type of error and can be one of the following
values: `ProvisionedThroughputExceededException` or `InternalFailure`.
`ErrorMessage` provides more detailed information about the
`ProvisionedThroughputExceededException` exception including the account ID,
stream name, and shard ID of the record that was throttled. For more information
about partially successful responses, see [Adding Multiple Records with PutRecords](https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords)
in the *Amazon Kinesis Data Streams Developer Guide*.
After you write a record to a stream, you cannot modify that record or its order
within the stream.
By default, data records are accessible for 24 hours from the time that they are
added to a stream. You can use `IncreaseStreamRetentionPeriod` or
`DecreaseStreamRetentionPeriod` to modify this retention period.
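A batch sketch mirroring the request shape described above (`client` as
built with `AWS.Client.create/3`; the Base64 encoding of each data blob is
an assumption, as with `PutRecord`):

    AWS.Kinesis.put_records(client, %{
      "StreamName" => "my-stream",
      "Records" => [
        %{"PartitionKey" => "user-123", "Data" => Base.encode64("first")},
        %{"PartitionKey" => "user-456", "Data" => Base.encode64("second")}
      ]
    })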
"""
def put_records(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutRecords", input, options)
end
@doc """
Registers a consumer with a Kinesis data stream.
When you use this operation, the consumer you register can then call
`SubscribeToShard` to receive data from the stream using enhanced fan-out, at a
rate of up to 2 MiB per second for every shard you subscribe to. This rate is
unaffected by the total number of consumers that read from the same stream.
You can register up to 20 consumers per stream. A given consumer can only be
registered with one stream at a time.
For an example of how to use this operation, see [Enhanced Fan-Out Using the Kinesis Data Streams
API](/streams/latest/dev/building-enhanced-consumers-api.html).
The use of this operation has a limit of five transactions per second per
account. Also, only 5 consumers can be created simultaneously. In other words,
you cannot have more than 5 consumers in a `CREATING` status at the same time.
Registering a 6th consumer while there are 5 in a `CREATING` status results in a
`LimitExceededException`.
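A minimal call sketch (the stream ARN and consumer name are placeholders;
`client` as built with `AWS.Client.create/3`):

    AWS.Kinesis.register_stream_consumer(client, %{
      "StreamARN" => "arn:aws:kinesis:us-east-1:123456789012:stream/my-stream",
      "ConsumerName" => "my-consumer"
    })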
"""
def register_stream_consumer(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RegisterStreamConsumer", input, options)
end
@doc """
Removes tags from the specified Kinesis data stream.
Removed tags are deleted and cannot be recovered after this operation
successfully completes.
If you specify a tag that does not exist, it is ignored.
`RemoveTagsFromStream` has a limit of five transactions per second per account.
"""
def remove_tags_from_stream(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RemoveTagsFromStream", input, options)
end
@doc """
Splits a shard into two new shards in the Kinesis data stream, to increase the
stream's capacity to ingest and transport data.
`SplitShard` is called when there is a need to increase the overall capacity of
a stream because of an expected increase in the volume of data records being
ingested.
You can also use `SplitShard` when a shard appears to be approaching its maximum
utilization; for example, the producers sending data into the specific shard are
suddenly sending more than previously anticipated. You can also call
`SplitShard` to increase stream capacity, so that more Kinesis Data Streams
applications can simultaneously read data from the stream for real-time
processing.
You must specify the shard to be split and the new hash key, which is the
position in the shard where the shard gets split in two. In many cases, the new
hash key might be the average of the beginning and ending hash key, but it can
be any hash key value in the range being mapped into the shard. For more
information, see [Split a Shard](https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html)
in the *Amazon Kinesis Data Streams Developer Guide*.
You can use `DescribeStream` to determine the shard ID and hash key values for
the `ShardToSplit` and `NewStartingHashKey` parameters that are specified in the
`SplitShard` request.
`SplitShard` is an asynchronous operation. Upon receiving a `SplitShard`
request, Kinesis Data Streams immediately returns a response and sets the stream
status to `UPDATING`. After the operation is completed, Kinesis Data Streams
sets the stream status to `ACTIVE`. Read and write operations continue to work
while the stream is in the `UPDATING` state.
You can use `DescribeStream` to check the status of the stream, which is
returned in `StreamStatus`. If the stream is in the `ACTIVE` state, you can call
`SplitShard`. If a stream is in `CREATING` or `UPDATING` or `DELETING` states,
`DescribeStream` returns a `ResourceInUseException`.
If the specified stream does not exist, `DescribeStream` returns a
`ResourceNotFoundException`. If you try to create more shards than are
authorized for your account, you receive a `LimitExceededException`.
For the default shard limit for an AWS account, see [Kinesis Data Streams Limits](https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
in the *Amazon Kinesis Data Streams Developer Guide*. To increase this limit,
[contact AWS Support](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html).
If you try to operate on too many streams simultaneously using `CreateStream`,
`DeleteStream`, `MergeShards`, and/or `SplitShard`, you receive a
`LimitExceededException`.
`SplitShard` has a limit of five transactions per second per account.
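A minimal call sketch (the new starting hash key below is simply the
midpoint of the 128-bit hash key range, for illustration; `client` as built
with `AWS.Client.create/3`):

    AWS.Kinesis.split_shard(client, %{
      "StreamName" => "my-stream",
      "ShardToSplit" => "shardId-000000000000",
      "NewStartingHashKey" => "170141183460469231731687303715884105728"
    })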
"""
def split_shard(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "SplitShard", input, options)
end
@doc """
Enables or updates server-side encryption using an AWS KMS key for a specified
stream.
Starting encryption is an asynchronous operation. Upon receiving the request,
Kinesis Data Streams returns immediately and sets the status of the stream to
`UPDATING`. After the update is complete, Kinesis Data Streams sets the status
of the stream back to `ACTIVE`. Updating or applying encryption normally takes a
few seconds to complete, but it can take minutes. You can continue to read and
write data to your stream while its status is `UPDATING`. Once the status of the
stream is `ACTIVE`, encryption begins for records written to the stream.
API Limits: You can successfully apply a new AWS KMS key for server-side
encryption 25 times in a rolling 24-hour period.
Note: It can take up to 5 seconds after the stream is in an `ACTIVE` status
before all records written to the stream are encrypted. After you enable
encryption, you can verify that encryption is applied by inspecting the API
response from `PutRecord` or `PutRecords`.
"""
def start_stream_encryption(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartStreamEncryption", input, options)
end
@doc """
Disables server-side encryption for a specified stream.
Stopping encryption is an asynchronous operation. Upon receiving the request,
Kinesis Data Streams returns immediately and sets the status of the stream to
`UPDATING`. After the update is complete, Kinesis Data Streams sets the status
of the stream back to `ACTIVE`. Stopping encryption normally takes a few seconds
to complete, but it can take minutes. You can continue to read and write data to
your stream while its status is `UPDATING`. Once the status of the stream is
`ACTIVE`, records written to the stream are no longer encrypted by Kinesis Data
Streams.
API Limits: You can successfully disable server-side encryption 25 times in a
rolling 24-hour period.
Note: It can take up to 5 seconds after the stream is in an `ACTIVE` status
before all records written to the stream are no longer subject to encryption.
After you disable encryption, you can verify that encryption is not applied by
inspecting the API response from `PutRecord` or `PutRecords`.
"""
def stop_stream_encryption(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopStreamEncryption", input, options)
end
@doc """
This operation establishes an HTTP/2 connection between the consumer you specify
in the `ConsumerARN` parameter and the shard you specify in the `ShardId`
parameter.
After the connection is successfully established, Kinesis Data Streams pushes
records from the shard to the consumer over this connection. Before you call
this operation, call `RegisterStreamConsumer` to register the consumer with
Kinesis Data Streams.
When the `SubscribeToShard` call succeeds, your consumer starts receiving events
of type `SubscribeToShardEvent` over the HTTP/2 connection for up to 5 minutes,
after which time you need to call `SubscribeToShard` again to renew the
subscription if you want to continue to receive records.
You can make one call to `SubscribeToShard` per second per registered consumer
per shard. For example, if you have a 4000 shard stream and two registered
stream consumers, you can make one `SubscribeToShard` request per second for
each combination of shard and registered consumer, allowing you to subscribe
both consumers to all 4000 shards in one second.
If you call `SubscribeToShard` again with the same `ConsumerARN` and `ShardId`
within 5 seconds of a successful call, you'll get a `ResourceInUseException`. If
you call `SubscribeToShard` 5 seconds or more after a successful call, the first
connection will expire and the second call will take over the subscription.
For an example of how to use this operation, see [Enhanced Fan-Out Using the Kinesis Data Streams
API](/streams/latest/dev/building-enhanced-consumers-api.html).
"""
def subscribe_to_shard(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "SubscribeToShard", input, options)
end
@doc """
Updates the shard count of the specified stream to the specified number of
shards.
Updating the shard count is an asynchronous operation. Upon receiving the
request, Kinesis Data Streams returns immediately and sets the status of the
stream to `UPDATING`. After the update is complete, Kinesis Data Streams sets
the status of the stream back to `ACTIVE`. Depending on the size of the stream,
the scaling action could take a few minutes to complete. You can continue to
read and write data to your stream while its status is `UPDATING`.
To update the shard count, Kinesis Data Streams performs splits or merges on
individual shards. This can cause short-lived shards to be created, in addition
to the final shards. These short-lived shards count towards your total shard
limit for your account in the Region.
When using this operation, we recommend that you specify a target shard count
that is a multiple of 25% (25%, 50%, 75%, 100%). You can specify any target
value within your shard limit. However, if you specify a target that isn't a
multiple of 25%, the scaling action might take longer to complete.
This operation has the following default limits. By default, you cannot do the
following:
* Scale more than ten times per rolling 24-hour period per stream
* Scale up to more than double your current shard count for a stream
* Scale down below half your current shard count for a stream
* Scale up to more than 500 shards in a stream
* Scale a stream with more than 500 shards down unless the result is
less than 500 shards
* Scale up to more than the shard limit for your account
For the default limits for an AWS account, see [Streams Limits](https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
in the *Amazon Kinesis Data Streams Developer Guide*. To request an increase in
the call rate limit, the shard limit for this API, or your overall shard limit,
use the [limits form](https://console.aws.amazon.com/support/v1#/case/create?issueType=service-limit-increase&limitType=service-code-kinesis).
"""
def update_shard_count(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateShardCount", input, options)
end
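# Sketch (hypothetical values; assumes the enclosing module is `AWS.Kinesis`):
# doubling a 10-shard stream keeps the target a multiple of 25% of the current
# count, as recommended above.
#
#   AWS.Kinesis.update_shard_count(client, %{
#     "StreamName" => "my-stream",
#     "TargetShardCount" => 20,
#     "ScalingType" => "UNIFORM_SCALING"
#   })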
end
|
lib/aws/generated/kinesis.ex
| 0.941419 | 0.629461 |
kinesis.ex
|
starcoder
|
defmodule SpeediviewUI.Component.OnScreenKeyboard do
@moduledoc """
The Keyboard component provides two styles of keyboard: a full QWERTY keyboard and a numeric pad.
To use this component in your scene, add it to a graph with ScenicUI.Keyboard.add_to_graph/3. You
can use one of the provided keyboard layouts by passing `:default` or `:num_pad` as the first argument
or you can call `Keyboard.default/0` or `Keyboard.num_pad/0` to fetch the configuration maps and modify
specific parameters. The following parameters are configurable on each keyboard:
* `top` - the y position of the keyboard component, defaults to container bottom - keyboard height
* `c_width` - container width (by default this is the viewport width)
* `c_height` - container height (by default this is the viewport height)
* `btn_width` - defaults to 5% of the container width
* `btn_height` - the default is calculated from the keyboard height
* `height` - overall keyboard height
* `font_size` - button font size
* `margin` - the margin around each button
* `style` - style applied to the keyboard component
* `layout` - a map of keyboard modes (`default` and `shift`) containing keys
* `btn_style` - a function that gets called for each button allowing custom styles to be applied
* `transform` - a function called when a button is clicked that allows the button contents to be transformed before being sent in the key event
"""
use Scenic.Component, has_children: true
alias Scenic.Graph
alias Scenic.ViewPort
# alias Scenic.Primitive.Style.Theme
import Scenic.Primitives
import Scenic.Components
@english_simple %{
# top: 300,
# c_width: 500,
# c_height: 600,
# btn_width: 20,
# btn_height: 30,
height: 180,
font_size: 18,
margin: 5,
style: [fill: {48, 48, 48}],
layout: %{
default: [
~w(` 1 2 3 4 5 6 7 8 9 0 - = Backspace),
~w(Tab q w e r t y u i o p [ ] \\),
["Caps Lock"] ++ ~w(a s d f g h j k l ; ' Enter),
~w(Shift z x c v b n m , . / Shift),
~w(@ Space)
],
shift: [
~w(~ ! @ # $ % ^ & * \( \) _ + Backspace),
~w(Tab Q W E R T Y U I O P { } |),
["Caps Lock"] ++ ~w(A S D F G H J K L : " Enter),
~w(Shift Z X C V B N M < > ? Shift),
~w(@ Space)
]
},
btn_style: &__MODULE__.btn_style/2,
transform: &__MODULE__.transform/1
}
@num_pad %{
font_size: 15,
layout: %{
default: [
~w(= \( \) Back),
~w(Clear / * -),
~w(7 8 9 +),
~w(4 5 6),
~w(1 2 3 Enter),
~w(0 .)
]
},
btn_style: &__MODULE__.num_pad_btn_style/2
}
# --------------------------------------------------------
def info(_data) do
"""
#{IO.ANSI.red()}The first argument to Keyboard.add_to_graph/2 must be `:default`, `:num_pad`, or a custom map (see Keyboard.default/0)
#{IO.ANSI.yellow()}
#{IO.ANSI.default_color()}There are two configuration maps provided that you can modify: Keyboard.default/0 and Keyboard.num_pad/0
"""
end
# --------------------------------------------------------
def verify(:default) do
{:ok, @english_simple}
end
def verify(:num_pad) do
{:ok, @num_pad}
end
def verify(keyboard) when is_map(keyboard), do: {:ok, keyboard}
def verify(_), do: :invalid_data
# --------------------------------------------------------
def init(:default, opts), do: init(@english_simple, opts)
def init(:num_pad, opts), do: init(@num_pad, opts)
def init(keyboard, opts) do
{:ok, %ViewPort.Status{size: {vp_width, vp_height}}} = ViewPort.info(opts[:viewport])
keyboard =
keyboard
|> Enum.reduce(@english_simple, fn {key, val}, acc -> Map.put(acc, key, val) end)
|> Map.put_new(:c_width, vp_width)
|> Map.put_new(:c_height, vp_height)
keyboard =
keyboard
|> Map.put_new(:btn_width, keyboard.c_width * 0.05)
|> Map.put_new(
:btn_height,
(keyboard.height - keyboard.margin) / length(keyboard.layout.default) - keyboard.margin
)
state = %{layout: nil, keyboard: keyboard, height: vp_height, width: vp_width}
layout =
Enum.reduce(keyboard.layout, %{}, fn {name, layout}, acc ->
Map.put(acc, name, build_layout(%{state | layout: layout}, name))
end)
graph = Map.get(layout, :default)
state =
Map.merge(state, %{
graph: graph,
layout: layout,
shift: false,
caps_lock: false,
id: opts[:id] || :keyboard
})
{:ok, state, push: graph}
end
@doc """
Returns the default configuration map for the QWERTY keyboard
"""
def default, do: @english_simple
@doc """
Returns the default configuration map for the numeric keypad
"""
def num_pad, do: @num_pad
def filter_event({:click, btn}, context, %{keyboard: keyboard} = state) do
filter_event({:key_up, apply(keyboard.transform, [btn])}, context, state)
end
def filter_event({:key_up, :caps_lock}, context, %{caps_lock: caps_lock} = state) do
filter_event({:key_up, :shift}, context, %{state | caps_lock: !caps_lock})
end
def filter_event({:key_up, :shift}, _, %{layout: layout, shift: false} = state) do
graph = layout |> Map.get(:shift)
{:halt, %{state | graph: graph, shift: true}, push: graph}
end
def filter_event({:key_up, :shift}, _, %{layout: layout, shift: true} = state) do
graph = layout |> Map.get(:default)
{:halt, %{state | graph: graph, shift: false}, push: graph}
end
def filter_event(
{:key_up, _char} = evt,
_,
%{layout: layout, caps_lock: false} = state
) do
graph = layout |> Map.get(:default)
{:cont, evt, %{state | graph: graph, shift: false}, push: graph}
end
def filter_event({:key_up, _} = evt, _, state) do
{:cont, evt, state}
end
defp build_layout(%{keyboard: keyboard, layout: layout}, _selected_layout) do
Graph.build(
font_size: keyboard.font_size,
translate: {0, Map.get(keyboard, :top, keyboard.c_height - keyboard.height)},
hidden: false
)
|> rect({keyboard.c_width, 5}, translate: {0, -5}, fill: :white)
|> rect({keyboard.c_width, keyboard.height}, keyboard.style)
|> build_row(layout, keyboard, 0)
end
defp build_row(graph, [], _, _), do: graph
defp build_row(graph, [row | tail], keyboard, top_offset) do
large_btn_count = row |> Enum.filter(&(byte_size(&1) > 1)) |> length()
small_btn_count = length(row) - large_btn_count
graph
|> group(
fn g ->
build_btn(g, row, keyboard, 0, large_btn_count, small_btn_count)
end,
t: {0, top_offset + keyboard.margin}
)
|> build_row(tail, keyboard, top_offset + keyboard.btn_height + keyboard.margin)
end
defp build_btn(group, [], _, _, _, _), do: group
defp build_btn(group, [char | row], keyboard, x, large_btn_count, small_btn_count)
when byte_size(char) == 1 do
width = keyboard.btn_width
default_styles = [
width: width,
height: keyboard.btn_height,
button_font_size: keyboard.font_size,
theme: :secondary
]
style = apply(keyboard.btn_style, [char, keyboard])
width = Keyword.get(style, :width, width)
group
|> button(char, [id: char, t: {x + keyboard.margin, 0}] ++ default_styles ++ style)
|> build_btn(row, keyboard, x + width + keyboard.margin, large_btn_count, small_btn_count)
end
defp build_btn(group, [char | row], keyboard, x, large_btn_count, small_btn_count) do
used = (keyboard.btn_width + keyboard.margin) * small_btn_count
width =
(keyboard.c_width - used - large_btn_count * keyboard.margin - keyboard.margin) /
large_btn_count
default_styles = [
width: width,
height: keyboard.btn_height,
button_font_size: keyboard.font_size,
theme: :secondary
]
style = apply(keyboard.btn_style, [char, keyboard])
width = Keyword.get(style, :width, width)
group
|> button(char, [id: char, t: {x + keyboard.margin, 0}] ++ default_styles ++ style)
|> build_btn(row, keyboard, x + width + keyboard.margin, large_btn_count, small_btn_count)
end
@doc """
A callback that allows a button's contents to be transformed before being sent as input
"""
def transform("Space"), do: " "
def transform("Tab"), do: " "
def transform("Backpace"), do: :backspace
def transform("Shift"), do: :shift
def transform("Caps Lock"), do: :caps_lock
def transform("Enter"), do: :enter
def transform(char), do: char
@doc """
A callback that allows custom styles to be applied to each button.
"""
def btn_style(_char, _keyboard), do: []
@doc false
def num_pad_btn_style("+", keyboard),
do: [height: keyboard.btn_height * 2 + keyboard.margin, width: keyboard.c_width * 0.05]
def num_pad_btn_style("Enter", keyboard),
do: [height: keyboard.btn_height * 2 + keyboard.margin, width: keyboard.c_width * 0.05]
def num_pad_btn_style("0", keyboard), do: [width: keyboard.c_width * 0.05 * 2 + keyboard.margin]
def num_pad_btn_style(_char, keyboard), do: [width: keyboard.c_width * 0.05]
end
|
lib/speediview_ui/components/on_screen_keyboard.ex
| 0.896016 | 0.471527 |
on_screen_keyboard.ex
|
starcoder
|
defmodule Forth do
@opaque word :: integer | String.t()
@opaque actions :: %{optional(String.t()) => (stack -> stack)}
@opaque stack :: [word]
@opaque evaluator :: {stack, actions}
@non_words ~r/[^\s \x00\x01]+/
@doc """
Create a new evaluator.
"""
@spec new() :: evaluator
def new() do
{
[],
%{
"*" => &multiply/1,
"+" => &add/1,
"-" => &subtract/1,
"/" => ÷/1,
"DROP" => &drop/1,
"DUP" => &duplicate/1,
"OVER" => &over/1,
"SWAP" => &swap/1
}
}
end
@doc """
Evaluate an input string, updating the evaluator state.
"""
@spec eval(evaluator, String.t()) :: evaluator
def eval({stack, actions}, s) do
do_eval(to_words(s), actions, stack)
end
@doc """
Return the current stack as a string with the element on top of the stack
being the rightmost element in the string.
"""
@spec format_stack(evaluator) :: String.t()
def format_stack({stack, _}) do
stack
|> Enum.reverse()
|> Enum.join(" ")
end
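# Usage sketch (per the word definitions below; input words are upcased):
#
#   Forth.new()
#   |> Forth.eval("1 2 3 4")
#   |> Forth.eval(": square dup * ;")
#   |> Forth.eval("square")
#   |> Forth.format_stack()
#   #=> "1 2 3 16"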
## Helper Functions
@spec to_words(String.t()) :: stack
defp to_words(string) do
Regex.scan(@non_words, String.upcase(string))
|> List.flatten()
|> Enum.map(&int_or_cmd/1)
end
@spec int_or_cmd(String.t()) :: word
defp int_or_cmd(word) do
case Integer.parse(word) do
{n, ""} -> n
# partial parses such as "1+" are treated as words, not integers
_ -> word
end
end
@spec do_eval(stack, actions, [integer]) :: evaluator
defp do_eval([], actions, stack), do: {stack, actions}
defp do_eval([":" | rest], actions, stack) do
{tail, new_actions} = define(rest, actions)
do_eval(tail, new_actions, stack)
end
defp do_eval([i | rest], actions, stack) when is_integer(i) do
do_eval(rest, actions, [i | stack])
end
defp do_eval([word | rest], actions, stack) do
case Map.get(actions, word) do
nil -> raise Forth.UnknownWord, word: word
action -> do_eval(rest, actions, action.(stack))
end
end
@spec define(stack, actions, String.t() | nil, (stack -> stack)) :: evaluator
defp define(stack, actions, name \\ nil, action \\ &Function.identity/1)
defp define([name | _], _, nil, _) when is_integer(name), do: raise(Forth.InvalidWord, word: name)
defp define([";" | rest], actions, name, action) do
{rest, Map.put(actions, name, action)}
end
defp define([name | rest], actions, nil, action) do
define(rest, actions, name, action)
end
defp define([n | rest], actions, name, action) when is_integer(n) do
# compose with the action built so far instead of discarding it
define(rest, actions, name, fn stack -> [n | action.(stack)] end)
end
defp define([word | rest], actions, name, action) do
case Map.get(actions, word) do
nil -> raise(Forth.UnknownWord, word: word)
existing -> define(rest, actions, name, fn stack -> existing.(action.(stack)) end)
end
end
## Default Stack Functions
defp add([n, m | rest]), do: [m + n | rest]
defp add(_), do: raise(Forth.StackUnderflow)
defp divide([0 | _]), do: raise(Forth.DivisionByZero)
defp divide([n, m | rest]), do: [div(m, n) | rest]
defp divide(_), do: raise(Forth.StackUnderflow)
defp drop([_ | rest]), do: rest
defp drop(_), do: raise(Forth.StackUnderflow)
defp duplicate([n | rest]), do: [n, n | rest]
defp duplicate(_), do: raise(Forth.StackUnderflow)
defp multiply([n, m | rest]), do: [m * n | rest]
defp multiply(_), do: raise(Forth.StackUnderflow)
defp over(stack = [_, m | _]), do: [m | stack]
defp over(_), do: raise(Forth.StackUnderflow)
defp subtract([n, m | rest]), do: [m - n | rest]
defp subtract(_), do: raise(Forth.StackUnderflow)
defp swap([n, m | rest]), do: [m, n | rest]
defp swap(_), do: raise(Forth.StackUnderflow)
## Exceptions
defmodule StackUnderflow do
defexception []
def message(_), do: "stack underflow"
end
defmodule InvalidWord do
defexception word: nil
def message(e), do: "invalid word: #{inspect(e.word)}"
end
defmodule UnknownWord do
defexception word: nil
def message(e), do: "unknown word: #{inspect(e.word)}"
end
defmodule DivisionByZero do
defexception []
def message(_), do: "division by zero"
end
end
|
exercism/forth/forth.ex
| 0.719679 | 0.487734 |
forth.ex
|
starcoder
|
defmodule Calendarize do
@moduledoc """
A small utility that generates a calendar month array given a date.
"""
alias Date
alias Timex
@type build_options :: [week_start: Timex.Types.weekstart()]
@doc """
Generates a calendar month array given a date. Currently, `week_start` is the only option, and it can be any integer 1..7 or
any string accepted by [Timex.day_to_num()](https://hexdocs.pm/timex/Timex.html#day_to_num/1). If not specified, `week_start`
defaults to :sun
## Examples
iex> Calendarize.build(~D[2020-05-15])
[
[0, 0, 0, 0, 0, 1, 2],
[3, 4, 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14, 15, 16],
[17, 18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29, 30],
[31, 0, 0, 0, 0, 0, 0]
]
iex> Calendarize.build(~D[2020-05-15], week_start: :mon)
[
[0, 0, 0, 0, 1, 2, 3],
[4, 5, 6, 7, 8, 9, 10],
[11, 12, 13, 14, 15, 16, 17],
[18, 19, 20, 21, 22, 23, 24],
[25, 26, 27, 28, 29, 30, 31],
[0, 0, 0, 0, 0, 0, 0]
]
iex> Calendarize.build(Timex.now, week_start: :mon)
[
[0, 0, 0, 0, 1, 2, 3],
[4, 5, 6, 7, 8, 9, 10],
[11, 12, 13, 14, 15, 16, 17],
[18, 19, 20, 21, 22, 23, 24],
[25, 26, 27, 28, 29, 30, 31],
[0, 0, 0, 0, 0, 0, 0]
]
"""
@spec build(DateTime.t() | Date.t(), build_options()) :: list()
def build(%dt{} = date, opts \\ []) when dt in [DateTime, Date] do
get_dates(date, Keyword.get(opts, :week_start, :sun))
end
defp get_dates(date, week_start) do
{:ok, first_day_of_month} = Date.new(date.year, date.month, 1)
end_day = Date.days_in_month(date)
start_day = Timex.days_to_beginning_of_week(first_day_of_month, week_start)
for week <- 1..6 do
for day <- 0..6 do
get_date(week, day, start_day, end_day)
end
end
end
defp get_date(week, day_in_week, start_day, end_day) do
day_in_month = (week - 1) * 7 + day_in_week
if day_in_month < start_day or day_in_month > end_day + start_day - 1 do
0
else
day_in_month - start_day + 1
end
end
end
|
lib/calendarize.ex
| 0.902512 | 0.645616 |
calendarize.ex
|
starcoder
|
defmodule Credo.Execution.ExecutionTiming do
@moduledoc """
The `ExecutionTiming` module can help in timing the execution of code parts and
storing those timing inside the `Credo.Execution` struct.
"""
use GenServer
alias Credo.Execution
@doc """
Runs the given `fun` and prints the time it took with the given `label`.
iex> Credo.Execution.ExecutionTiming.inspect("foo", fn -> some_complicated_stuff() end)
foo: "51ms"
"""
def inspect(label, fun) do
{time, result} = :timer.tc(fun)
time
|> format_time()
|> IO.inspect(label: label)
# credo:disable-for-previous-line Credo.Check.Warning.IoInspect
result
end
@doc """
Returns the current timestamp in the same format (microseconds) as the returned starting times of `run/1`.
"""
def now(), do: :os.system_time(:microsecond)
@doc """
Runs the given `fun` and returns a tuple of `{started_at, time, result}`.
iex> Credo.Execution.ExecutionTiming.run(fn -> some_complicated_stuff() end)
{1540540119448181, 51284, [:whatever, :fun, :returned]}
"""
def run(fun) do
started_at = now()
{time, result} = :timer.tc(fun)
{started_at, time, result}
end
@doc "Same as `run/1` but takes `fun` and `args` separately."
def run(fun, args) do
started_at = now()
{time, result} = :timer.tc(fun, args)
{started_at, time, result}
end
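# Sketch: timing a unit of work and storing it on the current execution
# (assumes `exec` is a %Credo.Execution{} whose timing server was started,
# and `expensive_check/1` is a hypothetical function):
#
#   {started_at, duration, _result} =
#     ExecutionTiming.run(&expensive_check/1, [source_file])
#
#   ExecutionTiming.append(exec, [task: :expensive_check], started_at, duration)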
@doc """
Adds a timing to the given `exec` using the given values of `tags`, `started_at` and `duration`.
"""
def append(%Execution{timing_pid: pid}, tags, started_at, duration) do
spawn(fn ->
GenServer.call(pid, {:append, tags, started_at, duration})
end)
end
@doc """
Returns all timings for the given `exec`.
"""
def all(%Execution{timing_pid: pid}) do
GenServer.call(pid, :all)
end
@doc """
Groups all timings for the given `exec` and `tag_name`.
"""
def grouped_by_tag(exec, tag_name) do
map =
exec
|> all()
|> Enum.filter(fn {tags, _started_at, _time} -> tags[tag_name] end)
|> Enum.group_by(fn {tags, _started_at, _time} -> tags[tag_name] end)
map
|> Map.keys()
|> Enum.map(fn map_key ->
sum = Enum.reduce(map[map_key], 0, fn {_tags, _, time}, acc -> time + acc end)
{[{tag_name, map_key}, {:accumulated, true}], nil, sum}
end)
end
@doc """
Returns all timings for the given `exec` and `tag_name`.
"""
def by_tag(exec, tag_name) do
exec
|> all()
|> Enum.filter(fn {tags, _started_at, _time} -> tags[tag_name] end)
end
@doc """
Returns all timings for the given `exec` and `tag_name` where the tag's value also matches the given `regex`.
"""
def by_tag(exec, tag_name, regex) do
exec
|> all()
|> Enum.filter(fn {tags, _started_at, _time} ->
tags[tag_name] && to_string(tags[tag_name]) =~ regex
end)
end
@doc """
Returns the earliest timestamp for the given `exec`.
"""
def started_at(exec) do
{_, started_at, _} =
exec
|> all()
|> List.first()
started_at
end
@doc """
Returns the latest timestamp plus its duration for the given `exec`.
"""
def ended_at(exec) do
{_, started_at, duration} =
exec
|> all()
|> List.last()
started_at + duration
end
defp format_time(time) do
cond do
time > 1_000_000 ->
"#{div(time, 1_000_000)}s"
time > 1_000 ->
"#{div(time, 1_000)}ms"
true ->
"#{time}μs"
end
end
# callbacks
@doc false
def start_server(exec) do
{:ok, pid} = GenServer.start_link(__MODULE__, [])
%Execution{exec | timing_pid: pid}
end
@doc false
def init(_) do
{:ok, []}
end
@doc false
def handle_call({:append, tags, started_at, time}, _from, current_state) do
new_current_state = [{tags, started_at, time} | current_state]
{:reply, :ok, new_current_state}
end
@doc false
def handle_call(:all, _from, current_state) do
list = Enum.sort_by(current_state, fn {_, started_at, _} -> started_at end)
{:reply, list, current_state}
end
end
|
lib/credo/execution/execution_timing.ex
| 0.887229 | 0.547525 |
execution_timing.ex
|
starcoder
|
defmodule NewRelic.Plug.Repo do
@moduledoc """
Defines a module that provides instrumented methods for a standard `Ecto.Repo`.
```
defmodule MyApp.Repo do
use Ecto.Repo, otp_app: :my_app
defmodule NewRelic do
use NewRelic.Plug.Repo, repo: MyApp.Repo
end
end
```
Anywhere that the original repository is used to make a database call, the wrapper module can be
used in its place. For example, `MyApp.Repo.all(User)` can be replaced with
`MyApp.Repo.NewRelic.all(User)`. No changes are needed for `transaction()` and `rollback()`.
Queries are instrumented against the New Relic transaction that was set up by
`NewRelic.Plug.Phoenix`.
"""
defmacro __using__(opts) do
quote bind_quoted: [opts: opts] do
repo = Keyword.fetch!(opts, :repo)
@repo repo
import NewRelic.Plug.Instrumentation
@spec transaction(Keyword.t, fun) :: {:ok, any} | {:error, any}
def transaction(opts \\ [], fun) when is_list(opts) do
repo.transaction(opts, fun)
end
@spec rollback(any) :: no_return
def rollback(value) do
repo.rollback(value)
end
@spec all(Ecto.Query.t, Keyword.t) :: [Ecto.Schema.t] | no_return
def all(queryable, opts \\ []) do
instrument_db(:all, queryable, opts, fn() ->
repo.all(queryable, opts)
end)
end
@spec get(Ecto.Queryable.t, term, Keyword.t) :: Ecto.Schema.t | nil | no_return
def get(queryable, id, opts \\ []) do
instrument_db(:get, queryable, opts, fn() ->
repo.get(queryable, id, opts)
end)
end
@spec get!(Ecto.Queryable.t, term, Keyword.t) :: Ecto.Schema.t | nil | no_return
def get!(queryable, id, opts \\ []) do
instrument_db(:get!, queryable, opts, fn() ->
repo.get!(queryable, id, opts)
end)
end
@spec get_by(Ecto.Queryable.t, Keyword.t | Map.t, Keyword.t) :: Ecto.Schema.t | nil | no_return
def get_by(queryable, clauses, opts \\ []) do
instrument_db(:get_by, queryable, opts, fn() ->
repo.get_by(queryable, clauses, opts)
end)
end
@spec get_by!(Ecto.Queryable.t, Keyword.t | Map.t, Keyword.t) :: Ecto.Schema.t | nil | no_return
def get_by!(queryable, clauses, opts \\ []) do
instrument_db(:get_by!, queryable, opts, fn() ->
repo.get_by!(queryable, clauses, opts)
end)
end
@spec one(Ecto.Queryable.t, Keyword.t) :: Ecto.Schema.t | nil | no_return
def one(queryable, opts \\ []) do
instrument_db(:one, queryable, opts, fn() ->
repo.one(queryable, opts)
end)
end
@spec one!(Ecto.Queryable.t, Keyword.t) :: Ecto.Schema.t | nil | no_return
def one!(queryable, opts \\ []) do
instrument_db(:one!, queryable, opts, fn() ->
repo.one!(queryable, opts)
end)
end
@spec update_all(Macro.t, Keyword.t, Keyword.t) :: {integer, nil} | no_return
def update_all(queryable, updates, opts \\ []) do
instrument_db(:update_all, queryable, opts, fn() ->
repo.update_all(queryable, updates, opts)
end)
end
@spec delete_all(Ecto.Queryable.t, Keyword.t) :: {integer, nil} | no_return
def delete_all(queryable, opts \\ []) do
instrument_db(:delete_all, queryable, opts, fn() ->
repo.delete_all(queryable, opts)
end)
end
@spec insert(Ecto.Schema.t | Ecto.Changeset.t, Keyword.t) :: {:ok, Ecto.Schema.t} | {:error, Ecto.Changeset.t}
def insert(model, opts \\ []) do
instrument_db(:insert, model, opts, fn() ->
repo.insert(model, opts)
end)
end
@spec update(Ecto.Changeset.t, Keyword.t) :: {:ok, Ecto.Schema.t} | {:error, Ecto.Changeset.t}
def update(model, opts \\ []) do
instrument_db(:update, model, opts, fn() ->
repo.update(model, opts)
end)
end
@spec insert_or_update(Ecto.Changeset.t, Keyword.t) :: {:ok, Ecto.Schema.t} | {:error, Ecto.Changeset.t}
def insert_or_update(changeset, opts \\ []) do
instrument_db(:insert_or_update, changeset, opts, fn() ->
repo.insert_or_update(changeset, opts)
end)
end
@spec delete(Ecto.Schema.t, Keyword.t) :: {:ok, Ecto.Schema.t} | {:error, Ecto.Changeset.t}
def delete(model, opts \\ []) do
instrument_db(:delete, model, opts, fn() ->
repo.delete(model, opts)
end)
end
@spec insert!(Ecto.Schema.t, Keyword.t) :: Ecto.Schema.t | no_return
def insert!(model, opts \\ []) do
instrument_db(:insert!, model, opts, fn() ->
repo.insert!(model, opts)
end)
end
@spec update!(Ecto.Schema.t, Keyword.t) :: Ecto.Schema.t | no_return
def update!(model, opts \\ []) do
instrument_db(:update!, model, opts, fn() ->
repo.update!(model, opts)
end)
end
@spec insert_or_update!(Ecto.Changeset.t, Keyword.t) :: Ecto.Schema.t | no_return
def insert_or_update!(changeset, opts \\ []) do
instrument_db(:insert_or_update!, changeset, opts, fn() ->
repo.insert_or_update!(changeset, opts)
end)
end
@spec delete!(Ecto.Schema.t, Keyword.t) :: Ecto.Schema.t | no_return
def delete!(model, opts \\ []) do
instrument_db(:delete!, model, opts, fn() ->
repo.delete!(model, opts)
end)
end
@spec preload([Ecto.Schema.t] | Ecto.Schema.t, preloads :: term) :: [Ecto.Schema.t] | Ecto.Schema.t
def preload(model_or_models, preloads, opts \\ []) do
instrument_db(:preload, model_or_models, opts, fn() ->
repo.preload(model_or_models, preloads)
end)
end
@spec repo :: Ecto.Repo.t
defp repo do
@repo
end
end
end
end
|
lib/new_relic/plug/repo.ex
| 0.893474 | 0.753002 |
repo.ex
|
starcoder
|
defmodule Nostrum.Util do
@moduledoc """
Utility functions
"""
alias Nostrum.{Api, Constants, Snowflake}
alias Nostrum.Shard.Session
alias Nostrum.Struct.WSState
require Logger
@doc """
Helper for defining all the methods used for struct and encoding transformations.
## Example
``` Elixir
Nostrum.Util.nostrum_struct(%{
author: User,
mentions: [User],
mention_roles: [User],
embeds: [Embed]
})
```
"""
defmacro nostrum_struct(body) do
quote do
@derive [Poison.Encoder]
defstruct Map.keys(unquote(body))
def p_encode do
encoded =
for {k, v} <- unquote(body), v != nil, into: %{} do
case v do
[v] -> {k, [v.p_encode]}
v -> {k, v.p_encode}
end
end
struct(__ENV__.module, encoded)
end
def to_struct(map) do
alias Nostrum.Util
new_map =
for {k, v} <- unquote(body), into: %{} do
case v do
nil -> {k, Map.get(map, k)}
[v] -> {k, Util.enum_to_struct(Map.get(map, k), v)}
v -> {k, apply(v, :to_struct, [Map.get(map, k)])}
end
end
struct(__ENV__.module, new_map)
end
end
end
@doc """
Returns the number of milliseconds since unix epoch.
"""
@spec now() :: integer
def now do
DateTime.utc_now()
|> DateTime.to_unix(:millisecond)
end
@doc """
Returns the number of microseconds since unix epoch.
"""
@spec usec_now() :: integer
def usec_now do
DateTime.utc_now()
|> DateTime.to_unix(:microsecond)
end
@doc """
Returns the current date as an ISO formatted string.
"""
@spec now_iso() :: String.t()
def now_iso do
DateTime.utc_now()
|> DateTime.to_iso8601()
end
@doc false
def list_to_struct_list(list, struct) when is_list(list) do
Enum.map(list, &struct.to_struct(&1))
end
def enum_to_struct(nil, _struct), do: nil
def enum_to_struct(enum, struct) when is_list(enum), do: Enum.map(enum, &struct.to_struct(&1))
def enum_to_struct(enum, struct) when is_map(enum) do
for {k, v} <- enum, into: %{} do
{k, struct.to_struct(v)}
end
end
@doc """
Returns the number of shards.
This is not the number of currently active shards, but the number of shards specified
in your config.
"""
@spec num_shards() :: integer
def num_shards do
num =
with :auto <- Application.get_env(:nostrum, :num_shards, :auto),
{_url, shards} <- gateway(),
do: shards
if num == nil, do: 1, else: num
end
@doc false
def bangify_find(to_bang, find, cache_name) do
case to_bang do
{:ok, res} ->
res
{:error} ->
raise(Nostrum.Error.CacheError, finding: find, cache_name: cache_name)
{:error, _other} ->
raise(Nostrum.Error.CacheError, finding: find, cache_name: cache_name)
end
end
@doc """
Returns the gateway url and shard count for current websocket connections.
If by chance no gateway connection has been made, will fetch the url to use and store it
for future use.
"""
@spec gateway() :: {String.t(), integer}
def gateway do
case :ets.lookup(:gateway_url, "url") do
[] -> get_new_gateway_url()
[{"url", url, shards}] -> {url, shards}
end
end
defp get_new_gateway_url do
case Api.request(:get, Constants.gateway_bot(), "") do
{:error, %{status_code: 401}} ->
raise("Authentication rejected, invalid token")
{:error, %{status_code: code, message: message}} ->
raise(Nostrum.Error.ApiError, status_code: code, message: message)
{:ok, body} ->
body = Poison.decode!(body)
"wss://" <> url = body["url"]
shards = if body["shards"], do: body["shards"], else: 1
:ets.insert(:gateway_url, {"url", url, shards})
{url, shards}
end
end
@doc """
Converts a map into an atom-keyed map.
Given a map with variable type keys, returns the same map with all keys as `atoms`.
To support maps keyed with integers (such as in Discord's interaction data),
binaries that appear to be integers will be parsed as such.
This function will attempt to convert keys to an existing atom, and if that fails will default to
creating a new atom while displaying a warning. The idea here is that we should be able to see
if any results from Discord are giving variable keys. Since we *will* define all
types of objects returned by Discord, the amount of new atoms created *SHOULD* be 0. 👀
"""
@spec safe_atom_map(map) :: map
def safe_atom_map(term) do
cond do
is_map(term) ->
for {key, value} <- term, into: %{}, do: {maybe_to_atom(key), safe_atom_map(value)}
is_list(term) ->
Enum.map(term, fn item -> safe_atom_map(item) end)
true ->
term
end
end
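# For example (hypothetical payload): string keys become atoms, digit-only
# keys become integers, and values are only transformed recursively:
#
#   iex> Nostrum.Util.safe_atom_map(%{"id" => 1, "1" => %{"name" => "x"}})
#   %{:id => 1, 1 => %{name: "x"}}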
@doc """
Attempts to convert a string to an atom.
Binary `token`s that consist of digits are assumed to be snowflakes, and will
be parsed as such.
If atom does not currently exist, will warn that we're doing an unsafe conversion.
"""
@spec maybe_to_atom(atom | String.t()) :: atom | integer
def maybe_to_atom(token) when is_atom(token), do: token
def maybe_to_atom(<<head, _rest::binary>> = token) when head in ?1..?9 do
case Integer.parse(token) do
{snowflake, ""} ->
snowflake
_ ->
:erlang.binary_to_atom(token)
end
end
def maybe_to_atom(token) do
String.to_existing_atom(token)
rescue
_ ->
Logger.debug(fn -> "Converting string to non-existing atom: #{token}" end)
String.to_atom(token)
end
# Generic casting function
@doc false
@spec cast(term, module | {:list, term} | {:struct, term} | {:index, [term], term}) :: term
def cast(value, type)
def cast(nil, _type), do: nil
def cast(values, {:list, type}) when is_list(values) do
Enum.map(values, fn value ->
cast(value, type)
end)
end
# Handles the case where the given term is already indexed
def cast(values, {:index, _index_by, _type}) when is_map(values), do: values
def cast(values, {:index, index_by, type}) when is_list(values) do
values
|> Enum.into(%{}, &{&1 |> get_in(index_by) |> cast(Snowflake), cast(&1, type)})
end
def cast(value, {:struct, module}) when is_map(value) do
module.to_struct(value)
end
def cast(value, module) do
case module.cast(value) do
{:ok, result} -> result
_ -> value
end
end
@doc false
@spec fullsweep_after() :: {:fullsweep_after, non_neg_integer}
def fullsweep_after do
{:fullsweep_after,
Application.get_env(
:nostrum,
:fullsweep_after_default,
:erlang.system_info(:fullsweep_after) |> elem(1)
)}
end
@doc """
Gets the latency of the shard connection from a `Nostrum.Struct.WSState.t()` struct.
Returns the latency in milliseconds as an integer, returning nil if unknown.
"""
@spec get_shard_latency(WSState.t()) :: non_neg_integer | nil
def get_shard_latency(%WSState{last_heartbeat_ack: nil}), do: nil
def get_shard_latency(%WSState{last_heartbeat_send: nil}), do: nil
def get_shard_latency(%WSState{} = state) do
latency = DateTime.diff(state.last_heartbeat_ack, state.last_heartbeat_send, :millisecond)
max(0, latency + if(latency < 0, do: state.heartbeat_interval, else: 0))
end
@doc """
Gets the latencies of all shard connections.
Calls `get_shard_latency/1` on all shards and returns a map whose keys are
shard nums and whose values are latencies in milliseconds.
"""
@spec get_all_shard_latencies :: %{WSState.shard_num() => non_neg_integer | nil}
def get_all_shard_latencies do
ShardSupervisor
|> Supervisor.which_children()
|> Enum.filter(fn {_id, _pid, _type, [modules]} -> modules == Nostrum.Shard end)
|> Enum.map(fn {_id, pid, _type, _modules} -> Supervisor.which_children(pid) end)
|> List.flatten()
|> Enum.map(fn {_id, pid, _type, _modules} -> Session.get_ws_state(pid) end)
|> Enum.reduce(%{}, fn s, m -> Map.put(m, s.shard_num, get_shard_latency(s)) end)
end
@doc """
Since we're being sacrilegious and converting strings to atoms from the WS, there will be some
atoms that we see that aren't defined in any Discord structs. This method mainly serves as a
means to define those atoms once so the user isn't warned about them in the
`Nostrum.Util.maybe_to_atom/1` function when they are in fact harmless.
"""
def unused_atoms do
[
:active,
:audio,
:audio_codec,
:audio_ssrc,
:channel_overrides,
:convert_emoticons,
:detect_platform_accounts,
:developer_mode,
:enable_tts_command,
:encodings,
:experiments,
:friend_source_flags,
:friend_sync,
:guild_positions,
:inline_attachment_media,
:inline_embed_media,
:last_message_id,
:locale,
:max_bitrate,
:media_session_id,
:message_display_compact,
:message_notifications,
:mobile_push,
:modes,
:muted,
:recipients,
:referenced_message,
:render_embeds,
:render_reactions,
:require_colons,
:restricted_guilds,
:rid,
:rtx_ssrc,
:scale_resolution_down_by,
:show_current_game,
:suppress_everyone,
:theme,
:video,
:video_codec,
:video_ssrc,
:visibility
]
end
end
|
lib/nostrum/util.ex
| 0.899707 | 0.744285 |
util.ex
|
starcoder
|
defmodule Membrane.RTP.H264.Depayloader do
@moduledoc """
Depayloads H264 RTP payloads into H264 NAL Units.
Based on [RFC 6184](https://tools.ietf.org/html/rfc6184).
Supported types: Single NALU, FU-A, STAP-A.
"""
use Membrane.Filter
require Membrane.Logger
alias Membrane.Buffer
alias Membrane.Event.Discontinuity
alias Membrane.H264
alias Membrane.{RemoteStream, RTP}
alias Membrane.RTP.H264.{FU, NAL, StapA}
@frame_prefix <<1::32>>
def_input_pad :input, caps: RTP, demand_mode: :auto
def_output_pad :output,
caps: {RemoteStream, content_format: H264, type: :packetized},
demand_mode: :auto
defmodule State do
@moduledoc false
defstruct parser_acc: nil
end
@impl true
def handle_init(_opts) do
{:ok, %State{}}
end
@impl true
def handle_caps(:input, _caps, _context, state) do
caps = %RemoteStream{content_format: H264, type: :packetized}
{{:ok, caps: {:output, caps}}, state}
end
@impl true
def handle_process(:input, %Buffer{payload: ""}, _ctx, state) do
Membrane.Logger.debug("Received empty RTP packet. Ignoring")
{:ok, state}
end
@impl true
def handle_process(:input, buffer, _ctx, state) do
with {:ok, {header, _payload} = nal} <- NAL.Header.parse_unit_header(buffer.payload),
unit_type = NAL.Header.decode_type(header),
{{:ok, actions}, state} <- handle_unit_type(unit_type, nal, buffer, state) do
{{:ok, actions}, state}
else
{:error, reason} ->
log_malformed_buffer(buffer, reason)
{:ok, %State{state | parser_acc: nil}}
end
end
@impl true
def handle_event(:input, %Discontinuity{} = event, _context, %State{parser_acc: %FU{}} = state),
do: {{:ok, forward: event}, %State{state | parser_acc: nil}}
@impl true
def handle_event(pad, event, context, state), do: super(pad, event, context, state)
defp handle_unit_type(:single_nalu, _nal, buffer, state) do
buffer_output(buffer.payload, buffer, state)
end
defp handle_unit_type(:fu_a, {header, data}, buffer, state) do
%Buffer{metadata: %{rtp: %{sequence_number: seq_num}}} = buffer
case FU.parse(data, seq_num, map_state_to_fu(state)) do
{:ok, {data, type}} ->
data = NAL.Header.add_header(data, 0, header.nal_ref_idc, type)
buffer_output(data, buffer, %State{state | parser_acc: nil})
{:incomplete, fu} ->
{{:ok, []}, %State{state | parser_acc: fu}}
{:error, _reason} = error ->
error
end
end
defp handle_unit_type(:stap_a, {_header, data}, buffer, state) do
with {:ok, result} <- StapA.parse(data) do
buffers = Enum.map(result, &%Buffer{buffer | payload: add_prefix(&1)})
{{:ok, buffer: {:output, buffers}}, state}
end
end
defp buffer_output(data, buffer, state),
do: {{:ok, action_from_data(data, buffer)}, state}
defp action_from_data(data, buffer) do
[buffer: {:output, %Buffer{buffer | payload: add_prefix(data)}}]
end
defp add_prefix(data), do: @frame_prefix <> data
defp map_state_to_fu(%State{parser_acc: %FU{} = fu}), do: fu
defp map_state_to_fu(_state), do: %FU{}
defp log_malformed_buffer(packet, reason) do
Membrane.Logger.warn("""
An error occurred while parsing H264 RTP payload.
Reason: #{reason}
Packet: #{inspect(packet, limit: :infinity)}
""")
end
end
|
lib/rtp_h264/depayloader.ex
| 0.767036 | 0.400251 |
depayloader.ex
|
starcoder
|
defmodule Nostrum.Cache.GuildCache do
@table_name :nostrum_guilds
@moduledoc """
Functions for retrieving guild states.
The ETS table name associated with the Guild Cache is `#{@table_name}`.
Besides the methods provided here, you can call any other ETS methods
on the table.
"""
alias Nostrum.Cache.Mapping.ChannelGuild
alias Nostrum.Snowflake
alias Nostrum.Struct.Channel
alias Nostrum.Struct.Emoji
alias Nostrum.Struct.Guild
alias Nostrum.Struct.Guild.Member
alias Nostrum.Struct.Guild.Role
alias Nostrum.Struct.Message
alias Nostrum.Util
import Nostrum.Snowflake, only: [is_snowflake: 1]
@type clause ::
{:id, Guild.id()}
| {:channel_id, Channel.id()}
| {:message, Message.t()}
@type clauses :: [clause] | map
@type selector :: (Guild.t() -> any)
@type reason ::
:id_not_found
| :id_not_found_on_guild_lookup
defguardp is_selector(term) when is_function(term, 1)
@doc "Retrieve the ETS table name used for the cache."
@spec tabname :: atom()
def tabname, do: @table_name
@doc """
Retrieves all `Nostrum.Struct.Guild` from the cache as a list.
"""
@spec all() :: Enum.t()
def all do
@table_name
|> :ets.tab2list()
|> Stream.map(&elem(&1, 1))
end
@doc """
Selects values using a `selector` from all `Nostrum.Struct.Guild` in the cache.
"""
@spec select_all(selector) :: Enum.t()
def select_all(selector)
def select_all(selector) when is_selector(selector) do
:ets.foldl(fn {_id, guild}, acc -> [selector.(guild) | acc] end, [], @table_name)
end
@doc """
Retrieves a single `Nostrum.Struct.Guild` from the cache via its `id`.
Returns `{:error, reason}` if no result was found.
## Examples
```Elixir
iex> Nostrum.Cache.GuildCache.get(0)
{:ok, %Nostrum.Struct.Guild{id: 0}}
iex> Nostrum.Cache.GuildCache.get(10)
{:error, :id_not_found_on_guild_lookup}
```
"""
@spec get(Guild.id()) :: {:ok, Guild.t()} | {:error, reason}
def get(id) do
select(id, fn guild -> guild end)
end
@doc ~S"""
Same as `get/1`, but raises `Nostrum.Error.CacheError` in case of failure.
"""
@spec get!(Guild.id()) :: Guild.t() | no_return
def get!(id), do: get(id) |> Util.bangify_find(id, __MODULE__)
@doc """
Retrieves a single `Nostrum.Struct.Guild` that matches the `clauses`.
Returns `{:error, reason}` if no result was found.
```Elixir
iex> Nostrum.Cache.GuildCache.get_by(id: 0)
{:ok, %Nostrum.Struct.Guild{id: 0}}
iex> Nostrum.Cache.GuildCache.get_by(%{id: 0})
{:ok, %Nostrum.Struct.Guild{id: 0}}
iex> Nostrum.Cache.GuildCache.get_by(id: 10)
{:error, :id_not_found_on_guild_lookup}
```
"""
@spec get_by(clauses) :: {:ok, Guild.t()} | {:error, reason}
def get_by(clauses) do
select_by(clauses, fn guild -> guild end)
end
@doc ~S"""
Same as `get_by/1`, but raises `Nostrum.Error.CacheError` in case of failure.
"""
@spec get_by!(clauses) :: Guild.t() | no_return
def get_by!(clauses), do: get_by(clauses) |> Util.bangify_find(clauses, __MODULE__)
@doc """
Selects values using a `selector` from a `Nostrum.Struct.Guild`.
Returns `{:error, reason}` if no result was found.
## Examples
```Elixir
iex> Nostrum.Cache.GuildCache.select(0, fn guild -> guild.id end)
{:ok, 0}
iex> Nostrum.Cache.GuildCache.select(10, fn guild -> guild.id end)
{:error, :id_not_found_on_guild_lookup}
```
"""
@spec select(Guild.id(), selector) :: {:ok, any} | {:error, reason}
def select(id, selector) do
select_by(%{id: id}, selector)
end
@doc ~S"""
Same as `select/2`, but raises `Nostrum.Error.CacheError` in case of failure.
"""
@spec select!(Guild.id(), selector) :: any | no_return
def select!(id, selector), do: select(id, selector) |> Util.bangify_find(id, __MODULE__)
@doc """
Selects values using a `selector` from a `Nostrum.Struct.Guild` that matches
the `clauses`.
Returns `{:error, reason}` if no result was found.
```Elixir
iex> Nostrum.Cache.GuildCache.select_by([id: 0], fn guild -> guild.id end)
{:ok, 0}
iex> Nostrum.Cache.GuildCache.select_by(%{id: 0}, fn guild -> guild.id end)
{:ok, 0}
iex> Nostrum.Cache.GuildCache.select_by([id: 10], fn guild -> guild.id end)
{:error, :id_not_found_on_guild_lookup}
```
"""
@spec select_by(clauses, selector) :: {:ok, any} | {:error, reason}
def select_by(clauses, selector)
def select_by(clauses, selector) when is_list(clauses) and is_selector(selector),
do: select_by(Map.new(clauses), selector)
def select_by(%{id: id}, selector) when is_snowflake(id) and is_selector(selector) do
case :ets.lookup(@table_name, id) do
[{^id, guild}] ->
selection = selector.(guild)
{:ok, selection}
[] ->
{:error, :id_not_found_on_guild_lookup}
end
end
def select_by(%{channel_id: channel_id}, selector)
when is_snowflake(channel_id) and is_selector(selector) do
case ChannelGuild.get_guild(channel_id) do
{:ok, guild_id} -> select_by(%{id: guild_id}, selector)
{:error, _} = error -> error
end
end
def select_by(%{message: %Message{channel_id: channel_id}}, selector) do
select_by(%{channel_id: channel_id}, selector)
end
@doc ~S"""
Same as `select_by/2`, but raises `Nostrum.Error.CacheError` in case of failure.
"""
@spec select_by!(clauses, selector) :: any | no_return
def select_by!(clauses, selector),
do: select_by(clauses, selector) |> Util.bangify_find(clauses, __MODULE__)
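# The `:channel_id` and `:message` clauses resolve the guild through the
# channel-to-guild mapping first, e.g. (hypothetical snowflake):
#
#   Nostrum.Cache.GuildCache.select_by(%{channel_id: 381889573426429952},
#     fn guild -> guild.name end)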
# IMPLEMENTATION
@doc false
@spec create(Guild.t()) :: true
def create(guild) do
true = :ets.insert_new(@table_name, {guild.id, guild})
end
@doc false
@spec update(map()) :: {Guild.t(), Guild.t()}
def update(payload) do
[{_id, old_guild}] = :ets.lookup(@table_name, payload.id)
casted = Util.cast(payload, {:struct, Guild})
new_guild = Guild.merge(old_guild, casted)
true = :ets.update_element(@table_name, payload.id, {2, new_guild})
{old_guild, new_guild}
end
@doc false
@spec delete(Guild.id()) :: Guild.t() | nil
def delete(guild_id) do
# Returns the old guild, if cached
case :ets.take(@table_name, guild_id) do
[guild] -> guild
[] -> nil
end
end
@doc false
@spec channel_create(Guild.id(), map()) :: Channel.t()
def channel_create(guild_id, channel) do
[{_id, guild}] = :ets.lookup(@table_name, guild_id)
new_channel = Util.cast(channel, {:struct, Channel})
new_channels = Map.put(guild.channels, channel.id, new_channel)
new_guild = %{guild | channels: new_channels}
true = :ets.update_element(@table_name, guild_id, {2, new_guild})
new_channel
end
@doc false
@spec channel_delete(Guild.id(), Channel.id()) :: Channel.t() | :noop
def channel_delete(guild_id, channel_id) do
[{_id, guild}] = :ets.lookup(@table_name, guild_id)
{popped, new_channels} = Map.pop(guild.channels, channel_id)
new_guild = %{guild | channels: new_channels}
true = :ets.update_element(@table_name, guild_id, {2, new_guild})
if popped, do: popped, else: :noop
end
@doc false
@spec channel_update(Guild.id(), map()) :: {Channel.t(), Channel.t()}
def channel_update(guild_id, channel) do
[{_id, guild}] = :ets.lookup(@table_name, guild_id)
{old, new, new_channels} = upsert(guild.channels, channel.id, channel, Channel)
new_guild = %{guild | channels: new_channels}
true = :ets.update_element(@table_name, guild_id, {2, new_guild})
{old, new}
end
@doc false
@spec emoji_update(Guild.id(), [map()]) :: {[Emoji.t()], [Emoji.t()]}
def emoji_update(guild_id, emojis) do
[{_id, guild}] = :ets.lookup(@table_name, guild_id)
casted = Util.cast(emojis, {:list, {:struct, Emoji}})
new = %{guild | emojis: casted}
true = :ets.update_element(@table_name, guild_id, {2, new})
{guild.emojis, casted}
end
@doc false
@spec member_add(Guild.id(), map()) :: Member.t()
def member_add(guild_id, payload) do
[{_id, guild}] = :ets.lookup(@table_name, guild_id)
{_old, member, new_members} = upsert(guild.members, payload.user.id, payload, Member)
new = %{guild | members: new_members, member_count: guild.member_count + 1}
true = :ets.update_element(@table_name, guild_id, {2, new})
member
end
@doc false
@spec member_remove(Guild.id(), map()) :: {Guild.id(), Member.t()} | :noop
def member_remove(guild_id, user) do
[{_id, guild}] = :ets.lookup(@table_name, guild_id)
{popped, new_members} = Map.pop(guild.members, user.id)
new_guild = %{guild | members: new_members, member_count: guild.member_count - 1}
true = :ets.update_element(@table_name, guild_id, {2, new_guild})
if popped, do: {guild_id, popped}, else: :noop
end
@doc false
@spec member_update(Guild.id(), map()) :: {Guild.id(), Member.t() | nil, Member.t()}
def member_update(guild_id, member) do
# We may retrieve a GUILD_MEMBER_UPDATE event for our own user even if we
# have the required intents to retrieve it for other members disabled, as
# outlined in issue https://github.com/Kraigie/nostrum/issues/293. In
# that case, we will not have the guild cached.
case :ets.lookup(@table_name, guild_id) do
[{_id, guild}] ->
{old, new, new_members} = upsert(guild.members, member.user.id, member, Member)
new_guild = %{guild | members: new_members}
true = :ets.update_element(@table_name, guild_id, {2, new_guild})
{guild_id, old, new}
[] ->
new = Util.cast(member, {:struct, Member})
{guild_id, nil, new}
end
end
@doc false
def member_chunk(guild_id, member_chunk) do
# CHONK like that one cat of craig
[{_id, guild}] = :ets.lookup(@table_name, guild_id)
new_members =
Enum.reduce(member_chunk, guild.members, fn m, acc ->
Map.put(acc, m.user.id, Util.cast(m, {:struct, Member}))
end)
# XXX: do we not need to update member count here?
new = %{guild | members: new_members}
true = :ets.update_element(@table_name, guild_id, {2, new})
end
@doc false
@spec role_create(Guild.id(), map()) :: Role.t()
def role_create(guild_id, role) do
[{_id, guild}] = :ets.lookup(@table_name, guild_id)
{_old, new, new_roles} = upsert(guild.roles, role.id, role, Role)
new_guild = %{guild | roles: new_roles}
true = :ets.update_element(@table_name, guild_id, {2, new_guild})
new
end
@doc false
@spec role_delete(Guild.id(), Role.id()) :: {Guild.id(), Role.t()} | :noop
def role_delete(guild_id, role_id) do
[{_id, guild}] = :ets.lookup(@table_name, guild_id)
{popped, new_roles} = Map.pop(guild.roles, role_id)
new_guild = %{guild | roles: new_roles}
true = :ets.update_element(@table_name, guild_id, {2, new_guild})
if popped, do: {guild_id, popped}, else: :noop
end
@doc false
@spec role_update(Guild.id(), map()) :: {Role.t(), Role.t()}
def role_update(guild_id, role) do
[{_id, guild}] = :ets.lookup(@table_name, guild_id)
{old, new_role, new_roles} = upsert(guild.roles, role.id, role, Role)
new_guild = %{guild | roles: new_roles}
true = :ets.update_element(@table_name, guild_id, {2, new_guild})
{old, new_role}
end
@doc false
@spec voice_state_update(Guild.id(), map()) :: {Guild.id(), [map()]}
def voice_state_update(guild_id, payload) do
[{_id, guild}] = :ets.lookup(@table_name, guild_id)
# Trim the `member` from the update payload.
# Remove both `"member"` and `:member` in case of future key changes.
trimmed_update = Map.drop(payload, [:member, "member"])
state_without_user = Enum.reject(guild.voice_states, &(&1.user_id == trimmed_update.user_id))
# If the `channel_id` is nil, then the user is leaving.
# Otherwise, the voice state was updated.
new_state =
if(is_nil(trimmed_update.channel_id),
do: state_without_user,
else: [trimmed_update | state_without_user]
)
new_guild = %{guild | voice_states: new_state}
true = :ets.update_element(@table_name, guild_id, {2, new_guild})
{guild_id, new_state}
end
@spec upsert(%{required(Snowflake.t()) => struct}, Snowflake.t(), map, atom) ::
{struct | nil, struct, %{required(Snowflake.t()) => struct}}
defp upsert(map, key, new, struct) do
if Map.has_key?(map, key) do
old = Map.get(map, key)
new =
old
|> Map.from_struct()
|> Map.merge(new)
|> Util.cast({:struct, struct})
new_map = Map.put(map, key, new)
{old, new, new_map}
else
new = Util.cast(new, {:struct, struct})
{nil, new, Map.put(map, key, new)}
end
end
end
|
lib/nostrum/cache/guild_cache.ex
| 0.821403 | 0.613613 |
guild_cache.ex
|
starcoder
|
defmodule Ecto.Pools.SojournBroker do
@moduledoc """
Start a pool of connections using `sbroker`.
### Options
* `:pool_name` - The name of the pool supervisor
* `:pool_size` - The number of connections to keep in the pool (default: 10)
* `:min_backoff` - The minimum backoff on failed connect in milliseconds (default: 50)
* `:max_backoff` - The maximum backoff on failed connect in milliseconds (default: 5000)
* `:broker` - The `sbroker` module to use (default: `Ecto.Pools.SojournBroker.Timeout`)
* `:lazy` - When true, initial connections to the repo are lazily started (default: true)
* `:shutdown` - The shutdown method for the connections (default: 5000) (see Supervisor.Spec)
"""
alias Ecto.Pools.SojournBroker.Worker
@behaviour Ecto.Pool
@doc """
Starts a pool of connections for the given connection module and options.
* `conn_mod` - The connection module, see `Ecto.Adapters.Connection`
* `opts` - The options for the pool, the broker and the connections
"""
def start_link(conn_mod, opts) do
{:ok, _} = Application.ensure_all_started(:sbroker)
{name, mod, size, opts} = split_opts(opts)
import Supervisor.Spec
args = [{:local, name}, mod, opts, [time_unit: :micro_seconds]]
broker = worker(:sbroker, args)
workers = for id <- 1..size do
worker(Worker, [conn_mod, name, opts], [id: id])
end
worker_sup_opts = [strategy: :one_for_one, max_restarts: size]
worker_sup = supervisor(Supervisor, [workers, worker_sup_opts])
children = [broker, worker_sup]
sup_opts = [strategy: :rest_for_one, name: Module.concat(name, Supervisor)]
Supervisor.start_link(children, sup_opts)
end
@doc false
def checkout(pool, timeout) do
ask(pool, :run, timeout)
end
@doc false
def checkin(_, {worker, ref}, _) do
Worker.done(worker, ref)
end
@doc false
def open_transaction(pool, timeout) do
ask(pool, :transaction, timeout)
end
@doc false
def close_transaction(_, {worker, ref}, _) do
Worker.done(worker, ref)
end
@doc false
def break(_, {worker, ref}, timeout) do
Worker.break(worker, ref, timeout)
end
## Helpers
defp ask(pool, fun, timeout) do
case :sbroker.ask(pool, {fun, self()}) do
{:go, ref, {worker, :lazy}, _, queue_time} ->
lazy_connect(worker, ref, queue_time, timeout)
{:go, ref, {worker, mod_conn}, _, queue_time} ->
{:ok, {worker, ref}, mod_conn, queue_time}
{:drop, _} ->
{:error, :noconnect}
end
end
defp split_opts(opts) do
opts =
case Keyword.pop(opts, :size) do
{nil, opts} ->
opts
{size, opts} ->
repo = Keyword.get(opts, :repo, Ecto.Pool)
IO.puts "[warning] the :size option when configuring #{inspect repo} is deprecated, " <>
"please use :pool_size instead"
Keyword.put(opts, :pool_size, size)
end
{pool_opts, conn_opts} = Keyword.split(opts, [:pool_name, :pool_size, :broker])
conn_opts =
conn_opts
|> Keyword.put_new(:queue_timeout, Keyword.get(opts, :timeout, 5_000))
|> Keyword.put(:timeout, Keyword.get(opts, :connect_timeout, 5_000))
name = Keyword.fetch!(pool_opts, :pool_name)
broker = Keyword.get(pool_opts, :broker, Ecto.Pools.SojournBroker.Timeout)
size = Keyword.get(pool_opts, :pool_size, 10)
{name, broker, size, conn_opts}
end
defp lazy_connect(worker, ref, queue_time, timeout) do
try do
:timer.tc(Worker, :mod_conn, [worker, ref, timeout])
catch
class, reason ->
stack = System.stacktrace()
Worker.done(worker, ref)
:erlang.raise(class, reason, stack)
else
{connect_time, {:ok, mod_conn}} ->
{:ok, {worker, ref}, mod_conn, queue_time + connect_time}
{_, {:error, :noconnect} = error} ->
Worker.done(worker, ref)
error
end
end
end
|
deps/ecto/lib/ecto/pools/sojourn_broker.ex
| 0.751603 | 0.503052 |
sojourn_broker.ex
|
starcoder
|
defmodule Day16 do
@moduledoc """
AoC 2019, Day 16 - Flawed Frequency Transmission
"""
@doc """
After 100 phases of FFT, what are the first eight digits in the final output list?
"""
def part1 do
Util.priv_file(:day16, "day16_input.txt")
|> File.read!()
|> String.trim()
|> fft(100)
|> String.slice(0..7)
end
def part2 do
Util.priv_file(:day16, "day16_input.txt")
|> File.read!()
|> String.trim()
|> decode()
end
@doc """
Compute the FFT of the given string for phase_cnt cycles
"""
def fft(str, phase_cnt) do
digits =
String.graphemes(str)
|> Enum.map(&String.to_integer/1)
phase(digits, phase_cnt)
end
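# For instance, with the small example from the 2019 Day 16 puzzle statement
# (expected values as given there):
#
#   iex> Day16.fft("12345678", 4)
#   "01029498"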
@doc """
Decode a message
"""
def decode(str, phase_cnt \\ 100) do
skip = String.slice(str, 0, 7)
|> String.to_integer()
String.graphemes(str)
|> List.duplicate(10_000)
|> List.flatten()
|> Enum.drop(skip)
|> Enum.map(&String.to_integer/1)
|> Enum.take(skip)
|> do_phases(phase_cnt)
|> Enum.take(8)
|> Enum.join()
end
def do_phases(lst, 0), do: lst
def do_phases(lst, phase_cnt) do
do_phases(reverse_sum(lst), phase_cnt - 1)
end
def reverse_sum(lst) do
rolling_sum(Enum.reverse(lst), 0, [])
end
def rolling_sum([], _sum, acc), do: acc
def rolling_sum([head | rest], sum, acc) do
new = sum + head
d = Integer.digits(new)
|> Enum.take(-1)
|> hd()
rolling_sum(rest, new, [d | acc])
end
@base_pattern [0, 1, 0, -1]
def pattern(digit) do
Stream.flat_map(@base_pattern, &make_dups(&1, digit + 1))
|> Stream.cycle()
|> Stream.drop(1)
end
defp make_dups(val, cnt) do
List.duplicate(val, cnt)
end
def phase(digits, 0) do
Enum.join(digits)
end
def phase(digits, cnt) do
Enum.with_index(digits)
|> Enum.map(fn {_val, idx} -> output_element(idx, digits) end)
|> phase(cnt - 1)
end
def output_element(idx, digits) do
p = pattern(idx)
Enum.zip(digits, p)
|> Enum.map(fn {d, p} -> d * p end)
|> Enum.sum()
|> abs()
|> Integer.digits()
|> Enum.take(-1)
|> hd()
end
end
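A quick check of `fft/2` against the first worked sample from the 2019 Day 16 puzzle text (the input string and the expected digits below come from that sample):
```elixir
"80871224585914546619083218645595"
|> Day16.fft(100)
|> String.slice(0..7)
# => "24176176" according to the puzzle's worked example
```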
|
apps/day16/lib/day16.ex
| 0.715821 | 0.451447 |
day16.ex
|
starcoder
|
defmodule Kcl do
@moduledoc """
pure Elixir NaCl crypto suite substitute
The `box` and `unbox` functions exposed here are the equivalent
of NaCl's:
- `crypto_box_curve25519xsalsa20poly1305`
- `crypto_box_curve25519xsalsa20poly1305_open`
"""
@typedoc """
shared nonce
"""
@type nonce :: binary
@typedoc """
public or private key
"""
@type key :: binary
@typedoc """
computed signature
"""
@type signature :: binary
@typedoc """
key varieties
"""
@type key_variety :: :sign | :encrypt
defp first_level_key(k), do: k |> pad(16) |> Salsa20.hash(sixteen_zeroes())
defp second_level_key(k, n) when byte_size(n) == 24,
do: k |> Salsa20.hash(binary_part(n, 0, 16))
defp pad(s, n) when byte_size(s) >= n, do: s
defp pad(s, n) when byte_size(s) < n, do: pad(<<0>> <> s, n)
defp sixteen_zeroes, do: <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0>>
defp thirtytwo_zeroes, do: sixteen_zeroes() <> sixteen_zeroes()
@doc """
generate a `{private, public}` key pair
"""
@spec generate_key_pair(key_variety) :: {key, key} | :error
def generate_key_pair(variety \\ :encrypt)
def generate_key_pair(:encrypt), do: Curve25519.generate_key_pair()
def generate_key_pair(:sign), do: Ed25519.generate_key_pair()
@doc """
derive a public key from a private key
"""
@spec derive_public_key(key, key_variety) :: key | :error
def derive_public_key(private_key, variety \\ :encrypt)
def derive_public_key(private_key, :encrypt), do: Curve25519.derive_public_key(private_key)
def derive_public_key(private_key, :sign), do: Ed25519.derive_public_key(private_key)
@doc """
pre-compute a shared key
Mainly useful in a situation where many messages will be exchanged.
"""
def shared_secret(our_private, their_public) do
case Curve25519.derive_shared_secret(our_private, their_public) do
:error -> :error
val -> first_level_key(val)
end
end
@doc """
box up an authenticated packet
"""
@spec box(binary, nonce, key, key) :: {binary, Kcl.State.t()}
def box(msg, nonce, our_private, their_public),
do: box(msg, nonce, our_private |> Kcl.State.init() |> Kcl.State.new_peer(their_public))
@spec box(binary, nonce, Kcl.State.t()) :: {binary, Kcl.State.t()}
def box(msg, nonce, state) when is_map(state),
do: {secretbox(msg, nonce, state.shared_secret), struct(state, previous_nonce: nonce)}
@spec secretbox(binary, nonce, key) :: binary
@doc """
box based on a shared secret
"""
def secretbox(msg, nonce, key) do
<<pnonce::binary-size(32), c::binary>> =
Salsa20.crypt(
thirtytwo_zeroes() <> msg,
second_level_key(key, nonce),
binary_part(nonce, 16, 8)
)
Poly1305.hmac(c, pnonce) <> c
end
@doc """
unbox an authenticated packet
Returns `:error` when the packet contents cannot be authenticated, otherwise
the decrypted payload and updated state.
"""
@spec unbox(binary, nonce, key, key) :: {binary, Kcl.State.t()} | :error
def unbox(packet, nonce, our_private, their_public),
do:
packet |> unbox(nonce, our_private |> Kcl.State.init() |> Kcl.State.new_peer(their_public))
def unbox(packet, nonce, state) do
case {nonce > state.previous_nonce, secretunbox(packet, nonce, state.shared_secret)} do
{false, _} -> {:error, "nonce"}
{true, :error} -> {:error, "decode"}
{true, m} -> {m, struct(state, previous_nonce: nonce)}
end
end
@doc """
unbox based on a shared secret
"""
@spec secretunbox(binary, nonce, key) :: binary | :error
def secretunbox(packet, nonce, key)
def secretunbox(<<mac::binary-size(16), c::binary>>, nonce, key) do
<<pnonce::binary-size(32), m::binary>> =
Salsa20.crypt(
thirtytwo_zeroes() <> c,
second_level_key(key, nonce),
binary_part(nonce, 16, 8)
)
case c |> Poly1305.hmac(pnonce) |> Poly1305.same_hmac?(mac) do
true -> m
_ -> :error
end
end
@doc """
create an inital state for a peer connection
A convenience wrapper around `Kcl.State.init` and `Kcl.State.new_peer`
"""
@spec new_connection_state(key, key | nil, key) :: Kcl.State.t()
def new_connection_state(our_private, our_public \\ nil, their_public) do
our_private |> Kcl.State.init(our_public) |> Kcl.State.new_peer(their_public)
end
@doc """
sign a message
If only the secret key is provided, the public key will be derived therefrom.
This can add significant overhead to the signing operation.
"""
@spec sign(binary, key, key) :: signature
def sign(message, secret_key, public_key \\ nil),
do: Ed25519.signature(message, secret_key, public_key)
@doc """
validate a message signature
"""
@spec valid_signature?(signature, binary, key) :: boolean
def valid_signature?(signature, message, public_key),
do: Ed25519.valid_signature?(signature, message, public_key)
@doc """
`crypto_auth` equivalent
"""
@spec auth(binary, key) :: signature
def auth(message, key),
do: :crypto.macN(:hmac, :sha512, :binary.bin_to_list(key), :binary.bin_to_list(message), 32)
@doc """
Compare `auth` HMAC
"""
@spec valid_auth?(signature, binary, key) :: boolean
def valid_auth?(signature, message, key),
do: auth(message, key) |> Equivalex.equal?(signature)
end
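A minimal round-trip sketch of `box/4` and `unbox/4`; it assumes a freshly initialized `Kcl.State` accepts any random 24-byte nonce as greater than its initial `previous_nonce`:
```elixir
{alice_priv, alice_pub} = Kcl.generate_key_pair()
{bob_priv, bob_pub} = Kcl.generate_key_pair()
nonce = :crypto.strong_rand_bytes(24)
# Alice encrypts for Bob; Bob authenticates and decrypts
{packet, _state} = Kcl.box("attack at dawn", nonce, alice_priv, bob_pub)
{"attack at dawn", _state} = Kcl.unbox(packet, nonce, bob_priv, alice_pub)
```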
|
lib/kcl.ex
| 0.847669 | 0.511473 |
kcl.ex
|
starcoder
|
defmodule Rotn do
@moduledoc """
Rotn takes a compatible binary and an integer value and rotates
the corresponding characters of the binary by the integer value
around a circle of ASCII values from 32 - 126 inclusive.
"""
@doc ~S"""
Returns an `{:ok, "encoded string"}` tuple.
## Examples
iex> Rotn.encode("I must not fear", 13)
{:ok, "V-z$\"#-{|#-srn!"}
iex> Rotn.encode(68.9, 13)
{:error, "Cannot encode non-binary"}
iex> Rotn.encode("the water belongs to the tribe", 2.5)
{:error, "Incompatible shift value"}
iex> Rotn.encode("Fear is the mindkiller", -20)
{:ok, "Zyu(4})4*|y4#}$x!}\"\"y("}
"""
@spec encode(binary(), integer()) :: {:ok | :error, binary()}
defdelegate encode(text, delta), to: Rotn.Cipher, as: :encode
@doc ~S"""
Returns an encoded string, raising an ArgumentError if the provided
text cannot be encoded or the shift value is invalid.
## Examples
iex> Rotn.encode!("I must not fear", 13)
"V-z$\"#-{|#-srn!"
iex> Rotn.encode!(68.9, 13)
** (ArgumentError) Cannot encode non-binary
iex> Rotn.encode!("the water belongs to the tribe", 2.5)
** (ArgumentError) Incompatible shift value
"""
@spec encode!(binary(), integer()) :: binary() | no_return()
defdelegate encode!(text, delta), to: Rotn.Cipher, as: :encode!
@doc ~S"""
Returns an `{:ok, "decoded string"}` tuple.
## Examples
iex> Rotn.decode("V-z$\"#-{|#-srn!", 13)
{:ok, "I must not fear"}
iex> Rotn.decode(68.9, 13)
{:error, "Cannot decode non-binary"}
iex> Rotn.decode("/# 92z/ -9{ '*)\".9/*9/# 9/-${ ", 2.5)
{:error, "Incompatible shift value"}
iex> Rotn.decode("Zyu(4})4*|y4#}$x!}\"\"y(", -20)
{:ok, "Fear is the mindkiller"}
"""
@spec decode(binary(), integer()) :: {:ok | :error, binary()}
defdelegate decode(text, delta), to: Rotn.Cipher, as: :decode
@doc ~S"""
Returns an decoded string, raising an ArgumentError if the provided
text cannot be decoded or the shift value is invalid.
## Examples
iex> Rotn.decode!("V-z$\"#-{|#-srn!", 13)
"I must not fear"
iex> Rotn.decode!(68.9, 13)
** (ArgumentError) Cannot decode non-binary
iex> Rotn.decode!("/# 92z/ -9{ '*)\".9/*9/# 9/-${ ", 2.5)
** (ArgumentError) Incompatible shift value
"""
@spec decode!(binary(), integer()) :: binary() | no_return()
defdelegate decode!(text, delta), to: Rotn.Cipher, as: :decode!
end
|
lib/rotn.ex
| 0.932538 | 0.40439 |
rotn.ex
|
starcoder
|
defmodule FeedexUi.Cache.PushState do
alias FeedexUi.Cache.UiState
@moduledoc """
Persistent store for User's PushState.
The PushState is simply the user's UiState at a given moment in time, with a
mostly-unique (we use a truncated hash string) hash value as a lookup key.
The idea here is to post an updated UiState every time the user clicks on the
LiveView page. The reason we use a hash lookup instead of posting the
UiState as URL params is that the full UiState is too long for a URL.
- the uistate hash is posted as part of the browser url
- the back button works
- one UiState per hash
Note that the usr_id is encoded in the UiState, preventing cross-user collisions.
"""
@doc """
Save the PushState
Save a UiState into the store, and return the hash_key.
"""
def save(ui_state) do
hash_key = gen_hash(ui_state)
sig()
|> Pets.insert({hash_key, ui_state})
hash_key
end
@doc """
Return the PushState for a given lookup key.
"""
def lookup(hash_key, usr_id \\ 1) do
result = Pets.lookup(sig(), hash_key)
case result do
[] -> %UiState{usr_id: usr_id}
nil -> %UiState{usr_id: usr_id}
[{_, uistate}] -> uistate
_ -> raise("Error: badval")
end
end
@doc """
Return all records.
"""
def all do
sig()
|> Pets.all()
|> Enum.map(&elem(&1, 1))
end
def cleanup do
sig()
|> Pets.cleanup()
end
def purge_old do
sig()
|> Pets.all()
|> Enum.filter(&(&1))
|> Enum.each(&(Pets.remove(sig(), elem(&1, 0))))
:ok
end
@env Mix.env()
defp sig do
case @env do
:dev -> %{filepath: "/tmp/pushstate_dev.dat" , tablekey: :pushstate_dev}
:test -> %{filepath: "/tmp/pushstate_test.dat", tablekey: :pushstate_test}
:prod -> %{filepath: "/tmp/pushstate_prod.dat", tablekey: :pushstate_prod}
end
end
# the hash_key is truncated, for brevity in the browser url
# we expect a small number of pushStates per user
# so the risk of cross-user collision is small
# note also that the usr_id is encoded in the push_state
defp gen_hash(ui_state) do
:md5
|> :crypto.hash(inspect(ui_state))
|> Base.url_encode64(padding: false)
|> String.slice(1..6)
end
end
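A short usage sketch, assuming `FeedexUi.Cache.UiState` is a plain struct carrying at least a `usr_id` field:
```elixir
ui_state = %FeedexUi.Cache.UiState{usr_id: 1}
hash_key = FeedexUi.Cache.PushState.save(ui_state)
# the same struct comes back for the truncated hash key
^ui_state = FeedexUi.Cache.PushState.lookup(hash_key)
```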
|
apps/feedex_ui/lib/feedex_ui/cache/push_state.ex
| 0.652131 | 0.4474 |
push_state.ex
|
starcoder
|
defmodule Chunky.Grid do
@moduledoc """
Functions for creating and manipulating a two dimenesional grid.
Internally the grid is ordered as lists of lists, rather than a single list.
"""
defstruct [:data, :width, :height]
alias Chunky.Grid
def new(width, height, fun)
when is_integer(width) and is_integer(height) and is_function(fun, 1) do
if valid_dimensions?(width, height) do
%Grid{
data: gen_data(width, height, fun),
width: width,
height: height
}
else
{:error, :invalid_grid_dimensions}
end
end
def new(width, height, val) when is_integer(width) and is_integer(height) do
if valid_dimensions?(width, height) do
%Grid{
data: gen_data(width, height, val),
width: width,
height: height
}
else
{:error, :invalid_grid_dimensions}
end
end
def get_at(%Grid{} = grid, {x, y}) when is_integer(x) and is_integer(y), do: get_at(grid, x, y)
def get_at(%Grid{} = grid, x, y) when is_integer(x) and is_integer(y) do
case valid_coordinate(grid, x, y) do
{false, reason} -> {:error, reason}
true -> grid.data |> Kernel.get_in([Access.at(y), Access.at(x)])
end
end
def put_at(%Grid{} = grid, {x, y}, v), do: put_at(grid, x, y, v)
def put_at(%Grid{} = grid, x, y, v) do
case valid_coordinate(grid, x, y) do
{false, reason} ->
{:error, reason}
true ->
n_data = grid.data |> put_in([Access.at(y), Access.at(x)], v)
%{grid | data: n_data}
end
end
def valid_coordinate?(%Grid{} = grid, {x, y}) when is_integer(x) and is_integer(y),
do: valid_coordinate?(grid, x, y)
def valid_coordinate?(%Grid{} = grid, x, y) when is_integer(x) and is_integer(y) do
case valid_coordinate(grid, x, y) do
{false, _} -> false
true -> true
end
end
def valid_coordinate(%Grid{} = grid, {x, y}) when is_integer(x) and is_integer(y),
do: valid_coordinate(grid, x, y)
def valid_coordinate(%Grid{} = grid, x, y) when is_integer(x) and is_integer(y) do
cond do
x < 0 -> {false, :invalid_x_coordinate}
x >= grid.width -> {false, :invalid_x_coordinate}
y < 0 -> {false, :invalid_y_coordinate}
y >= grid.height -> {false, :invalid_y_coordinate}
true -> true
end
end
defp valid_dimensions?(width, height) when is_integer(width) and is_integer(height) do
width > 0 && height > 0
end
@doc """
Add a list of point data to a grid. Points can be specified as `{x, y, value}` tuples
or as maps of `%{x: x, y: y, value: value}`.
## Example
#iex> Grid.new(10, 10, nil) |> put_all([ {1, 1, "#"}, {3, 2, "."}, ...])
"""
def put_all(%Grid{} = grid, [datum]) do
case datum do
{x, y, val} -> grid |> Grid.put_at(x, y, val)
%{x: x, y: y, value: val} -> grid |> Grid.put_at(x, y, val)
end
end
def put_all(%Grid{} = grid, [datum | data]) do
case datum do
{x, y, val} -> grid |> Grid.put_at(x, y, val) |> put_all(data)
%{x: x, y: y, value: val} -> grid |> Grid.put_at(x, y, val) |> put_all(data)
end
end
def find_index(%Grid{} = grid, val) do
0..(grid.height - 1)
|> Enum.map(fn y ->
0..(grid.width - 1)
|> Enum.map(fn x ->
{grid |> Grid.get_at(x, y) == val, x, y}
end)
end)
|> List.flatten()
|> Enum.filter(fn {has, _, _} -> has end)
|> Enum.map(fn {_, x, y} -> {x, y} end)
end
# generate the raw lists for a grid
defp gen_data(width, height, fun) when is_function(fun, 1) do
0..(height - 1)
|> Enum.map(fn y ->
0..(width - 1)
|> Enum.map(fn x ->
fun.({x, y})
end)
end)
end
defp gen_data(width, height, val) do
gen_data(width, height, fn _ -> val end)
end
end
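A brief sketch of constructing and mutating a grid with the API above:
```elixir
grid = Chunky.Grid.new(3, 2, ".")            # 3 wide, 2 tall, filled with "."
grid = Chunky.Grid.put_at(grid, 1, 1, "#")
"#" = Chunky.Grid.get_at(grid, 1, 1)
{:error, :invalid_x_coordinate} = Chunky.Grid.get_at(grid, 5, 0)
[{1, 1}] = Chunky.Grid.find_index(grid, "#")
```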
|
lib/grid.ex
| 0.848784 | 0.796411 |
grid.ex
|
starcoder
|
defmodule Akd.Fetch.Git do
@moduledoc """
A native Hook module that comes shipped with Akd.
This module uses `Akd.Hook`.
Provides a set of operations that fetch source code using `git` to a destination,
checks out a given branch (defaults to `master`) and pulls the latest version
of the branch on the destination.
Ensures cleanup by emptying the destination directory. (Doesn't run this by
default.)
Doesn't have any Rollback operations.
# Options:
* `run_ensure`: `boolean`. Specifies whether to run the ensure commands or not.
* `ignore_failure`: `boolean`. Specifies whether to continue if this hook fails.
* `src`: `string`. Source/Repo from where to clone the project. This is a required
option while using this hook.
* `branch`: `string`. Branch of the git repo that is being deployed.
# Defaults:
* `run_ensure`: `false`
* `ignore_failure`: `false`
* `branch`: `master`
"""
use Akd.Hook
@default_opts [run_ensure: false, ignore_failure: false]
@errmsg %{no_src: "No `src` given to `Akd.Fetch.Git`. Expected a git repo."}
@doc """
Callback implementation for `get_hooks/2`.
This function returns a list of operations that can be used to fetch a source
code using `git` from a branch.
## Examples
When no `src` is given with `opts`:
iex> deployment = %Akd.Deployment{mix_env: "prod",
...> build_at: Akd.Destination.local("."),
...> publish_to: Akd.Destination.local("."),
...> name: "name",
...> vsn: "0.1.1"}
iex> Akd.Fetch.Git.get_hooks(deployment, [])
** (RuntimeError) No `src` given to `Akd.Fetch.Git`. Expected a git repo.
When a `src` is given:
iex> deployment = %Akd.Deployment{mix_env: "prod",
...> build_at: Akd.Destination.local("."),
...> publish_to: Akd.Destination.local("."),
...> name: "name",
...> vsn: "0.1.1"}
iex> Akd.Fetch.Git.get_hooks(deployment, [src: "url"])
[%Akd.Hook{ensure: [%Akd.Operation{cmd: "rm -rf ./*", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "rm -rf ./.*", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], ignore_failure: false,
main: [%Akd.Operation{cmd: "git status; if [[ $? != 0 ]]; then git clone url .; fi", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git fetch", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git reset --hard", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git clean -fd", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git checkout master", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git pull", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], rollback: [], run_ensure: false}]
When a `git_src` is part of deployment data:
iex> deployment = %Akd.Deployment{mix_env: "prod",
...> build_at: Akd.Destination.local("."),
...> publish_to: Akd.Destination.local("."),
...> name: "name",
...> vsn: "0.1.1",
...> data: %{git_src: "url"}}
iex> Akd.Fetch.Git.get_hooks(deployment)
[%Akd.Hook{ensure: [%Akd.Operation{cmd: "rm -rf ./*", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "rm -rf ./.*", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], ignore_failure: false,
main: [%Akd.Operation{cmd: "git status; if [[ $? != 0 ]]; then git clone url .; fi", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git fetch", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git reset --hard", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git clean -fd", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git checkout master", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git pull", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], rollback: [], run_ensure: false}]
When a `git_branch` is part of deployment data:
iex> deployment = %Akd.Deployment{mix_env: "prod",
...> build_at: Akd.Destination.local("."),
...> publish_to: Akd.Destination.local("."),
...> name: "name",
...> vsn: "0.1.1",
...> data: %{git_src: "url", git_branch: "branch"}}
iex> Akd.Fetch.Git.get_hooks(deployment)
[%Akd.Hook{ensure: [%Akd.Operation{cmd: "rm -rf ./*", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "rm -rf ./.*", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], ignore_failure: false,
main: [%Akd.Operation{cmd: "git status; if [[ $? != 0 ]]; then git clone url .; fi", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git fetch", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git reset --hard", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git clean -fd", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git checkout branch", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git pull", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], rollback: [], run_ensure: false}]
When a `branch` is part of options:
iex> deployment = %Akd.Deployment{mix_env: "prod",
...> build_at: Akd.Destination.local("."),
...> publish_to: Akd.Destination.local("."),
...> name: "name",
...> vsn: "0.1.1",
...> data: %{git_src: "url"}}
iex> Akd.Fetch.Git.get_hooks(deployment, branch: "branch")
[%Akd.Hook{ensure: [%Akd.Operation{cmd: "rm -rf ./*", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "rm -rf ./.*", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], ignore_failure: false,
main: [%Akd.Operation{cmd: "git status; if [[ $? != 0 ]]; then git clone url .; fi", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git fetch", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git reset --hard", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git clean -fd", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git checkout branch", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "git pull", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], rollback: [], run_ensure: false}]
"""
@spec get_hooks(Akd.Deployment.t(), Keyword.t()) :: list(Akd.Hook.t())
def get_hooks(deployment, opts \\ []) do
opts = uniq_merge(opts, @default_opts)
src = Keyword.get(opts, :src) || Map.get(deployment.data, :git_src)
branch =
Keyword.get(opts, :branch) ||
Map.get(deployment.data, :git_branch) || "master"
destination = Akd.DestinationResolver.resolve(:build, deployment)
[fetch_hook(src, branch, destination, opts)]
end
# This function takes a source, branch, destination and options and
# returns an Akd.Hook.t struct using the form_hook DSL.
defp fetch_hook(nil, _, _, _), do: raise(@errmsg[:no_src])
defp fetch_hook(src, branch, destination, opts) do
form_hook opts do
main("git status; if [[ $? != 0 ]]; then git clone #{src} .; fi", destination)
main("git fetch", destination)
main("git reset --hard", destination)
main("git clean -fd", destination)
main("git checkout #{branch}", destination)
main("git pull", destination)
ensure("rm -rf ./*", destination)
ensure("rm -rf ./.*", destination)
end
end
# This function takes two keyword lists and merges them keeping the keys
# unique. If there are multiple values for a key, it takes the value from
# the first value of keyword1 corresponding to that key.
defp uniq_merge(keyword1, keyword2) do
keyword2
|> Keyword.merge(keyword1)
|> Keyword.new()
end
end
|
lib/akd/base/fetch/git.ex
| 0.842798 | 0.57946 |
git.ex
|
starcoder
|
defmodule Utils.Table do
def comparison_examples do
[
%{comparison: "5 === 5", result: true},
%{comparison: "5 === 5.0", result: false},
%{comparison: "5 == 5.0", result: true},
%{comparison: "5 === 4", result: false},
%{comparison: "5 > 4", result: true},
%{comparison: "4 > 5", result: false},
%{comparison: "5 < 4", result: false},
%{comparison: "4 < 5", result: true},
%{comparison: "5 >= 5", result: true},
%{comparison: "5 >= 4", result: true},
%{comparison: "4 >= 5", result: false},
%{comparison: "5 <= 5", result: true},
%{comparison: "4 <= 5", result: true},
%{comparison: "5 <= 4", result: false}
]
end
def example do
Enum.map(1..5, fn each ->
%{
number: each
}
end)
end
def exponential_growth do
Enum.map(1..100, fn each ->
%{
"# of elements": each,
result: 10 ** each,
equation: "10 ** #{each}"
}
end)
end
def factorial_complexity do
Enum.map(1..10, fn each ->
equation =
Enum.map(each..1, fn
^each -> "#{each}"
n -> " * #{n}"
end)
|> Enum.join()
%{"# of elements": each, result: each ** each, equation: equation}
end)
end
def fib_cache do
defmodule Fib do
def get(n) do
sequence =
Stream.unfold({1, 1}, fn {a, b} ->
{a, {b, a + b}}
end)
|> Enum.take(n)
[0 | sequence]
end
end
Fib.get(150)
|> Enum.with_index()
|> Enum.map(fn {value, index} -> %{input: index, output: value} end)
end
def lists_vs_tuples do
[
[operation: "length", tuple: "O(1)", list: "O(n)"],
[operation: "prepend", tuple: "O(n)", list: "O(1)"],
[operation: "insert", tuple: "O(n)", list: "O(n*)"],
[operation: "access", tuple: "O(1)", list: "O(n*)"],
[operation: "update/replace", tuple: "O(n)", list: "O(n*)"],
[operation: "delete", tuple: "O(n)", list: "O(n*)"],
[operation: "concatenation", tuple: "O(n1 + n2)", list: "O(n1)"]
]
end
def n2 do
Enum.map(1..1000, fn each ->
%{
"# of elements": each,
result: each ** 2,
notation: "#{each}**2",
equation: "#{each} * #{each}"
}
end)
end
def n3 do
Enum.map(1..1000, fn each ->
%{
"# of elements": each,
result: each ** 3,
notation: "#{each}**3",
equation: "#{each} * #{each} * #{each}"
}
end)
end
def unicode do
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|> String.codepoints()
|> Enum.map(fn char ->
<<code_point::utf8>> = char
%{character: char, code_point: code_point}
end)
end
def measurements do
[
[unit: :millimeter, value: 1, centimeter: 0.1],
[unit: :meter, value: 1, centimeter: 100],
[unit: :kilometer, value: 1, centimeter: 100_000],
[unit: :inch, value: 1, centimeter: 2.54],
[unit: :feet, value: 1, centimeter: 30],
[unit: :yard, value: 1, centimeter: 91],
[unit: :mile, value: 1, centimeter: 160_000]
]
end
def code_points do
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|> String.codepoints()
|> Enum.map(fn char ->
<<code_point::utf8>> = char
%{character: char, code_point: code_point}
end)
end
def base_2_10_comparison do
Enum.map(1..500, fn integer ->
binary = Integer.digits(integer, 2) |> Enum.join() |> String.to_integer()
%{base10: integer, base2: binary}
end)
end
def hexadecimal do
"1,2,3,4,5,6,7,8,9,10,a,b,c,d,e,f"
|> String.split(",")
|> Enum.with_index()
|> Enum.map(fn {symbol, index} -> %{integer: "#{index}", hexadecimal: symbol} end)
end
end
|
utils/lib/table.ex
| 0.649579 | 0.643728 |
table.ex
|
starcoder
|
defmodule Snitch.Tools.Validations do
@moduledoc """
A bunch of data validations for `Ecto.Changeset`.
## Note
All validations in this module are check ONLY the value present under the
`:changes` key in the `Ecto.Changeset.t()`.
The validations are non-strict, and will not complain if the key is not
present under `:changes`.
"""
import Ecto.Changeset
@doc """
Validates that the amount (of type `Money.t`) under the `key` in `changeset`
is non-negative.
"""
@spec validate_amount(Ecto.Changeset.t(), atom) :: Ecto.Changeset.t()
def validate_amount(%Ecto.Changeset{} = changeset, key) when is_atom(key) do
case fetch_change(changeset, key) do
{:ok, %Money{amount: amount}} ->
if Decimal.cmp(Decimal.reduce(amount), Decimal.new(0)) == :lt do
add_error(changeset, key, "must be equal or greater than 0", validation: :number)
else
changeset
end
:error ->
changeset
end
end
@doc """
Validates that the given date (of type `DateTime.t`) under the `key` in
`changeset` is in the future wrt. `DateTime.utc_now/0`.
"""
@spec validate_future_date(Ecto.Changeset.t(), atom) :: Ecto.Changeset.t()
def validate_future_date(%Ecto.Changeset{valid?: true} = changeset, key)
when is_atom(key) do
case fetch_change(changeset, key) do
{:ok, date} ->
current_time = DateTime.utc_now()
if DateTime.compare(date, current_time) == :gt do
changeset
else
add_error(changeset, key, "date should be in future", validation: :number)
end
:error ->
changeset
end
end
def validate_future_date(changeset, _), do: changeset
@doc """
Runs validations for embedded data and returns a changeset.
Takes as input a `changeset`, `module` and `key`.
Runs validations for the target embedded data by using the supplied `module`
name under the module_key. The module_key can be different for different models.
The module should implement a changeset function in order for this to
work. The `key` identifies the field to which the data, or an error in
case of failure, is added.
## See
`Snitch.Data.Schema.PromotionAction`
"""
@spec validate_embedded_data(changeset :: Ecto.Changeset.t(), module :: atom(), key :: atom) ::
Ecto.Changeset.t()
def validate_embedded_data(%Ecto.Changeset{valid?: true} = changeset, module_key, key) do
with {:ok, preferences} <- fetch_change(changeset, key),
{:ok, module_key} <- fetch_change(changeset, module_key) do
preference_changeset = module_key.changeset(struct(module_key), preferences)
add_preferences_change(preference_changeset, changeset, key)
else
:error ->
changeset
{:error, message} ->
add_error(changeset, module_key, message)
end
end
def validate_embedded_data(changeset, _module_key, _key), do: changeset
defp add_preferences_change(%Ecto.Changeset{valid?: true} = embed_changeset, changeset, key) do
data = embed_changeset.changes
put_change(changeset, key, data)
end
defp add_preferences_change(pref_changeset, changeset, key) do
additional_info =
pref_changeset
|> traverse_errors(fn {msg, opts} ->
Enum.reduce(opts, msg, fn {key, value}, acc ->
String.replace(acc, "%{#{key}}", to_string(value))
end)
end)
add_error(changeset, key, "invalid_preferences", additional_info)
end
end
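A minimal sketch of `validate_amount/2`; the `Product` schema, its `:price` field, and the `Money.new/2` argument order are hypothetical:
```elixir
changeset =
  %Product{}
  |> Ecto.Changeset.change(%{price: Money.new(:USD, -1)})
  |> Snitch.Tools.Validations.validate_amount(:price)
changeset.errors
# => [price: {"must be equal or greater than 0", [validation: :number]}]
```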
|
apps/snitch_core/lib/core/tools/validations.ex
| 0.856782 | 0.449876 |
validations.ex
|
starcoder
|
defmodule Nebulex.RPC do
@moduledoc """
RPC utilities for distributed task execution.
This module uses supervised tasks underneath `Task.Supervisor`.
"""
@typedoc "Task callback"
@type callback :: {module, atom, [term]}
@typedoc "Group entry: node -> callback"
@type node_callback :: {node, callback}
@typedoc "Node group"
@type node_group :: %{optional(node) => callback} | [node_callback]
@typedoc "Reducer spec"
@type reducer :: {acc :: term, ({:ok, term} | {:error, term}, node_callback, term -> term)}
@doc """
Evaluates `apply(mod, fun, args)` on node `node` and returns the corresponding
evaluation result, or `{:badrpc, reason}` if the call fails.
A timeout, in milliseconds or `:infinity`, can be given with a default value
of `5000`. It uses `Task.await/2` internally.
## Example
iex> Nebulex.RPC.call(:my_task_sup, :node1, Kernel, :to_string, [1])
"1"
"""
@spec call(Supervisor.supervisor(), node, module, atom, [term], timeout) ::
term | {:badrpc, term}
def call(supervisor, node, mod, fun, args, timeout \\ 5000)
def call(_supervisor, node, mod, fun, args, _timeout) when node == node() do
apply(mod, fun, args)
rescue
# FIXME: this is because coveralls does not check this as covered
# coveralls-ignore-start
exception ->
{:badrpc, exception}
# coveralls-ignore-stop
end
def call(supervisor, node, mod, fun, args, timeout) do
{supervisor, node}
|> Task.Supervisor.async_nolink(
__MODULE__,
:call,
[supervisor, node, mod, fun, args, timeout]
)
|> Task.await(timeout)
end
@doc """
In contrast to a regular single-node RPC, a multicall is an RPC that is sent
concurrently from one client to multiple servers. The function evaluates
`apply(mod, fun, args)` on each `node_group` entry and collects the answers.
Then, evaluates the `reducer` function (set in the `opts`) on each answer.
This function is similar to `:rpc.multicall/5`.
## Options
* `:timeout` - A timeout, in milliseconds or `:infinity`, can be given with
a default value of `5000`. It uses `Task.yield_many/2` internally.
* `:reducer` - Reducer function to be executed on each collected result.
(check out `reducer` type).
## Example
iex> Nebulex.RPC.multi_call(
...> :my_task_sup,
...> %{
...> node1: {Kernel, :to_string, [1]},
...> node2: {Kernel, :to_string, [2]}
...> },
...> timeout: 10_000,
...> reducer: {
...> [],
...> fn
...> {:ok, res}, _node_callback, acc ->
...> [res | acc]
...>
...> {:error, _}, _node_callback, acc ->
...> acc
...> end
...> }
...> )
["1", "2"]
"""
@spec multi_call(Supervisor.supervisor(), node_group, Keyword.t()) :: term
def multi_call(supervisor, node_group, opts \\ []) do
node_group
|> Enum.map(fn {node, {mod, fun, args}} ->
Task.Supervisor.async_nolink({supervisor, node}, mod, fun, args)
end)
|> handle_multi_call(node_group, opts)
end
@doc """
Similar to `multi_call/3` but the same `node_callback` (given by `module`,
`fun`, `args`) is executed on all `nodes`; Internally it creates a
`node_group` with the same `node_callback` for each node.
## Options
Same options as `multi_call/3`.
## Example
iex> Nebulex.RPC.multi_call(
...> :my_task_sup,
...> [:node1, :node2],
...> Kernel,
...> :to_string,
...> [1],
...> timeout: 5000,
...> reducer: {
...> [],
...> fn
...> {:ok, res}, _node_callback, acc ->
...> [res | acc]
...>
...> {:error, _}, _node_callback, acc ->
...> acc
...> end
...> }
...> )
["1", "1"]
"""
@spec multi_call(Supervisor.supervisor(), [node], module, atom, [term], Keyword.t()) :: term
def multi_call(supervisor, nodes, mod, fun, args, opts \\ []) do
multi_call(supervisor, Enum.map(nodes, &{&1, {mod, fun, args}}), opts)
end
## Private Functions
defp handle_multi_call(tasks, node_group, opts) do
{reducer_acc, reducer_fun} = Keyword.get(opts, :reducer, default_reducer())
tasks
|> Task.yield_many(opts[:timeout] || 5000)
|> :lists.zip(node_group)
|> Enum.reduce(reducer_acc, fn
{{_task, {:ok, res}}, group}, acc ->
reducer_fun.({:ok, res}, group, acc)
{{_task, {:exit, reason}}, group}, acc ->
reducer_fun.({:error, {:exit, reason}}, group, acc)
{{task, nil}, group}, acc ->
_ = Task.shutdown(task, :brutal_kill)
reducer_fun.({:error, :timeout}, group, acc)
end)
end
defp default_reducer do
{
{[], []},
fn
{:ok, res}, _node_callback, {ok, err} ->
{[res | ok], err}
{:error, reason}, node_callback, {ok, err} ->
{ok, [{reason, node_callback} | err]}
end
}
end
end
|
lib/nebulex/rpc.ex
| 0.814754 | 0.408719 |
rpc.ex
|
starcoder
|
defmodule Stargate.Producer.QueryParams do
@moduledoc """
This modules provides the function to generate query parameters
for establishing a producer connection to a topic with Pulsar.
"""
@doc """
Generates a query parameter string to append to the URL and path
parameters when creating a Stargate.Producer connection.
Stargate does not generate explicit query parameters for settings the
calling application omits, as Pulsar itself assumes default values
for them.
Query parameters with nil values are removed from the resulting
connection string so only those with explicit values will be
passed to Pulsar when creating a connection.
"""
@spec build_params(map() | nil) :: String.t()
def build_params(nil), do: ""
def build_params(config) when is_map(config) do
routing_mode = get_param(config, :routing_mode)
compression_type = get_param(config, :compression_type)
hashing_scheme = get_param(config, :hashing_scheme)
%{
"sendTimeoutMillis" => Map.get(config, :send_timeout),
"batchingEnabled" => Map.get(config, :batch_enabled),
"batchingMaxMessages" => Map.get(config, :batch_max_msg),
"maxPendingMessages" => Map.get(config, :max_pending_msg),
"batchingMaxPublishDelay" => Map.get(config, :batch_max_delay),
"messageRoutingMode" => routing_mode,
"compressionType" => compression_type,
"producerName" => Map.get(config, :name),
"initialSequenceId" => Map.get(config, :initial_seq_id),
"hashingScheme" => hashing_scheme
}
|> Enum.map(fn {key, value} -> key <> "=" <> to_string(value) end)
|> Enum.filter(fn param -> String.last(param) != "=" end)
|> Enum.join("&")
end
defp get_param(config, :routing_mode) do
case Map.get(config, :routing_mode) do
:round_robin -> "RoundRobinPartition"
:single -> "SinglePartition"
_ -> ""
end
end
defp get_param(config, :compression_type) do
case Map.get(config, :compression_type) do
:lz4 -> "LZ4"
:zlib -> "ZLIB"
:none -> "NONE"
_ -> ""
end
end
defp get_param(config, :hashing_scheme) do
case Map.get(config, :hashing_scheme) do
:java_string -> "JavaStringHash"
:murmur3 -> "Murmur3_32Hash"
_ -> ""
end
end
end
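A sketch showing that only explicitly supplied settings survive into the query string (parameters with empty values are filtered out; parameter order follows map key order):
```elixir
Stargate.Producer.QueryParams.build_params(%{
  name: "my-producer",
  routing_mode: :round_robin,
  batch_enabled: true
})
# => "batchingEnabled=true&messageRoutingMode=RoundRobinPartition&producerName=my-producer"
```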
|
lib/stargate/producer/query_params.ex
| 0.706798 | 0.44903 |
query_params.ex
|
starcoder
|
defmodule Gyx.Qstorage.QGenServer do
@moduledoc """
This module is intended to be used as a Q table representation.
It is based on a single GenServer process, using a Map to hold Q table data
as part of process state.
Note that this is a hand-made version of an Agent OTP implementation;
using an Agent would be preferable to this.
"""
use GenServer
@heatmap_color :color8
defstruct state_value_table: %{}, actions: nil
@type t :: %__MODULE__{state_value_table: %{}, actions: MapSet.t()}
def init(_) do
{:ok, %__MODULE__{state_value_table: %{}, actions: MapSet.new()}}
end
def start_link(_, opts) do
GenServer.start_link(__MODULE__, [], opts)
end
def q_get(qgenserver, env_state, action) do
GenServer.call(qgenserver, {:q_get, {env_state, action}})
end
def q_set(qgenserver, env_state, action, value) do
GenServer.call(qgenserver, {:q_set, {env_state, action, value}})
end
def get_q(qgenserver) do
GenServer.call(qgenserver, :get_q)
end
def get_q_matrix(qgenserver) do
GenServer.call(qgenserver, :get_q_matrix)
end
def print_q_matrix(qgenserver) do
GenServer.call(qgenserver, :print_q_matrix)
end
def get_max_action(qgenserver, env_state) do
GenServer.call(qgenserver, {:get_max_action, env_state})
end
def handle_call(:get_q, _from, state = %__MODULE__{}),
do: {:reply, state.state_value_table, state}
def handle_call(:get_q_matrix, _from, state = %__MODULE__{}) do
{:reply,
map_to_matrix(
state.state_value_table,
MapSet.size(state.actions)
), state}
end
def handle_call(:print_q_matrix, _from, state = %__MODULE__{}) do
map_to_matrix(
state.state_value_table,
MapSet.size(state.actions)
)
|> Matrex.heatmap(@heatmap_color)
|> (fn _ -> :ok end).()
{:reply, :ok, state}
end
def handle_call(
{:q_get, {env_state, action}},
_from,
state = %__MODULE__{}
) do
expected_reward = state.state_value_table[inspect(env_state)][action]
{:reply, if(expected_reward, do: expected_reward, else: 0.0), state}
end
def handle_call(
{:q_set, {env_state, action, value}},
_from,
state = %__MODULE__{actions: actions}
) do
k_state = inspect(env_state)
state = %{
state
| state_value_table: Map.put_new_lazy(state.state_value_table, k_state, fn -> %{} end),
actions: MapSet.put(actions, action)
}
new_state =
Map.put(
state.state_value_table,
k_state,
Map.put(state.state_value_table[k_state], action, value)
)
{:reply, new_state,
%{
state
| state_value_table: new_state,
actions: MapSet.put(actions, action)
}}
end
def handle_call(
{:get_max_action, env_state},
_from,
state = %__MODULE__{}
) do
k_state = inspect(env_state)
state = %{
state
| state_value_table: Map.put_new_lazy(state.state_value_table, k_state, fn -> %{} end)
}
with [{action, _}] <-
state.state_value_table[k_state]
|> Enum.sort_by(fn {_, v} -> v end, &>=/2)
|> Enum.take(1) do
{:reply, {:ok, action}, state}
else
_ -> {:reply, {:error, "Environment state has not been observed."}, state}
end
end
defp map_to_matrix(_, actions_size) when actions_size < 2 do
Matrex.new([[0, 0], [0, 0]])
end
defp map_to_matrix(map_state_value_table, actions_size) do
map_state_value_table
|> Map.values()
|> Enum.map(fn vs -> Map.values(vs) end)
|> Enum.filter(&(length(&1) == actions_size))
|> (fn l ->
if length(l) < actions_size do
[[0, 0], [0, 0]]
else
l
end
end).()
|> Matrex.new()
end
end
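A small interaction sketch with the GenServer API above:
```elixir
{:ok, q} = Gyx.Qstorage.QGenServer.start_link(nil, [])
Gyx.Qstorage.QGenServer.q_set(q, %{pos: 0}, :right, 1.0)
Gyx.Qstorage.QGenServer.q_set(q, %{pos: 0}, :left, 0.5)
0.5 = Gyx.Qstorage.QGenServer.q_get(q, %{pos: 0}, :left)
{:ok, :right} = Gyx.Qstorage.QGenServer.get_max_action(q, %{pos: 0})
```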
|
lib/qstorage/qlocalstatemap.ex
| 0.745954 | 0.61891 |
qlocalstatemap.ex
|
starcoder
|
defmodule ServerComms.Ws.WebsocketHandler do
@moduledoc """
Communicates with the server, primarily sending camera images up to form the MPEG stream.
Uses the traditional Jermy Ong Websocket client but forked [here](https://github.com/paulanthonywilson/websocket_client)
as some of the Erlang is deprecated.
Also deals with image refresh; has the potential for dealing with other demands
such as changing the camera settings.
"""
@behaviour :websocket_client_handler
alias Common.Tick
require Logger
defstruct parent: nil, next_image_time: 0
@type t :: %__MODULE__{parent: pid(), next_image_time: integer()}
# Limit sending to one frame every 50 milliseconds to save bandwidth in
# case of a fast connection. 50 milliseconds is 20fps.
@send_every_millis 50
def init(args, _connection_state) do
send(self(), :send_next_frame)
parent = Keyword.fetch!(args, :parent)
{:ok, %__MODULE__{parent: parent}}
end
def websocket_handle({:binary, "\n"}, _, %{next_image_time: next_image_time} = state) do
schedule_next_send(next_image_time)
{:ok, state}
end
def websocket_handle({:binary, <<131>> <> _ = message}, _, state) do
message
|> :erlang.binary_to_term()
|> handle_message()
{:ok, state}
end
def websocket_handle(message, _, state) do
Logger.info(fn -> "Unexpected websocket_handle message: #{inspect(message)}" end)
{:ok, state}
end
def websocket_info(:send_next_frame, _, state) do
Tick.tick(:server_comms_send_image_tick)
{:reply, {:binary, Camera.next_frame()},
%{state | next_image_time: System.monotonic_time(:millisecond) + @send_every_millis}}
end
def websocket_info(message, _, state) do
Logger.warn("Unexpected websocket_info mesage: #{inspect(message)}")
{:ok, state}
end
def websocket_terminate(_, _connection_state, %{parent: parent}) do
send(parent, :websocket_terminated)
:ok
end
defp handle_message({:token_refresh, token}) do
Configure.set_registration_token(token)
end
defp handle_message(unexpected) do
Logger.info(fn -> "Unexpected binary message: #{inspect(unexpected)}" end)
end
defp schedule_next_send(next_image_time) do
delay = max(0, next_image_time - System.monotonic_time(:millisecond))
Process.send_after(self(), :send_next_frame, delay)
end
end
|
apps/server_comms/lib/server_comms/ws/websocket_handler.ex
| 0.820254 | 0.432303 |
websocket_handler.ex
|
starcoder
|
defmodule EspEx.EventTransformer do
@moduledoc """
Helps converting from and to a raw event. A raw event is basically a map as
it comes from the database.
It's a behaviour (the callback types are specified below).
It can be "used" with `use EspEx.EventTransformer` which would:
- @behavior EspEx.EventTransformer
- provide a default `to_event` which catches any event and converts it (using
the created `EspEx.EventTransformer.to_event`)
"""
alias EspEx.RawEvent
alias EspEx.Event.Unknown
alias EspEx.Logger
@callback to_event(raw_event :: EspEx.RawEvent.t()) ::
struct | EspEx.Event.Unknown.t()
defmacro __using__(opts \\ []) do
events_module = Keyword.get(opts, :events_module, __CALLER__.module)
quote location: :keep do
@behaviour unquote(__MODULE__)
@impl unquote(__MODULE__)
def to_event(%RawEvent{} = raw_event) do
events_module = unquote(events_module)
case events_module do
nil -> EspEx.EventTransformer.to_event(__MODULE__, raw_event)
_ -> EspEx.EventTransformer.to_event(events_module, raw_event)
end
end
end
end
@doc ~S"""
Converts from a RawEvent to an Event, which is a struct defined
by the user, in a module defined by the user, the only known things is that
it has the `event_id` field and the `raw_event` field.
Takes a %RawEvent and it creates a new Event, based on events_module plus the
`:type` field in RawEvent. So it becomes `#{events_module}.#{type}` (check
for errors, create a custom struct %EspEx.Event.Unknown if it's missing).
Then copy `event_id` to `event_id`. Then, it grabs all the remaining
fields in RawEvent excluding `data` and it stores it
in `:raw_event` field. Finally all fields in `data` are
copied in the Event (which is a map)
"""
def to_event(events_module, %RawEvent{type: type} = raw_event)
when is_atom(events_module) do
with {:ok, event_module} <- to_event_module(events_module, type) do
struct(event_module, raw_event.data)
else
{:unknown, _} -> %Unknown{raw_event: raw_event}
{:no_struct, _} -> %Unknown{raw_event: raw_event}
end
end
def to_event_module(events_module, type)
when is_atom(events_module) and is_bitstring(type) do
try do
event_module = Module.safe_concat([events_module, type])
load_and_to_result(event_module)
rescue
ArgumentError ->
Logger.warn(fn ->
"Event #{events_module}.#{type} doesn't exist"
end)
{:unknown, {events_module, type}}
end
end
defp load_and_to_result(event_module) do
loaded = Code.ensure_loaded(event_module)
event_module_result(event_module, loaded)
end
defp event_module_result(event_module, {:module, _}) do
if function_exported?(event_module, :__struct__, 0) do
{:ok, event_module}
else
Logger.warn(fn -> "Event #{event_module} has no struct" end)
{:no_struct, event_module}
end
end
defp event_module_result(event_module, error) do
Logger.error(fn ->
"Event #{event_module} is a not a valid module: #{inspect(error)}"
end)
{:invalid_module, event_module}
end
end
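A sketch of the conversion, assuming a hypothetical `MyApp.Events` namespace that defines a `UserCreated` struct:
```elixir
raw_event = %EspEx.RawEvent{type: "UserCreated", data: %{user_id: 42}}
EspEx.EventTransformer.to_event(MyApp.Events, raw_event)
# => %MyApp.Events.UserCreated{user_id: 42}
# an unrecognized type would instead yield %EspEx.Event.Unknown{raw_event: raw_event}
```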
|
lib/esp_ex/event_transformer.ex
| 0.76856 | 0.436142 |
event_transformer.ex
|
starcoder
|
defmodule Forcex.Client do
defstruct access_token: nil,
api_version: "36.0",
token_type: nil,
endpoint: "https://login.salesforce.com",
services: %{}
require Logger
@doc """
Initially signs into Force.com API.
Login credentials may be supplied. Order for locating credentials:
1. Map supplied to `login/1`
2. Environment variables
3. Applications configuration
Supplying a Map of login credentials must be in the form of
```elixir
%{
username: "...",
password: "...",
security_token: "...",
client_id: "...",
client_secret: "..."
}
```
Environment variables
- `SALESFORCE_USERNAME`
- `SALESFORCE_PASSWORD`
- `SALESFORCE_SECURITY_TOKEN`
- `SALESFORCE_CLIENT_ID`
- `SALESFORCE_CLIENT_SECRET`
Application configuration
```elixir
config :forcex, Forcex.Client,
username: "<EMAIL>",
password: "<PASSWORD>",
security_token: "<PASSWORD>",
client_id: "CONNECTED_APP_OAUTH_CLIENT_ID",
client_secret: "CONNECTED_APP_OAUTH_CLIENT_SECRET"
```
Requires an additional call to `locate_services/1` to identify which Force.com
services are available for your deployment.
```elixir
client =
Forcex.Client.login
|> Forcex.Client.locate_services
```
"""
@spec login(map, integer) :: %Forcex.Client{}
@spec login(map, %Forcex.Client{}, integer) :: %Forcex.Client{}
def login(c \\ default_config(), index \\ 0) do
login(c, %__MODULE__{}, index)
end
def login(conf, starting_struct, index) do
conf
|> Enum.into(%{}, fn {key, value} ->
_set_credentials(key, value, index)
end)
|> (&Map.put(&1, :password, "#{&1.password}#{&1.security_token}")).()
|> Map.put(:grant_type, "password")
|> URI.encode_query()
|> (&Forcex.post("/services/oauth2/token?#{&1}", starting_struct)).()
|> handle_login_response
end
@spec _set_credentials(atom, String.t(), integer) :: tuple
defp _set_credentials(key, value, index) do
values = String.split(value, ",")
if Enum.at(values, index) != nil do
{key, Enum.at(values, index)}
else
{key, Enum.at(values, 0)}
end
end
def locate_services(client) do
%{client | services: Forcex.services(client)}
end
def create_sobject(client \\ %__MODULE__{}, name \\ "SOBject", map \\ %{})
def create_sobject(client, name, map) when is_atom(name) do
name =
name
|> Atom.to_string()
|> String.capitalize()
client
|> create_sobject(name, map)
end
def create_sobject(client, name, map) do
Forcex.post("/services/data/v20.0/sobjects/#{name}", map, client)
end
defp handle_login_response(%{
access_token: token,
token_type: token_type,
instance_url: endpoint
}) do
%__MODULE__{access_token: token, token_type: token_type, endpoint: endpoint}
end
defp handle_login_response({status_code, error_message}) do
Logger.warn(
"Cannot log into SFDC API. Please ensure you have Forcex properly configured. Got error code #{
status_code
} and message #{inspect(error_message)}"
)
%__MODULE__{}
end
def default_config() do
[:username, :password, :security_token, :client_id, :client_secret]
|> Enum.map(&{&1, get_val_from_env(&1)})
|> Enum.into(%{})
end
defp get_val_from_env(key) do
key
|> env_var
|> System.get_env()
|> case do
nil ->
Application.get_env(:forcex, __MODULE__, [])
|> Keyword.get(key)
val ->
val
end
end
defp env_var(key), do: "SALESFORCE_#{key |> to_string |> String.upcase()}"
end
|
lib/forcex/client.ex
| 0.757794 | 0.653065 |
client.ex
|
starcoder
|
defmodule DayFour do
def solve(input) do
input
|> String.split("\n\n", trim: true)
|> Stream.map(&construct_passport/1)
|> Stream.map(&validate_passport/1)
|> Enum.count(& &1)
end
@docp """
Constructs a keyword list from string of passport data.
"""
defp construct_passport(string) do
string
|> String.split([" ", "\n"], trim: true)
|> Enum.map(fn
x ->
[key, val] = String.split(x, ":", trim: true)
{String.to_atom(key), val}
end)
end
# Is there a more idiomatic way of doing this in elixir?
defp validate_passport(passport) do
with {:ok, byr} <- Keyword.fetch(passport, :byr),
:ok <- validate_byr(String.to_integer(byr)),
{:ok, iyr} <- Keyword.fetch(passport, :iyr),
:ok <- validate_iyr(String.to_integer(iyr)),
{:ok, eyr} <- Keyword.fetch(passport, :eyr),
:ok <- validate_eyr(String.to_integer(eyr)),
{:ok, hgt} <- Keyword.fetch(passport, :hgt),
:ok <- validate_hgt(hgt),
{:ok, hcl} <- Keyword.fetch(passport, :hcl),
:ok <- validate_hcl(hcl),
{:ok, ecl} <- Keyword.fetch(passport, :ecl),
:ok <- validate_ecl(ecl),
{:ok, pid} <- Keyword.fetch(passport, :pid),
:ok <- validate_pid(pid) do
true
else
_ -> false
end
end
defp validate_byr(byr) when is_integer(byr) and byr >= 1920 and byr <= 2002, do: :ok
defp validate_byr(_), do: :error
defp validate_iyr(iyr) when is_integer(iyr) and iyr >= 2010 and iyr <= 2020, do: :ok
defp validate_iyr(_), do: :error
defp validate_eyr(eyr) when is_integer(eyr) and eyr >= 2020 and eyr <= 2030, do: :ok
defp validate_eyr(_), do: :error
defp validate_hgt(hgt) do
# greedy, leaves the rest of the string as the unit
{h, units} = Integer.parse(hgt)
cond do
units == "cm" and h >= 150 and h <= 193 -> :ok
units == "in" and h >= 59 and h <= 76 -> :ok
true -> :error
end
end
defp validate_hcl(hcl) do
case String.match?(hcl, ~r/^#[a-fA-F0-9]{6}$/) do
true -> :ok
_ -> :error
end
end
defp validate_ecl(ecl) when ecl in ["amb", "blu", "brn", "gry", "grn", "hzl", "oth"], do: :ok
defp validate_ecl(_), do: :error
defp validate_pid(pid) do
case String.match?(pid, ~r/^[0-9]{9}$/) do
true -> :ok
_ -> :error
end
end
end
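A minimal sketch of `solve/1` with a single passport satisfying every field rule:
```elixir
"""
byr:1980 iyr:2012 eyr:2025 hgt:180cm
hcl:#123abc ecl:brn pid:012345678
"""
|> DayFour.solve()
# => 1
```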
|
adv_2020/lib/day_4.ex
| 0.555676 | 0.451327 |
day_4.ex
|
starcoder
|
defmodule Grizzly.ZWave.Commands.ConfigurationBulkSet do
@moduledoc """
This command is used to set the value of one or more configuration parameters.
Params:
* `:default` - This field is used to specify if the default value is to be restored for the specified configuration
parameters. Use carefully: Some devices will incorrectly reset ALL configuration values to default. (required)
* `:size` - This field is used to specify the number of bytes (1, 2 or 4) of the parameter values (required)
* `:handshake` - This field is used to indicate if a Configuration Bulk Report Command is to be returned when the
specified configuration parameters have been stored in non-volatile memory. (required)
* `:offset` - This field is used to specify the first parameter in a range of one or more parameters. (required)
* `:values` - These fields carry the values (of the same size) to be assigned. (required)
"""
@behaviour Grizzly.ZWave.Command
alias Grizzly.ZWave.Command
alias Grizzly.ZWave.CommandClasses.Configuration
@type param ::
{:default, boolean}
| {:size, 1 | 2 | 4}
| {:handshake, boolean}
| {:offset, non_neg_integer()}
| {:values, [integer]}
@impl true
@spec new([param()]) :: {:ok, Command.t()}
def new(params) do
command = %Command{
name: :configuration_bulk_set,
command_byte: 0x07,
command_class: Configuration,
params: params,
impl: __MODULE__
}
{:ok, command}
end
@impl true
@spec encode_params(Command.t()) :: binary()
def encode_params(command) do
default? = Command.param(command, :default, false)
default_bit = if default?, do: 1, else: 0
handshake? = Command.param(command, :handshake, false)
handshake_bit = if handshake?, do: 1, else: 0
size = Command.param!(command, :size)
offset = Command.param!(command, :offset)
values = Command.param!(command, :values)
count = Enum.count(values)
values_bin = for value <- values, into: <<>>, do: <<value::signed-integer-size(size)-unit(8)>>
<<offset::size(16), count, default_bit::size(1), handshake_bit::size(1), 0x00::size(3),
size::size(3)>> <> values_bin
end
@impl true
def decode_params(
<<offset::size(16), _count, default_bit::size(1), handshake_bit::size(1),
_reserved::size(3), size::size(3), values_bin::binary>>
) do
values = for <<value::signed-integer-size(size)-unit(8) <- values_bin>>, do: value
{:ok,
[
offset: offset,
default: default_bit == 1,
handshake: handshake_bit == 1,
size: size,
values: values
]}
end
end
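Working through `encode_params/1` by hand for a two-value set: two bytes of offset, one byte of count, one packed default/handshake/size byte, then the values:
```elixir
{:ok, command} =
  Grizzly.ZWave.Commands.ConfigurationBulkSet.new(
    size: 1, offset: 12, values: [10, 20]
  )
Grizzly.ZWave.Commands.ConfigurationBulkSet.encode_params(command)
# => <<0, 12, 2, 1, 10, 20>>  (offset 12, count 2, size bits 0b001, values)
```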
|
lib/grizzly/zwave/commands/configuration_bulk_set.ex
| 0.917617 | 0.569673 |
configuration_bulk_set.ex
|
starcoder
|
defmodule Robotica.Devices.Lifx do
@moduledoc """
Provides LIFX support functions.
"""
alias RoboticaCommon.Strings
defmodule HSBKA do
@moduledoc "A color with alpha channel"
@type t :: %__MODULE__{
hue: integer(),
saturation: integer(),
brightness: integer(),
kelvin: integer(),
alpha: integer()
}
defstruct hue: 120,
saturation: 100,
brightness: 100,
kelvin: 4000,
alpha: 100
end
@type callback :: (boolean | nil, list(HSBKA | nil) -> :ok)
@doc """
Eval a dictionary of strings into a Lifx HSBK object.
iex> import Robotica.Devices.Lifx
iex> values = %{
...> "frame" => 1,
...> "light" => 2
...> }
iex> eval_color(%{
...> brightness: 100,
...> hue: "{frame}*2",
...> saturation: "{light}*2",
...> kelvin: 3500,
...> alpha: 100,
...> }, values)
{:ok, %Robotica.Devices.Lifx.HSBKA{
brightness: 100,
hue: 2,
saturation: 4,
kelvin: 3500,
alpha: 100
}}
"""
@spec eval_color(map(), map()) :: {:ok, HSBKA.t()} | {:error, String.t()}
def eval_color(color, values) do
alpha = if color.alpha == nil, do: 100, else: color.alpha
with {:ok, brightness} <- Strings.eval_string(color.brightness, values),
{:ok, hue} <- Strings.eval_string(color.hue, values),
{:ok, saturation} <- Strings.eval_string(color.saturation, values),
{:ok, kelvin} <- Strings.eval_string(color.kelvin, values),
{:ok, alpha} <- Strings.eval_string(alpha, values) do
color = %HSBKA{
brightness: brightness,
hue: hue,
saturation: saturation,
kelvin: kelvin,
alpha: alpha
}
{:ok, color}
else
{:error, error} -> {:error, error}
end
end
@doc """
Expand and eval a condensed list of colors.
iex> import Robotica.Devices.Lifx
iex> color = %{
...> brightness: 100,
...> hue: "{frame}*30",
...> saturation: "{light}*100",
...> kelvin: 3500,
...> alpha: 100,
...> }
iex> colors = [
...> %{
...> count: "{frame}",
...> colors: [color, color]
...> }
...> ]
iex> expand_colors(colors, 0)
{:ok, []}
iex> expand_colors(colors, 1)
{:ok, [
%Robotica.Devices.Lifx.HSBKA{brightness: 100, hue: 30, saturation: 0, kelvin: 3500, alpha: 100},
%Robotica.Devices.Lifx.HSBKA{brightness: 100, hue: 30, saturation: 0, kelvin: 3500, alpha: 100},
]}
iex> expand_colors(colors, 2)
{:ok, [
%Robotica.Devices.Lifx.HSBKA{brightness: 100, hue: 60, saturation: 0, kelvin: 3500, alpha: 100},
%Robotica.Devices.Lifx.HSBKA{brightness: 100, hue: 60, saturation: 0, kelvin: 3500, alpha: 100},
%Robotica.Devices.Lifx.HSBKA{brightness: 100, hue: 60, saturation: 100, kelvin: 3500, alpha: 100},
%Robotica.Devices.Lifx.HSBKA{brightness: 100, hue: 60, saturation: 100, kelvin: 3500, alpha: 100},
]}
iex> expand_colors(colors, 3)
{:ok, [
%Robotica.Devices.Lifx.HSBKA{brightness: 100, hue: 90, saturation: 0, kelvin: 3500, alpha: 100},
%Robotica.Devices.Lifx.HSBKA{brightness: 100, hue: 90, saturation: 0, kelvin: 3500, alpha: 100},
%Robotica.Devices.Lifx.HSBKA{brightness: 100, hue: 90, saturation: 100, kelvin: 3500, alpha: 100},
%Robotica.Devices.Lifx.HSBKA{brightness: 100, hue: 90, saturation: 100, kelvin: 3500, alpha: 100},
%Robotica.Devices.Lifx.HSBKA{brightness: 100, hue: 90, saturation: 200, kelvin: 3500, alpha: 100},
%Robotica.Devices.Lifx.HSBKA{brightness: 100, hue: 90, saturation: 200, kelvin: 3500, alpha: 100},
]}
"""
def expand_colors(list, frame_n), do: loop_colors(list, frame_n, [])
defp loop_colors([], _, result), do: {:ok, Enum.reverse(result)}
defp loop_colors([head | tail], frame_n, result) do
values = %{
"frame" => frame_n
}
with {:ok, count} <- Strings.eval_string(head.count, values),
{:ok, result} <- expand_repeat(head.colors, frame_n, count, 0, result) do
loop_colors(tail, frame_n, result)
else
{:error, error} -> {:error, error}
end
end
defp expand_repeat(_, _, count, light_n, result) when light_n >= count do
{:ok, result}
end
defp expand_repeat(list, frame_n, count, light_n, result) do
case expand_list(list, frame_n, light_n, result) do
{:ok, result} ->
expand_repeat(list, frame_n, count, light_n + 1, result)
{:error, error} ->
{:error, error}
end
end
defp expand_list([], _, _, result), do: {:ok, result}
defp expand_list([head | tail], frame_n, light_n, result) do
values = %{
"frame" => frame_n,
"light" => light_n
}
case eval_color(head, values) do
{:ok, color} ->
result = [color | result]
expand_list(tail, frame_n, light_n, result)
{:error, error} ->
{:error, error}
end
end
@spec replicate(any(), integer()) :: list(any())
defp replicate(x, n), do: for(i <- 0..n, i > 0, do: x)
@spec fill_colors(list(HSBKA.t()), integer()) :: list(HSBKA.t() | nil)
defp fill_colors(list_hsbks, index) do
replicate(nil, index) ++ list_hsbks
end
@spec get_colors_from_command(integer, map(), integer()) ::
{:ok, list(HSBKA.t() | nil)} | {:error, String.t()}
def get_colors_from_command(number, frame, frame_n) do
cond do
not is_nil(frame.colors) and number > 0 ->
colors_index =
case frame.colors_index do
nil -> 0
index -> index
end
case expand_colors(frame.colors, frame_n) do
{:ok, colors} ->
colors = fill_colors(colors, colors_index)
{:ok, colors}
{:error, error} ->
{:error, "Got error in lifx expand_colors: #{error}"}
end
not is_nil(frame.color) ->
values = %{"frame" => frame_n}
case eval_color(frame.color, values) do
{:ok, color} ->
colors = replicate(color, number)
{:ok, colors}
{:error, error} ->
{:error, "Got error in lifx eval_color: #{error}"}
end
true ->
{:error, "No assigned color in get_colors_from_command"}
end
end
end
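A sketch of `get_colors_from_command/3` on the single-color branch; the shape of the `frame` map (`:colors`, `:colors_index`, `:color` keys) is inferred from the code above:
```elixir
frame = %{
  colors: nil,
  colors_index: nil,
  color: %{brightness: 100, hue: "{frame}*30", saturation: 50, kelvin: 3500, alpha: 100}
}
Robotica.Devices.Lifx.get_colors_from_command(2, frame, 2)
# => {:ok, [hsbka, hsbka]} where each hsbka is an HSBKA with hue: 60
```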
|
robotica/lib/robotica/devices/lifx.ex
| 0.852307 | 0.512815 |
lifx.ex
|
starcoder
|
defmodule Extensor.Tensor do
@moduledoc """
This is a simple wrapper struct for a tensorflow tensor. It holds the
data type, shape (dimensions), and binary data buffer for a tensor.
The layout of the buffer is the same as what is used in tensorflow -
row-major dimension ordering with native endian byte order. Extensor
performs very little manipulation of the data buffer, in order to minimize
the performance impact of using tensorflow from Elixir.
The following atoms are used to represent the corresponding tensorflow
data types.
|atom|tensorflow type|
|-|-|
|`:float`|`TF_FLOAT`|
|`:double`|`TF_DOUBLE`|
|`:int32`|`TF_INT32`|
|`:uint8`|`TF_UINT8`|
|`:int16`|`TF_INT16`|
|`:int8`|`TF_INT8`|
|`:string`|`TF_STRING`|
|`:complex64`|`TF_COMPLEX64`|
|`:complex`|`TF_COMPLEX`|
|`:int64`|`TF_INT64`|
|`:bool`|`TF_BOOL`|
|`:qint8`|`TF_QINT8`|
|`:quint8`|`TF_QUINT8`|
|`:qint32`|`TF_QINT32`|
|`:bfloat16`|`TF_BFLOAT16`|
|`:qint16`|`TF_QINT16`|
|`:quint16`|`TF_QUINT16`|
|`:uint16`|`TF_UINT16`|
|`:complex128`|`TF_COMPLEX128`|
|`:half`|`TF_HALF`|
|`:resource`|`TF_RESOURCE`|
|`:variant`|`TF_VARIANT`|
|`:uint32`|`TF_UINT32`|
|`:uint64`|`TF_UINT64`|
For convenience, though, functions are provided for constructing tensors
from (nested) lists. These functions use binary pattern matching and
concatenation to convert Elixir data types to/from the tensorflow binary
standard.
Example:
```elixir
iex> tensor = Extensor.Tensor.from_list([[1, 2], [3, 4]], :float)
%Extensor.Tensor{
type: :float,
shape: {2, 2},
data: <<0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64>>
}
iex> Extensor.Tensor.to_list(tensor)
[[1.0, 2.0], [3.0, 4.0]]
```
This module can also be used to verify that a tensor's shape is consistent
with its binary size. This can avoid segfaults in tensorflow when
shape/size don't match.
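For example (a sketch: a 2x2 `:float` tensor needs 16 bytes, but only 12 are
supplied here, so validation fails):
```elixir
iex> t = %Extensor.Tensor{type: :float, shape: {2, 2}, data: <<0::size(96)>>}
iex> match?({:error, %ArgumentError{}}, Extensor.Tensor.validate(t))
true
```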
"""
@type data_type ::
:float
| :double
| :int32
| :uint8
| :int16
| :int8
| :string
| :complex64
| :complex
| :int64
| :bool
| :qint8
| :quint8
| :qint32
| :bfloat16
| :qint16
| :quint16
| :uint16
| :complex128
| :half
| :resource
| :variant
| :uint32
| :uint64
@type t :: %__MODULE__{
type: data_type(),
shape: tuple(),
data: binary()
}
defstruct type: :float, shape: {0}, data: <<>>
@type_byte_size %{
float: 4,
double: 8,
int32: 4,
uint8: 1,
int16: 2,
int8: 1,
string: nil,
complex64: 8,
complex: 8,
int64: 8,
bool: 1,
qint8: 1,
quint8: 1,
qint32: 4,
bfloat16: 2,
qint16: 2,
quint16: 2,
uint16: 2,
complex128: 16,
half: 2,
resource: nil,
variant: nil,
uint32: 4,
uint64: 8
}
@doc "converts a (nested) list to a tensor structure"
@spec from_list(list :: list(), type :: data_type()) :: t()
def from_list(list, type \\ :float) do
shape = List.to_tuple(list_shape(list))
data =
list
|> List.flatten()
|> Enum.map(&to_binary(&1, type))
|> IO.iodata_to_binary()
%__MODULE__{type: type, shape: shape, data: data}
end
defp list_shape([head | _] = list), do: [length(list) | list_shape(head)]
defp list_shape([]), do: [0]
defp list_shape(_), do: []
@doc "converts a tensor to a nested list"
@spec to_list(tensor :: t()) :: list()
def to_list(%__MODULE__{data: <<>>} = _tensor) do
[]
end
def to_list(tensor) do
to_list(
tensor.data,
tensor.type,
_offset = 0,
_size = byte_size(tensor.data),
Tuple.to_list(tensor.shape)
)
end
defp to_list(data, type, offset, size, [dim | shape]) do
dim_size = div(size, dim)
Enum.map(0..(dim - 1), fn i ->
to_list(data, type, offset + i * dim_size, dim_size, shape)
end)
end
defp to_list(data, type, offset, size, []) do
from_binary(binary_part(data, offset, size), type)
end
defp from_binary(<<v::native-float-32>>, :float), do: v
defp from_binary(<<v::native-float-64>>, :double), do: v
defp from_binary(<<v::native-integer-32>>, :int32), do: v
defp from_binary(<<v::native-unsigned-integer-8>>, :uint8), do: v
defp from_binary(<<v::native-integer-16>>, :int16), do: v
defp from_binary(<<v::native-integer-8>>, :int8), do: v
defp from_binary(<<v::native-integer-64>>, :int64), do: v
defp from_binary(<<v::native-integer-8>>, :bool), do: v
defp from_binary(<<v::native-integer-8>>, :qint8), do: v
defp from_binary(<<v::native-unsigned-integer-8>>, :quint8), do: v
defp from_binary(<<v::native-integer-32>>, :qint32), do: v
defp from_binary(<<v::native-integer-16>>, :qint16), do: v
defp from_binary(<<v::native-unsigned-integer-16>>, :quint16), do: v
defp from_binary(<<v::native-unsigned-integer-32>>, :uint32), do: v
defp from_binary(<<v::native-unsigned-integer-64>>, :uint64), do: v
defp to_binary(v, :float), do: <<v::native-float-32>>
defp to_binary(v, :double), do: <<v::native-float-64>>
defp to_binary(v, :int32), do: <<v::native-integer-32>>
defp to_binary(v, :uint8), do: <<v::native-unsigned-integer-8>>
defp to_binary(v, :int16), do: <<v::native-integer-16>>
defp to_binary(v, :int8), do: <<v::native-integer-8>>
defp to_binary(v, :int64), do: <<v::native-integer-64>>
defp to_binary(v, :bool), do: <<v::native-integer-8>>
defp to_binary(v, :qint8), do: <<v::native-integer-8>>
defp to_binary(v, :quint8), do: <<v::native-unsigned-integer-8>>
defp to_binary(v, :qint32), do: <<v::native-integer-32>>
defp to_binary(v, :qint16), do: <<v::native-integer-16>>
defp to_binary(v, :quint16), do: <<v::native-unsigned-integer-16>>
defp to_binary(v, :uint32), do: <<v::native-unsigned-integer-32>>
defp to_binary(v, :uint64), do: <<v::native-unsigned-integer-64>>
@doc "validates the tensor shape/size"
@spec validate(tensor :: t()) :: :ok | {:error, any()}
def validate(tensor) do
validate!(tensor)
rescue
e -> {:error, e}
end
@doc "validates the tensor shape/size"
@spec validate!(tensor :: t()) :: :ok | no_return()
def validate!(tensor) do
if !Map.has_key?(@type_byte_size, tensor.type) do
raise ArgumentError, "invalid tensor type: #{tensor.type}"
end
if type_size = Map.fetch!(@type_byte_size, tensor.type) do
expect =
tensor.shape
|> Tuple.to_list()
|> Enum.reduce(type_size, &(&1 * &2))
actual = byte_size(tensor.data)
if expect !== actual do
raise ArgumentError, "tensor size mismatch: #{actual} != #{expect}"
end
end
:ok
end
end
|
lib/extensor/tensor.ex
| 0.878855 | 0.953144 |
tensor.ex
|
starcoder
|
defmodule Sanbase.Alert.Trigger.EthWalletTriggerSettings do
@moduledoc ~s"""
The EthWallet alert is triggered when the balance of a wallet or set of wallets
changes by a predefined amount for a specified asset (Ethereum, SAN tokens, etc.)
The alert can follow a single ethereum address, a list of ethereum addresses
or a project. When a list of addresses or a project is followed, all the addresses
are considered to be owned by a single entity and the transfers between them
are excluded.
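A minimal settings struct (a sketch; the values are hypothetical and the
`asset`/`operation` shapes follow the `Type` specs referenced below):
%Sanbase.Alert.Trigger.EthWalletTriggerSettings{
channel: "telegram",
target: %{eth_address: "0x0000000000000000000000000000000000000000"},
asset: %{slug: "santiment"},
operation: %{amount_up: 100.0},
time_window: "1d"
}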
"""
@behaviour Sanbase.Alert.Trigger.Settings.Behaviour
use Vex.Struct
import Sanbase.Validation
import Sanbase.Alert.Validation
import Sanbase.Alert.OperationEvaluation
import Sanbase.DateTimeUtils, only: [str_to_sec: 1, round_datetime: 2]
alias __MODULE__
alias Sanbase.Alert.Type
alias Sanbase.Model.Project
alias Sanbase.Clickhouse.HistoricalBalance
@trigger_type "eth_wallet"
@derive {Jason.Encoder, except: [:filtered_target, :triggered?, :payload, :template_kv]}
@enforce_keys [:type, :channel, :target, :asset]
defstruct type: @trigger_type,
channel: nil,
target: nil,
asset: nil,
operation: nil,
time_window: "1d",
# Private fields, not stored in DB.
filtered_target: %{list: []},
triggered?: false,
payload: %{},
template_kv: %{}
@type t :: %__MODULE__{
type: Type.trigger_type(),
channel: Type.channel(),
target: Type.complex_target(),
asset: Type.asset(),
operation: Type.operation(),
time_window: Type.time_window(),
# Private fields, not stored in DB.
filtered_target: Type.filtered_target(),
triggered?: boolean(),
payload: Type.payload(),
template_kv: Type.template_kv()
}
validates(:channel, &valid_notification_channel?/1)
validates(:target, &valid_eth_wallet_target?/1)
validates(:asset, &valid_slug?/1)
validates(:operation, &valid_absolute_change_operation?/1)
validates(:time_window, &valid_time_window?/1)
@spec type() :: String.t()
def type(), do: @trigger_type
def post_create_process(_trigger), do: :nochange
def post_update_process(_trigger), do: :nochange
def get_data(
%__MODULE__{
filtered_target: %{list: target_list, type: :eth_address}
} = settings
) do
to = Timex.now()
from = Timex.shift(to, seconds: -str_to_sec(settings.time_window))
target_list
|> Enum.map(fn addr ->
case balance_change(addr, settings.asset.slug, from, to) do
{:ok, [%{address: ^addr} = result]} ->
{addr, from,
%{
balance_start: result.balance_start,
balance_end: result.balance_end,
balance_change: result.balance_change_amount
}}
_ ->
nil
end
end)
|> Enum.reject(&is_nil/1)
end
def get_data(%__MODULE__{filtered_target: %{list: target_list, type: :slug}} = settings) do
to = Timex.now()
from = Timex.shift(to, seconds: -str_to_sec(settings.time_window))
target_list
|> Project.by_slug()
|> Enum.map(fn %Project{} = project ->
{:ok, eth_addresses} = Project.eth_addresses(project)
{:ok, project_balance_data} =
eth_addresses
|> Enum.map(&String.downcase/1)
|> balance_change(settings.asset.slug, from, to)
{balance_start, balance_end, balance_change} =
project_balance_data
|> Enum.reduce({0, 0, 0}, fn
%{} = map, {start_acc, end_acc, change_acc} ->
{
start_acc + map.balance_start,
end_acc + map.balance_end,
change_acc + map.balance_change_amount
}
end)
{project, from,
%{
balance_start: balance_start,
balance_end: balance_end,
balance_change: balance_change
}}
end)
end
defp balance_change(addresses, slug, from, to) do
cache_key =
{__MODULE__, :balance_change, addresses, slug, round_datetime(from, second: 60),
round_datetime(to, second: 60)}
|> Sanbase.Cache.hash()
Sanbase.Cache.get_or_store(:alerts_evaluator_cache, cache_key, fn ->
selector = %{infrastructure: "ETH", slug: slug}
case HistoricalBalance.balance_change(selector, addresses, from, to) do
{:ok, result} -> {:ok, result}
_ -> {:ok, []}
end
end)
end
defimpl Sanbase.Alert.Settings, for: EthWalletTriggerSettings do
def triggered?(%EthWalletTriggerSettings{triggered?: triggered}), do: triggered
def evaluate(%EthWalletTriggerSettings{} = settings, _trigger) do
case EthWalletTriggerSettings.get_data(settings) do
list when is_list(list) and list != [] ->
build_result(list, settings)
_ ->
%EthWalletTriggerSettings{settings | triggered?: false}
end
end
# The result heavily depends on `last_triggered`, so just the settings are not enough
def cache_key(%EthWalletTriggerSettings{}), do: :nocache
defp build_result(list, settings) do
template_kv =
Enum.reduce(list, %{}, fn
{project_or_addr, from, %{balance_change: balance_change} = balance_data}, acc ->
case operation_triggered?(balance_change, settings.operation) do
true ->
Map.put(
acc,
to_identifier(project_or_addr),
template_kv(project_or_addr, settings, balance_data, from)
)
false ->
acc
end
end)
%EthWalletTriggerSettings{
settings
| template_kv: template_kv,
triggered?: template_kv != %{}
}
end
defp to_identifier(%Project{slug: slug}), do: slug
defp to_identifier(addr) when is_binary(addr), do: addr
defp operation_text(%{amount_up: _}), do: "increased"
defp operation_text(%{amount_down: _}), do: "decreased"
defp template_kv(%Project{} = project, settings, balance_data, from) do
kv = %{
type: EthWalletTriggerSettings.type(),
operation: settings.operation,
project_name: project.name,
project_slug: project.slug,
asset: settings.asset.slug,
since: DateTime.truncate(from, :second),
balance_change_text: operation_text(settings.operation),
balance_change: balance_data.balance_change,
balance_change_abs: abs(balance_data.balance_change),
balance: balance_data.balance_end,
previous_balance: balance_data.balance_start
}
template = """
🔔 \#{{project_ticker}} | **{{project_name}}**'s {{asset}} balance {{balance_change_text}} by {{balance_change}} since {{since}}.
was: {{previous_balance}}, now: {{balance}}.
"""
{template, kv}
end
defp template_kv(address, settings, balance_data, from) do
asset = settings.asset.slug
kv = %{
type: EthWalletTriggerSettings.type(),
operation: settings.operation,
target: settings.target,
asset: asset,
address: address,
historical_balance_link: SanbaseWeb.Endpoint.historical_balance_url(address, asset),
since: DateTime.truncate(from, :second),
balance_change: balance_data.balance_change,
balance_change_abs: abs(balance_data.balance_change),
balance: balance_data.balance_end,
previous_balance: balance_data.balance_start
}
template = """
🔔The address {{address}}'s {{asset}} balance #{operation_text(settings.operation)} by {{balance_change_abs}} since {{since}}.
was: {{previous_balance}}, now: {{balance}}
"""
{template, kv}
end
end
end
|
lib/sanbase/alerts/trigger/settings/eth_wallet_trigger_settings.ex
| 0.749637 | 0.401981 |
eth_wallet_trigger_settings.ex
|
starcoder
|
defmodule FlowAssertions.MapA do
use FlowAssertions.Define
alias FlowAssertions.Messages
alias FlowAssertions.{MiscA,EnumA}
@moduledoc """
Assertions that apply to Maps and structures and sometimes to keyword lists.
`assert_fields/2` and `assert_same_map/3` are the most important.
"""
@doc """
Test the existence and value of multiple fields with a single assertion:
assert_fields(some_map, key1: 12, key2: "hello")
You can test just for existence:
assert_fields(some_map, [:key1, :key2])
The keyword list need not contain all of the fields in `some_map`.
Values in the keyword list are compared as with
`FlowAssertions.MiscA.assert_good_enough/2`. For example, regular
expressions can be used to check strings:
assert_fields(some_map, name: ~r/_cohort/)
`assert_fields` can also take a map as its second argument. That's
useful when the map to be tested has non-keyword arguments:
assert_fields(string_map, %{"a" => 3})
"""
# Credit: <NAME>reeman inspired this.
defchain assert_fields(kvs, list_or_map) do
assert_present = fn key ->
struct_must_have_key!(kvs, key)
elaborate_assert(Map.has_key?(kvs, key),
"Field `#{inspect key}` is missing",
left: kvs,
right: list_or_map)
key
end
refute_single_error = fn key, expected ->
adjust_assertion_error(fn ->
MiscA.assert_good_enough(Map.get(kvs, key), expected)
end,
message: Messages.wrong_field_value(key),
expr: AssertionError.no_value)
end
list_or_map
|> Enum.map(fn
{key, expected} ->
key |> assert_present.() |> refute_single_error.(expected)
key ->
assert_present.(key)
end)
end
@doc """
Same as `assert_fields/2` but more pleasingly grammatical
when testing only one field:
assert_field(some_map, key: "value")
When checking existence, you don't have to use a list:
assert_field(some_map, :key)
"""
defchain assert_field(kvs, list) when is_list(list) do
assert_fields(kvs, list)
end
defchain assert_field(kvs, singleton) do
assert_fields(kvs, [singleton])
end
# ----------------------------------------------------------------------------
@doc """
Fail if any of the fields in the `field_list` are present.
%{a: 1} |> refute_fields([:a, :b]) # fails
"""
defchain refute_fields(some_map, field_list) when is_list(field_list) do
for field <- field_list do
elaborate_refute(Map.has_key?(some_map, field),
Messages.field_wrongly_present(field),
left: some_map)
end
end
def refute_fields(some_map, field),
do: refute_fields(some_map, [field])
@doc """
Same as refute_fields/2, but for a single field.
%{a: 1} |> refute_field(:a) # fails
"""
def refute_field(some_map, field) when is_list(field),
do: refute_fields(some_map, field)
def refute_field(some_map, field),
do: refute_fields(some_map, [field])
# ----------------------------------------------------------------------------
@doc """
An equality comparison of two maps that gives control over
which fields should not be compared or should be compared differently.
It is typically used after some `old` map has been transformed to make a
`new` one.
To exclude some fields from the comparison:
assert_same_map(new, old, ignoring: [:lock_version, :updated_at])
To compare only some of the keys:
assert_same_map(new, old, comparing: [:name, :people])
To assert different values for particular fields:
assert_same_map(new, old,
except: [lock_version: old.lock_version + 1,
people: &Enum.empty/1])
Note that the `except` comparison uses
`FlowAssertions.MiscA.assert_good_enough/2`.
Note that if the first value is a struct, the second must have the same type:
Assertion with == failed
left: %S{b: 3}
right: %R{b: 3}
"""
defchain assert_same_map(new, old, opts \\ []) do
if Keyword.has_key?(opts, :ignoring) && Keyword.has_key?(opts, :comparing),
do: flunk("Test error: you can't use both `:ignoring` and `:comparing`")
get_list = fn key -> Keyword.get(opts, key, []) end
{remaining_new, remaining_old} =
compare_specific_fields(new, old, get_list.(:except))
if Keyword.has_key?(opts, :comparing) do
assert_comparing_keys(remaining_new, remaining_old, get_list.(:comparing))
else
assert_ignoring_keys(remaining_new, remaining_old, get_list.(:ignoring))
end
end
# So much for the single responsibility principle. But it feels *so good*.
defp compare_specific_fields(new, old, expected_kvs) do
expected_keys = Keyword.keys(expected_kvs)
struct_must_have_keys!(new, expected_keys)
assert_fields(new, expected_kvs)
{ Map.drop(new, expected_keys), Map.drop(old, expected_keys)}
end
defp assert_ignoring_keys(new, old, fields_to_ignore) do
struct_must_have_keys!(new, fields_to_ignore)
elaborate_assert_equal(
Map.drop(new, fields_to_ignore),
Map.drop(old, fields_to_ignore))
end
defp assert_comparing_keys(new, old, fields_to_compare) do
struct_must_have_keys!(new, fields_to_compare)
elaborate_assert_equal(
Map.take(new, fields_to_compare),
Map.take(old, fields_to_compare))
end
# ----------------------------------------------------------------------------
@doc """
Assert that the value of the map at the key matches a binding form.
assert_field_shape(map, :field, %User{})
assert_field_shape(map, :field, [_ | _])
See `FlowAssertions.MiscA.assert_shape/2` for more.
"""
defmacro assert_field_shape(map, key, shape) do
quote do
eval_once = unquote(map)
adjust_assertion_error(fn ->
assert_shape(Map.fetch!(eval_once, unquote(key)), unquote(shape))
end,
message: Messages.no_field_match(unquote(key)))
eval_once
end
end
@doc """
Take a map and a field. Return the single element in the field's value.
with_singleton_content(%{a: [1]}, :a) # returns `1`
This is typically used with fields that take list values. Often,
you only want to test the empty list and a singleton list.
(When testing functions that produce their values with `Enum.map/2` or `for`,
creating a second list element gains you nothing.)
Using `with_singleton_content`, it's
convenient to apply assertions to the single element:
view_model
|> assert_assoc_loaded(:service_gaps)
|> with_singleton_content(:service_gaps)
|> assert_shape(%VM.ServiceGap{})
|> Ex.Datespan.assert_datestrings(:first)
If `field` does not exist or isn't an `Enum`, `with_singleton_content` will fail in
the same way `FlowAssertions.EnumA.singleton_content/1` does.
"""
def with_singleton_content(map, field) do
adjust_assertion_error(fn ->
map
|> Map.get(field)
|> EnumA.singleton_content
end, message: Messages.expected_1_element_field(field))
end
end
|
lib/map_a.ex
| 0.913828 | 0.873323 |
map_a.ex
|
starcoder
|
defmodule CSQuery.Expression do
@moduledoc """
A representation of an expression in the AWS CloudSearch structured query
syntax.
"""
alias CSQuery.{FieldValue, OperatorOption}
@typedoc "Valid operator names."
@type operators :: :and | :near | :not | :or | :phrase | :prefix | :range | :term
@typedoc "The `CSQuery.Expression` struct."
@type t :: %__MODULE__{
operator: operators,
options: list(OperatorOption.t()),
fields: list(FieldValue.t())
}
@enforce_keys [:operator]
defstruct [:operator, options: [], fields: []]
@doc """
Provide a `CSQuery.Expression` struct for the provided value.
If the value is a keyword list, the `CSQuery.Expression` will be constructed
from the keyword pairs. Each keyword pair will be parsed as an operator and
list (effectively calling `new/2` as `&new(elem(&1, 0), elem(&1, 1))`).
An exception will be raised if there is an invalid expression constructed.
If the value is a `CSQuery.Expression` struct, it will be returned.
iex> CSQuery.Expression.new(%CSQuery.Expression{operator: :and})
%CSQuery.Expression{operator: :and}
"""
def new(value)
@spec new(keyword) :: list(t) | no_return
def new(list) when is_list(list), do: Enum.map(list, &build/1)
@spec new(t) :: t
def new(%__MODULE__{} = value), do: value
@doc """
Provide a `CSQuery.Expression` struct for the operator and conditions.
See `CSQuery.and!/1`, `CSQuery.near!/1`, `CSQuery.not!/1`, `CSQuery.or!/1`,
`CSQuery.phrase!/1`, `CSQuery.prefix!/1`, `CSQuery.range!/1`, and
`CSQuery.term!/1` for the details on how expressions are built.
An exception will be raised if there is an invalid expression constructed.
"""
@rules %{
[:boost] => [:and, :or],
[:boost, :field] => [:not, :term, :phrase, :prefix, :range],
[:boost, :distance, :field] => [:near]
}
for {opts, ops} <- @rules, op <- ops do
@spec new(unquote(op), list) :: t | no_return
def new(unquote(op), conditions) when is_list(conditions) do
build(unquote(op), args_for(unquote(op), conditions, unquote(opts)))
end
end
def new(op, _), do: raise(CSQuery.UnknownOperatorError, op)
@doc """
Convert the parsed query expression to the AWS CloudSearch structured query
syntax string.
If a list of queries is provided, convert each item to structured query
syntax.
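
For example, a `nil` query maps to an empty string:

iex> CSQuery.Expression.to_query(nil)
""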
"""
def to_query(query)
@spec to_query(nil) :: String.t()
def to_query(nil), do: ""
@spec to_query(list(t)) :: list(String.t())
def to_query(list) when is_list(list), do: Enum.map(list, &to_query/1)
@spec to_query(t) :: String.t()
def to_query(%__MODULE__{} = expr) do
expr =
[expr.operator, expr.options, expr.fields]
|> Enum.flat_map(&value_for/1)
|> Enum.join(" ")
"(#{expr})"
end
defp build({operator, list}), do: new(operator, list)
defp build(operator, {options, values, named}) do
options = Enum.map(options, &OperatorOption.new/1)
fields =
Enum.map(values, &FieldValue.new/1) ++
Enum.map(named, &FieldValue.new(elem(&1, 0), elem(&1, 1)))
validate_fields!(operator, fields)
%__MODULE__{
operator: operator,
options: options,
fields: fields
}
end
@spec validate_fields!(operators, list(FieldValue.t())) :: :ok | no_return
defp validate_fields!(op, []), do: raise(CSQuery.NoFieldValuesError, op)
defp validate_fields!(op, fields)
when op in [:near, :not, :phrase, :prefix, :range, :term] and length(fields) > 1,
do: raise(CSQuery.TooManyFieldValuesError, {op, length(fields)})
defp validate_fields!(:near, [%FieldValue{value: value}]) when is_binary(value) do
if(String.contains?(value, " "), do: :ok, else: raise(CSQuery.MultipleWordsRequiredError))
end
defp validate_fields!(op, [%FieldValue{value: value}])
when op in [:near, :phrase, :prefix] and not is_binary(value),
do: raise(CSQuery.StringRequiredError, op)
defp validate_fields!(:range, [%FieldValue{value: %CSQuery.Range{}}]), do: :ok
defp validate_fields!(:range, [%FieldValue{value: value}]) when is_binary(value) do
if(CSQuery.Range.is_range_string?(value), do: :ok, else: raise(CSQuery.RangeRequiredError))
end
defp validate_fields!(:range, _), do: raise(CSQuery.RangeRequiredError)
defp validate_fields!(op, _) when op in [:and, :or], do: :ok
defp validate_fields!(_, _), do: :ok
@spec value_for(t) :: list(String.t())
defp value_for(%__MODULE__{} = expr), do: [to_query(expr)]
@spec value_for(operators) :: list(String.t())
defp value_for(operator) when is_atom(operator), do: [to_string(operator)]
@spec value_for(%FieldValue{} | %OperatorOption{}) :: list(String.t())
defp value_for(%mod{} = value) when mod in [FieldValue, OperatorOption],
do: [mod.to_value(value)]
@spec value_for(list) :: list(String.t())
defp value_for(list) when is_list(list), do: Enum.map(list, &value_for/1)
defp args_for(_operator, list, option_keys) do
list
|> Enum.filter(& &1)
|> split_named_values()
|> extract_options(option_keys)
end
defp split_named_values(list) when is_list(list), do: Enum.split_with(list, &is_keyword?/1)
defp extract_options({named, values}, option_keys) do
{option_values, values} = extract_option_values(values, option_keys)
{named_options, named_values} = extract_named_options(named, option_keys)
{option_values ++ named_options, values, named_values}
end
defp extract_option_values(values, option_keys) do
{options, values} = Enum.split_with(values, &valid_option?(&1, option_keys))
{options, Enum.reject(values, &is_option?/1)}
end
defp extract_named_options(named, option_keys) do
opts =
option_keys
|> Enum.reduce(%{}, &Map.put(&2, &1, Keyword.get(named, &1)))
|> Enum.filter(&elem(&1, 1))
{opts, Keyword.drop(named, option_keys)}
end
defp is_keyword?({key, _}) when is_atom(key) and not is_nil(key), do: true
defp is_keyword?(_), do: false
defp valid_option?(%OperatorOption{name: name}, option_keys), do: name in option_keys
defp valid_option?(_, _), do: false
defp is_option?(%OperatorOption{}), do: true
defp is_option?(_), do: false
end
|
lib/csquery/expression.ex
| 0.880752 | 0.646314 |
expression.ex
|
starcoder
|
defmodule Geocalc.DMS do
@moduledoc """
`Geocalc.DMS` is a struct which contains degrees, minutes, seconds and a cardinal direction.
It also provides functions to convert DMS to decimal degrees.
"""
@enforce_keys [:hours, :minutes, :seconds, :direction]
defstruct [:hours, :minutes, :seconds, :direction]
@doc """
Converts `Geocalc.DMS` to decimal degrees
## Example
iex> dms = %Geocalc.DMS{hours: 13, minutes: 31, seconds: 59.998, direction: "N"}
iex> Geocalc.DMS.to_decimal(dms)
13.533332777777778
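Longitude hours also wrap around the antimeridian:
iex> dms = %Geocalc.DMS{hours: 190, minutes: 0, seconds: 0, direction: "W"}
iex> Geocalc.DMS.to_decimal(dms)
170.0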
"""
@spec to_decimal(Geocalc.DMS.t()) :: number | :error
def to_decimal(%Geocalc.DMS{minutes: minutes}) when is_integer(minutes) and minutes >= 60 do
:error
end
def to_decimal(%Geocalc.DMS{minutes: minutes}) when is_integer(minutes) and minutes < 0 do
:error
end
def to_decimal(%Geocalc.DMS{seconds: seconds}) when is_number(seconds) and seconds >= 60 do
:error
end
def to_decimal(%Geocalc.DMS{seconds: seconds}) when is_number(seconds) and seconds < 0 do
:error
end
def to_decimal(%Geocalc.DMS{hours: hours, direction: "N"}) when is_integer(hours) and hours > 90 do
:error
end
def to_decimal(%Geocalc.DMS{hours: hours, direction: "N"}) when is_integer(hours) and hours < -90 do
:error
end
def to_decimal(%Geocalc.DMS{hours: hours, minutes: minutes, seconds: seconds, direction: "N"}) do
hours + minutes / 60 + seconds / 3600
end
def to_decimal(%Geocalc.DMS{hours: hours, direction: "S"}) when is_integer(hours) and hours > 90 do
:error
end
def to_decimal(%Geocalc.DMS{hours: hours, direction: "S"}) when is_integer(hours) and hours < -90 do
:error
end
def to_decimal(%Geocalc.DMS{hours: hours, minutes: minutes, seconds: seconds, direction: "S"}) do
-(hours + minutes / 60 + seconds / 3600)
end
def to_decimal(%Geocalc.DMS{hours: hours, minutes: minutes, seconds: seconds, direction: "W"}) do
-(longitude_hours(hours) + minutes / 60 + seconds / 3600)
end
def to_decimal(%Geocalc.DMS{hours: hours, minutes: minutes, seconds: seconds, direction: "E"}) do
longitude_hours(hours) + minutes / 60 + seconds / 3600
end
defp longitude_hours(hours) when is_integer(hours) and hours > 180 do
longitude_hours(hours - 360)
end
defp longitude_hours(hours) when is_integer(hours) and hours < -180 do
longitude_hours(hours + 360)
end
defp longitude_hours(hours) when is_integer(hours) do
hours
end
end
|
lib/geocalc/dms.ex
| 0.920504 | 0.884189 |
dms.ex
|
starcoder
|
defmodule Sanbase.Auth.Hmac do
@moduledoc """
What is HMAC? HMAC is a way to combine a key and a hashing function in a way
that's harder to attack.
HMAC does not encrypt the message.
The HMAC calculation is the following:
`HMAC(K,m) = H((K' ⊕ opad) || H ((K' ⊕ ipad)||m))`
where
- H is a cryptographic hash function,
- K is the secret key,
- m is the message to be authenticated,
- K' is another secret key, derived from the original key K
(by padding K to the right with extra zeroes to the input block size of the hash function,
or by hashing K if it is longer than that block size),
- || denotes concatenation,
- ⊕ denotes exclusive or (XOR),
- opad is the outer padding (0x5c5c5c…5c5c, one-block-long hexadecimal constant),
- ipad is the inner padding (0x363636…3636, one-block-long hexadecimal constant).
ipad and opad are chosen to have a large Hamming distance
Currently the apikey does not give access to mutations but only gives access to read
data that requires authentication and possibly SAN staking.
We use HMAC to solve a particular problem - we should be able to show the users
the apikey in plain text at any time but we also should not store the apikey in plaintext
in the database. Because of this the following approach has been chosen:
1. Have a secret key on the server
2. When an apikey generation request is made, generate a token and store it in the database
3. Feed the HMAC algorithm the sha256 hashing function, the secret key and generated token.
The result of the HMAC is the apikey.
4. For easier search in the database prepend the apikey with the token itself. This does not
compromise the security as the real secret is the secret key and not the user token.
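
Example flow (a sketch, using only the functions defined below):

token = Sanbase.Auth.Hmac.generate_token()
apikey = Sanbase.Auth.Hmac.generate_apikey(token)
{:ok, {^token, _hmac}} = Sanbase.Auth.Hmac.split_apikey(apikey)
true = Sanbase.Auth.Hmac.apikey_valid?(token, apikey)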
"""
require Sanbase.Utils.Config, as: Config
require Logger
@rand_bytes_length 32
@apikey_length 16
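# Note: :crypto.hmac/3 was removed in OTP 24; on modern OTP the equivalent
# call is :crypto.mac(:hmac, :sha256, secret_key(), token).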
@spec hmac(String.t()) :: String.t()
def hmac(token) when is_binary(token) do
:crypto.hmac(:sha256, secret_key(), token)
|> Base.encode32(case: :lower)
|> binary_part(0, byte_size(token))
end
@spec generate_token :: String.t()
def generate_token() do
:crypto.strong_rand_bytes(@rand_bytes_length)
|> Base.encode32(case: :lower)
|> binary_part(0, @apikey_length)
end
@spec generate_apikey(String.t()) :: String.t()
def generate_apikey(token) do
token <> "_" <> hmac(token)
end
@spec apikey_valid?(String.t(), String.t()) :: boolean
def apikey_valid?(token, apikey) when byte_size(apikey) >= 32 and byte_size(token) >= 16 do
apikey == generate_apikey(token)
end
def apikey_valid?(_, _), do: false
@spec split_apikey(String.t()) :: {:ok, {String.t(), String.t()}} | {:error, String.t()}
def split_apikey(token_apikey) do
case String.split(token_apikey, "_") do
[token, apikey] ->
{:ok, {token, apikey}}
_ ->
{:error,
"Apikey '#{token_apikey}' is malformed - it must have two string parts separated by underscore"}
end
end
# Private functions
defp secret_key(), do: Config.get(:secret_key)
end
|
lib/sanbase/auth/apikey/hmac.ex
| 0.825976 | 0.628778 |
hmac.ex
|
starcoder
|
defmodule ExampleSupervisor do
def start do
# List comprehension creates a consumer per cpu core
children =
for i <- 1..System.schedulers_online(),
do: Supervisor.child_spec({ExampleConsumer, []}, id: {:consumer, i})
Supervisor.start_link(children, strategy: :one_for_one)
end
end
defmodule ExampleConsumer do
use Nostrum.Consumer
alias Nostrum.Api
require Logger
def start_link do
Consumer.start_link(__MODULE__, :state)
end
def handle_event({:MESSAGE_CREATE, {msg}, _ws_state}, state) do
# Let's do a little command 'parsing'
# We first grab the content, split it into an array and then convert the array to a tuple
args = msg.content |> String.split(" ") |> List.to_tuple()
# Check if first arg is the command
if elem(args, 0) === "!userinfo" do
# First, we check if there are 2 arguments | This being `!userinfo ID` | Otherwise we error out in the `false` clause.
# Then we grab the second arg | `ID`
# Parse it into an integer, otherwise error out within the `:error` clause.
# Then, we try and grab the data from the cache, falling back to a request to Discord
# on a cache miss (see the `cache_or_api/2` helper below).
# If that fails as well, it will go over into the else statement down below and print out that it can't find anyone/anything.
with true <- tuple_size(args) == 2,
second_arg = elem(args, 1),
{user_id, _binary} <- Integer.parse(second_arg),
{:ok, user} <- cache_or_api(Nostrum.Cache.UserCache.get(id: user_id), fn -> Api.get_user(user_id) end),
{:ok, channel} <- cache_or_api(Nostrum.Cache.ChannelCache.get(id: msg.channel_id), fn -> Api.get_channel(msg.channel_id) end),
{:ok, guild} <- cache_or_api(Nostrum.Cache.GuildCache.get(channel.guild_id), fn -> Api.get_guild(channel.guild_id) end) do
Api.create_message!(
msg.channel_id,
"#{user_id} belongs to a person named: #{user.username}\nMessage sent in: #{
channel.name
}\nChannel is in: #{guild.name}"
)
else
# For this, we just print out we can't find anyone while using the error from the with statement. If it can't find someone, it will print out `user_not_found` as the reason.
{:error, reason} ->
Api.create_message!(msg.channel_id, "Sorry, something went wrong: #{reason}")
# They typed an invalid input, probably due to using letters rather than numbers.
:error ->
Api.create_message!(msg.channel_id, "Make sure the User ID is only numbers")
# There wasn't 2 elements in there, so it returned false.
false ->
Api.create_message!(msg.channel_id, "Please supply a User ID")
end
end
{:ok, state}
end
# Default event handler, if you don't include this, your consumer WILL crash if
# you don't have a method definition for each event type.
def handle_event(_, state) do
{:ok, state}
end
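
# Hypothetical helper (not part of the original example): keeps a cache hit,
# otherwise runs the fallback API request, so the cache-then-API behaviour
# described in the comments above actually happens.
defp cache_or_api({:ok, _} = hit, _fallback), do: hit
defp cache_or_api(_cache_miss, fallback), do: fallback.()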
end
|
examples/cache.ex
| 0.672009 | 0.424591 |
cache.ex
|
starcoder
|
defmodule Alerts.Cache.Store do
@moduledoc """
A process that maintains a few ETS tables. The tables are
* :alert_id_to_alerts - a set with has the "alert_id" as the key and
and %Alerts.Alert{} as the value.
* :route_id_and_type_to_alert_ids - a bag with "route_id" and "route_type" as keys and "alert_id" as
value. Each route_id, route_type pair in the table only has a single alert_id, but there can
be multiple entries for the same route_id and route_type
* :alert_banner - a set with a single key, :banner, which has either the value
nil or an %Alert.Banner{}
All the tables are protected, with read_concurrency: true. The updates occur
via update/2, which is a GenServer.call to the server, while all the reads
occur via client functions that query the ETS tables directly.
"""
use GenServer
# Client
def start_link(_) do
GenServer.start_link(__MODULE__, [], name: __MODULE__)
end
@doc """
Sets the ETS cache to these set of alerts and optional banner.
The previous alerts in the cache are all removed.
"""
@spec update([Alerts.Alert.t()], Alerts.Banner.t() | nil) :: :ok
def update(alerts, banner_alert) do
GenServer.call(__MODULE__, {:update, alerts, banner_alert})
end
@doc """
Retrieves all the alert ids (if any) for the given list of routes.
The IDs returned here can be passed to alerts/1 to get the alerts themselves.
"""
@spec alert_ids_for_routes([String.t()]) :: [String.t()]
def alert_ids_for_routes(route_ids) do
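# One match spec per route: match {route_id, _route_type, alert_id} entries
# and return the bound alert_id (:"$1").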
keys = Enum.map(route_ids, &{{&1, :_, :"$1"}, [], [:"$1"]})
:ets.select(:route_id_and_type_to_alert_ids, keys)
end
@spec alert_ids_for_route_types(Enumerable.t()) :: [String.t()]
def alert_ids_for_route_types(types) do
keys = Enum.map(types, &{{:_, &1, :"$1"}, [], [:"$1"]})
:ets.select(:route_id_and_type_to_alert_ids, keys)
end
def alert_ids_for_route_id_and_type(route_id, route_type) do
keys = [{{route_id, :_, :"$1"}, [], [:"$1"]}, {{nil, route_type, :"$1"}, [], [:"$1"]}]
:ets.select(:route_id_and_type_to_alert_ids, keys)
end
@doc """
Retrieves the alert objects given a list of alert IDs. If an ID
is passed that doesn't have a current alert, it is ignored.
"""
@spec alerts([String.t()], DateTime.t()) :: [Alerts.Alert.t()]
def alerts(alert_ids, now) do
:alert_id_to_alert
|> select_many(alert_ids)
|> Alerts.Sort.sort(now)
end
@doc """
Retrieves an alert object given an alert ID.
"""
@spec alert(String.t()) :: Alerts.Alert.t() | nil
def alert(alert_id) do
:alert_id_to_alert
|> select_many([alert_id])
|> List.first()
end
@doc """
Retrieves the full set of current alerts in priority sorted order.
"""
@spec all_alerts(DateTime.t()) :: [Alerts.Alert.t()]
def all_alerts(now) do
:alert_id_to_alert
|> :ets.select([{{:_, :"$1"}, [], [:"$1"]}])
|> Alerts.Sort.sort(now)
end
@doc """
Retrieves the saved Alert Banner if present.
"""
@spec banner() :: Alerts.Banner.t() | nil
def banner do
case :ets.lookup(:alert_banner, :banner) do
[{:banner, banner}] -> banner
_ -> nil
end
end
defp select_many(table, keys) do
selectors = for key <- keys, do: {{key, :"$1"}, [], [:"$1"]}
:ets.select(table, selectors)
end
# Server
@impl true
def init(_args) do
# no cover
_ = :ets.new(:alert_id_to_alert, [:set, :protected, :named_table, read_concurrency: true])
# no cover
_ =
:ets.new(:route_id_and_type_to_alert_ids, [
:bag,
:protected,
:named_table,
read_concurrency: true
])
# no cover
_ = :ets.new(:alert_banner, [:set, :protected, :named_table, read_concurrency: true])
{:ok, []}
end
@impl true
def handle_call({:update, alerts, banner}, _from, state) do
{alert_inserts, route_inserts} =
Enum.reduce(alerts, {[], []}, fn alert, {alert_inserts_acc, route_inserts_acc} ->
{
[{alert.id, alert} | alert_inserts_acc],
Enum.map(alert.informed_entity, &{&1.route, &1.route_type, alert.id}) ++
route_inserts_acc
}
end)
:ets.delete_all_objects(:alert_id_to_alert)
:ets.delete_all_objects(:route_id_and_type_to_alert_ids)
:ets.delete_all_objects(:alert_banner)
:ets.insert(:alert_id_to_alert, alert_inserts)
:ets.insert(:route_id_and_type_to_alert_ids, route_inserts)
:ets.insert(:alert_banner, {:banner, banner})
{:reply, :ok, state, :hibernate}
end
end
|
apps/alerts/lib/cache/store.ex
| 0.730578 | 0.594993 |
store.ex
|
starcoder
|
defmodule AWS.AutoScaling do
@moduledoc """
With Application Auto Scaling, you can configure automatic scaling for your
scalable AWS resources. You can use Application Auto Scaling to accomplish
the following tasks:
* Define scaling policies to automatically scale your AWS resources
* Scale your resources in response to CloudWatch alarms
* Schedule one-time or recurring scaling actions
* View the history of your scaling events

Application Auto Scaling can scale the following AWS resources:

* Amazon ECS services. For more information, see [Service Auto
Scaling](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-auto-scaling.html)
in the *Amazon Elastic Container Service Developer Guide*.
* Amazon EC2 Spot fleets. For more information, see [Automatic Scaling for Spot
Fleet](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/fleet-auto-scaling.html)
in the *Amazon EC2 User Guide*.
* Amazon EMR clusters. For more information, see [Using Automatic Scaling in Amazon
EMR](http://docs.aws.amazon.com/ElasticMapReduce/latest/ManagementGuide/emr-automatic-scaling.html)
in the *Amazon EMR Management Guide*.
* AppStream 2.0 fleets. For more information, see [Fleet Auto Scaling for Amazon AppStream
2.0](http://docs.aws.amazon.com/appstream2/latest/developerguide/autoscaling.html)
in the *Amazon AppStream 2.0 Developer Guide*.
* Provisioned read and write capacity for Amazon DynamoDB tables and global
secondary indexes. For more information, see [Managing Throughput Capacity
Automatically with DynamoDB Auto
Scaling](http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/AutoScaling.html)
in the *Amazon DynamoDB Developer Guide*.
* Amazon Aurora Replicas. For more information, see [Using Amazon Aurora Auto
Scaling with Aurora
Replicas](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Aurora.Integrating.AutoScaling.html).
* Amazon SageMaker endpoints. For more information, see [Automatically Scaling
Amazon SageMaker
Models](http://docs.aws.amazon.com/sagemaker/latest/dg/endpoint-auto-scaling.html).

To configure automatic scaling for multiple resources across
multiple services, use AWS Auto Scaling to create a scaling plan for your
application. For more information, see [AWS Auto
Scaling](http://aws.amazon.com/autoscaling).
For a list of supported regions, see [AWS Regions and Endpoints:
Application Auto
Scaling](http://docs.aws.amazon.com/general/latest/gr/rande.html#as-app_region)
in the *AWS General Reference*.
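
Example (a sketch; the client map fields shown are the ones consumed by
`request/4` below, and the credential fields are assumed to be what
`AWS.Request.sign_v4` expects):

client = %{region: "us-west-2", endpoint: "amazonaws.com", proto: "https",
port: 443, access_key_id: "AKID...", secret_access_key: "..."}
AWS.AutoScaling.describe_scalable_targets(client, %{"ServiceNamespace" => "ecs"})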
"""
@doc """
Deletes the specified Application Auto Scaling scaling policy.
Deleting a policy deletes the underlying alarm action, but does not delete
the CloudWatch alarm associated with the scaling policy, even if it no
longer has an associated action.
To create a scaling policy or update an existing one, see
`PutScalingPolicy`.
"""
def delete_scaling_policy(client, input, options \\ []) do
request(client, "DeleteScalingPolicy", input, options)
end
@doc """
Deletes the specified Application Auto Scaling scheduled action.
"""
def delete_scheduled_action(client, input, options \\ []) do
request(client, "DeleteScheduledAction", input, options)
end
@doc """
Deregisters a scalable target.
Deregistering a scalable target deletes the scaling policies that are
associated with it.
To create a scalable target or update an existing one, see
`RegisterScalableTarget`.
"""
def deregister_scalable_target(client, input, options \\ []) do
request(client, "DeregisterScalableTarget", input, options)
end
@doc """
Gets information about the scalable targets in the specified namespace.
You can filter the results using the `ResourceIds` and `ScalableDimension`
parameters.
To create a scalable target or update an existing one, see
`RegisterScalableTarget`. If you are no longer using a scalable target, you
can deregister it using `DeregisterScalableTarget`.
"""
def describe_scalable_targets(client, input, options \\ []) do
request(client, "DescribeScalableTargets", input, options)
end
@doc """
Provides descriptive information about the scaling activities in the
specified namespace from the previous six weeks.
You can filter the results using the `ResourceId` and `ScalableDimension`
parameters.
Scaling activities are triggered by CloudWatch alarms that are associated
with scaling policies. To view the scaling policies for a service
namespace, see `DescribeScalingPolicies`. To create a scaling policy or
update an existing one, see `PutScalingPolicy`.
"""
def describe_scaling_activities(client, input, options \\ []) do
request(client, "DescribeScalingActivities", input, options)
end
@doc """
Describes the scaling policies for the specified service namespace.
You can filter the results using the `ResourceId`, `ScalableDimension`, and
`PolicyNames` parameters.
To create a scaling policy or update an existing one, see
`PutScalingPolicy`. If you are no longer using a scaling policy, you can
delete it using `DeleteScalingPolicy`.
"""
def describe_scaling_policies(client, input, options \\ []) do
request(client, "DescribeScalingPolicies", input, options)
end
@doc """
Describes the scheduled actions for the specified service namespace.
You can filter the results using the `ResourceId`, `ScalableDimension`, and
`ScheduledActionNames` parameters.
To create a scheduled action or update an existing one, see
`PutScheduledAction`. If you are no longer using a scheduled action, you
can delete it using `DeleteScheduledAction`.
"""
def describe_scheduled_actions(client, input, options \\ []) do
request(client, "DescribeScheduledActions", input, options)
end
@doc """
Creates or updates a policy for an Application Auto Scaling scalable
target.
Each scalable target is identified by a service namespace, resource ID, and
scalable dimension. A scaling policy applies to the scalable target
identified by those three attributes. You cannot create a scaling policy
until you register the scalable target using `RegisterScalableTarget`.
To update a policy, specify its policy name and the parameters that you
want to change. Any parameters that you don't specify are not changed by
this update request.
You can view the scaling policies for a service namespace using
`DescribeScalingPolicies`. If you are no longer using a scaling policy, you
can delete it using `DeleteScalingPolicy`.
"""
def put_scaling_policy(client, input, options \\ []) do
request(client, "PutScalingPolicy", input, options)
end
@doc """
Creates or updates a scheduled action for an Application Auto Scaling
scalable target.
Each scalable target is identified by a service namespace, resource ID, and
scalable dimension. A scheduled action applies to the scalable target
identified by those three attributes. You cannot create a scheduled action
until you register the scalable target using `RegisterScalableTarget`.
To update an action, specify its name and the parameters that you want to
change. If you don't specify start and end times, the old values are
deleted. Any other parameters that you don't specify are not changed by
this update request.
You can view the scheduled actions using `DescribeScheduledActions`. If you
are no longer using a scheduled action, you can delete it using
`DeleteScheduledAction`.
"""
def put_scheduled_action(client, input, options \\ []) do
request(client, "PutScheduledAction", input, options)
end
@doc """
Registers or updates a scalable target. A scalable target is a resource
that Application Auto Scaling can scale out or scale in. After you have
registered a scalable target, you can use this operation to update the
minimum and maximum values for its scalable dimension.
After you register a scalable target, you can create and apply scaling
policies using `PutScalingPolicy`. You can view the scaling policies for a
service namespace using `DescribeScalableTargets`. If you no longer need a
scalable target, you can deregister it using `DeregisterScalableTarget`.
"""
def register_scalable_target(client, input, options \\ []) do
request(client, "RegisterScalableTarget", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "autoscaling"}
host = get_host("autoscaling", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AnyScaleFrontendService.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/autoscaling.ex
| 0.918635 | 0.543045 |
autoscaling.ex
|
starcoder
|
defmodule Wabbit.Queue do
@moduledoc """
Functions to operate on Queues.
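
# Example
A sketch, assuming an open `channel` and an existing exchange named "my_exchange":
{:ok, %{queue: queue}} = Wabbit.Queue.declare(channel, "", exclusive: true)
:ok = Wabbit.Queue.bind(channel, queue, "my_exchange", routing_key: "events.*")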
"""
import Wabbit.Record
alias Wabbit.Utils
@doc """
Declares a queue.
The optional `queue` parameter is used to set the name. If set to an
empty string (default), the server will assign a name. Besides the
queue name, the following options can be used:
# Options
* `:durable` - If set, keeps the Queue between restarts of the broker
* `:auto_delete` - If set, deletes the Queue once all subscribers disconnect
* `:exclusive` - If set, only one subscriber can consume from the Queue
* `:passive` - If set, raises an error unless the queue already exists
* `:no_wait` - If set, the server will not respond to the method
* `:arguments` - A set of arguments for the declaration
"""
def declare(channel, queue \\ "", options \\ []) do
queue_declare =
queue_declare(queue: queue,
passive: Keyword.get(options, :passive, false),
durable: Keyword.get(options, :durable, false),
exclusive: Keyword.get(options, :exclusive, false),
auto_delete: Keyword.get(options, :auto_delete, false),
nowait: Keyword.get(options, :no_wait, false),
arguments: Keyword.get(options, :arguments, []) |> Utils.to_type_tuple)
queue_declare_ok(queue: queue,
message_count: message_count,
consumer_count: consumer_count) = :amqp_channel.call(channel, queue_declare)
{:ok, %{queue: queue, message_count: message_count, consumer_count: consumer_count}}
end
@doc """
Binds a Queue to an Exchange
The following options can be used:
# Options
* `:routing_key` - If set, specifies the routing key for the binding
* `:no_wait` - If set, the server will not respond to the method
* `:arguments` - A set of arguments for the binding
"""
def bind(channel, queue, exchange, options \\ []) do
queue_bind =
queue_bind(queue: queue,
exchange: exchange,
routing_key: Keyword.get(options, :routing_key, ""),
nowait: Keyword.get(options, :no_wait, false),
arguments: Keyword.get(options, :arguments, []) |> Utils.to_type_tuple)
queue_bind_ok() = :amqp_channel.call(channel, queue_bind)
:ok
end
@doc """
Unbinds a Queue from an Exchange
The following options can be used:
# Options
* `:routing_key` - If set, specifies the routing key for the unbind
* `:arguments` - A set of arguments for the unbind
"""
def unbind(channel, queue, exchange, options \\ []) do
queue_unbind =
queue_unbind(queue: queue,
exchange: exchange,
routing_key: Keyword.get(options, :routing_key, ""),
arguments: Keyword.get(options, :arguments, []))
queue_unbind_ok() = :amqp_channel.call(channel, queue_unbind)
:ok
end
@doc """
Deletes a Queue by name
The following options can be used:
# Options
* `:if_unused` - If set, the server will only delete the queue if it has no consumers
* `:if_empty` - If set, the server will only delete the queue if it has no messages
* `:no_wait` - If set, the server will not respond to the method
"""
def delete(channel, queue, options \\ []) do
queue_delete =
queue_delete(queue: queue,
if_unused: Keyword.get(options, :if_unused, false),
if_empty: Keyword.get(options, :if_empty, false),
nowait: Keyword.get(options, :no_wait, false))
queue_delete_ok(message_count: message_count) = :amqp_channel.call(channel, queue_delete)
{:ok, %{message_count: message_count}}
end
@doc """
Discards all messages in the Queue
"""
def purge(channel, queue) do
queue_purge_ok(message_count: message_count) = :amqp_channel.call(channel, queue_purge(queue: queue))
{:ok, %{message_count: message_count}}
end
@doc """
Returns the message count and consumer count for the given queue.
Uses Queue.declare with the `passive` option set.
"""
def status(channel, queue) do
declare(channel, queue, passive: true)
end
@doc """
Returns the number of messages that are ready for delivery (e.g. not
pending acknowledgements) in the queue
"""
def message_count(channel, queue) do
{:ok, %{message_count: message_count}} = status(channel, queue)
message_count
end
@doc """
Returns a number of active consumers on the queue
"""
def consumer_count(channel, queue) do
{:ok, %{consumer_count: consumer_count}} = status(channel, queue)
consumer_count
end
@doc """
Returns true if queue is empty (has no messages ready), false otherwise
"""
def empty?(channel, queue) do
message_count(channel, queue) == 0
end
end
|
lib/wabbit/queue.ex
| 0.85446 | 0.474509 |
queue.ex
|
starcoder
|
defmodule SimpleCipher do
@a_ordinal ?a
@alphabet Enum.to_list(?a..?z)
@alphabet_length length(@alphabet)
defguardp non_lower_alpha?(text_char) when not (text_char in @alphabet)
@doc """
Given a `plaintext` and `key`, encode each character of the `plaintext` by
shifting it by the corresponding letter in the alphabet shifted by the number
of letters represented by the `key` character, repeating the `key` if it is
shorter than the `plaintext`.
For example, for the letter 'd', the alphabet is rotated to become:
defghijklmnopqrstuvwxyzabc
You would encode the `plaintext` by taking the current letter and mapping it
to the letter in the same position in this rotated alphabet.
abcdefghijklmnopqrstuvwxyz
defghijklmnopqrstuvwxyzabc
"a" becomes "d", "t" becomes "w", etc...
Each letter in the `plaintext` will be encoded with the alphabet of the `key`
character in the same position. If the `key` is shorter than the `plaintext`,
repeat the `key`.
Example:
plaintext = "testing"
key = "abc"
The key should repeat to become the same length as the text, becoming
"abcabca". If the key is longer than the text, only use as many letters of it
as are necessary.
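
With the above, each letter shifts by its key letter's offset:

iex> SimpleCipher.encode("testing", "abc")
"tfutjpg"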
"""
def encode(plaintext, key), do: shift(plaintext, key, &+/2)
@doc """
Given a `ciphertext` and `key`, decode each character of the `ciphertext` by
finding the corresponding letter in the alphabet shifted by the number of
letters represented by the `key` character, repeating the `key` if it is
shorter than the `ciphertext`.
The same rules for key length and shifted alphabets apply as in `encode/2`,
but you will go the opposite way, so "d" becomes "a", "w" becomes "t",
etc..., depending on how much you shift the alphabet.
"""
def decode(ciphertext, key), do: shift(ciphertext, key, &-/2)
defp shift(text, key, fun) do
text
|> String.to_charlist()
|> Enum.zip(cycle_key(key))
|> Enum.map_join("", &substitute(fun, &1))
end
defp cycle_key(key) do
key
|> String.to_charlist()
|> Stream.cycle()
end
defp substitute(_fun, {text_char, _key_char})
when non_lower_alpha?(text_char) do
<<text_char::utf8>>
end
defp substitute(fun, {text_char, key_char}) do
value =
fun
|> apply([text_char - @a_ordinal, key_char - @a_ordinal])
|> Kernel.+(@alphabet_length)
|> rem(@alphabet_length)
|> Kernel.+(@a_ordinal)
<<value::utf8>>
end
end
|
elixir/simple-cipher/lib/simple_cipher.ex
| 0.789071 | 0.571826 |
simple_cipher.ex
|
starcoder
|
defmodule Noizu.MnesiaVersioning.Tasks.Install do
@moduledoc """
The Install task creates the initial Mnesia schema and inserts the necessary record-keeping tables for tracking schema versioning.
If you already have an existing schema you may run with the `--skip-schema` option to only insure the tracking tables are created.
This task will connect to all mnesia nodes specified by your `:topology_provider.mnesia_nodes` method. If you are running as a single
unnamed node simply return a nil or empty list [] value in your mnesia_nodes implementation.
Currently, to use this task a user must implement a Mix.Tasks.Install (or similarly named)
module and load the Noizu Implementation with a use statement.
*Note*
If installing on multiple mnesia nodes, all nodes must be running and reachable for this script to function correctly.
This may be done by simply running `MIX_ENV=%target% iex --name=%node_name% -S mix install wait` on all instances other than the one
on which you will be running `MIX_ENV=%target% iex --name=%node_name% -S mix install [--skip-schema]`
*Example*
```
defmodule Mix.Tasks.Install do
use Noizu.MnesiaVersioning.Tasks.Install
end
```
*Usage*
|| Command || Notes || Example ||
| `mix install` | Setup Schema and versioning table | `mix install` |
| `mix install --skip-schema` | Setup versioning table only. | `mix install --skip-schema` |
*Configuration*
The user must provide modules that implement the `Noizu.MnesiaVersioning.SchemaBehaviour` and `Noizu.MnesiaVersioning.TopologyBehaviour` behaviours.
The providers may be specified in the user's config file or as options to the `use Noizu.MnesiaVersioning.Tasks.Install` statement.
The user may additionally choose to use a database other than Noizu.MnesiaVersioning.Database for tracking schema versioning by ensuring it is created/creatable and passing it as an option to this command, or as a config parameter.
*Configuration Example: config.exs*
```
config Noizu.MnesiaVersioning,
topology_provider: MyApp.Mnesia.TopologyProvider,
schema_provider: MyApp.Mnesia.SchemaProvider
```
*Configuration Example: using arguments*
```
defmodule Mix.Tasks.Install do
use Noizu.MnesiaVersioning.Tasks.Install,
topology_provider: MyApp.Mnesia.AlternativeTopologyProvider,
schema_provider: MyApp.Mnesia.AlternativeSchemaProvider
end
```
"""
defmacro __using__(options) do
versioning_table = Keyword.get(options, :versioning_table, Application.get_env(:noizu_mnesia_versioning, :versioning_table, Noizu.MnesiaVersioning.Database))
silent = Keyword.get(options, :silent, Application.get_env(:noizu_mnesia_versioning, :silent, false))
topology_provider = Keyword.get(options, :topology_provider, Application.get_env(:noizu_mnesia_versioning, :topology_provider, :required_setting))
if topology_provider == :required_setting do
if (!silent), do: IO.puts "#{__MODULE__} - To use the Noizu.MnesiaVersioning library you must specify a topology_provider option in the noizu_mnesia_versioning config section. For more details @see mnesia_versioning/doc/config.md"
raise "Noizu.MnesiaVersioning :topology_provider setting not configured. @see mnesia_versioning/doc/config.md for more details."
end
quote do
require Amnesia
require Amnesia.Helper
use unquote(versioning_table)
use Mix.Task
import unquote(__MODULE__)
def log(message) do
if (!unquote(silent)), do: IO.puts(message)
end
def run(["wait"]) do
:ok
end
def run(["--skip-schema"]) do
log "#{__MODULE__} - Skipping Schema Creation . . . Proceeding to create versioning tables."
nodes = case unquote(topology_provider).mnesia_nodes() do
{:ok, nil} -> [node()]
{:ok, []} -> [node()]
{:ok, nodes} -> nodes
end
setup_versioning_tables(nodes)
end
def run([]) do
nodes = case unquote(topology_provider).mnesia_nodes() do
{:ok, nil} -> [node()]
{:ok, []} -> [node()]
{:ok, nodes} -> nodes
end
log "#{__MODULE__} - Configuring Schema on specified nodes: #{inspect nodes}"
case Amnesia.Schema.create(nodes) do
:ok ->
log "#{__MODULE__} - Schema created . . . Proceeding to create versioning tables."
setup_versioning_tables(nodes)
_ ->
log "#{__MODULE__} - Schema appears to already exit . . . Proceeding to create versioning tables (unexected outcomes may occur)."
setup_versioning_tables(nodes)
end # end case Schema.create
end # end def run/1
def run(_) do
log """
Usage:
mix install
mix install wait
mix install --skip-schema
"""
:error
end
def setup_versioning_tables(nodes) do
npids = for (n <- nodes) do
pid = Node.spawn(n, __MODULE__, :wait_for_init, [self()])
receive do
:wait_mode -> :wait_mode
end
pid
end
log "#{__MODULE__} - Installing Versioning Table"
attempt_create = unquote(versioning_table).create(disk: nodes)
log "#{__MODULE__} - Schema Create: #{inspect attempt_create}"
log "#{__MODULE__} - #{unquote(versioning_table)}.wait()"
attempt_wait = unquote(versioning_table).wait()
log "#{__MODULE__} - Schema Wait: #{inspect attempt_wait}"
for (n <- npids) do
send n, :initilization_complete
receive do
:initilization_complete_confirmed -> :ok
end # end receive
end # end for npids
log "#{__MODULE__} -Initilization Complete."
:ok
end # end set_versioning_tables/1
def wait_for_init(caller) do
log "#{__MODULE__} #{inspect node()} - Wait For Init"
amnesia_start = Amnesia.start
log "#{__MODULE__} #{inspect node()} - Amnesia Start: #{inspect amnesia_start}."
log "Send wait_mode confirmation"
send caller, :wait_mode
log "#{__MODULE__} #{inspect node()} - Wait for :initilization_complete response"
receive do
:initilization_complete ->
log "#{__MODULE__} #{inspect node()} - Initilization Complete, stopping Amnesia"
Amnesia.stop
send caller, :initilization_complete_confirmed
:ok
end # end receive
end # end wait_for_init/1
end # end quote do
end # end using
end # end module
|
lib/mnesia_versioning/tasks/install.ex
| 0.803444 | 0.839306 |
install.ex
|
starcoder
|
defmodule Trifolium.Species do
@moduledoc """
Module to be used to interact with Trefle [Species](https://docs.trefle.io/reference/#tag/Species) related endpoints.
"""
alias Trifolium.Config
alias Trifolium.API
@endpoint_path "api/v1/species/"
@http_client Config.http_client()
@doc """
List every possible `Species`.
This endpoint IS paginated, using an optional keyword parameter. By default, page 1 is returned.
You can use a `filter` or a `filter_not` like so:
```
iex()> Trifolium.Species.all(filter: %{year: year})
```
The same applies to the `order` and `range` parameters: just pass a map and it
will be correctly parsed into the query parameters.
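For example (the field name and sort direction are illustrative):
```
iex()> Trifolium.Species.all(page: 2, order: %{year: :desc})
```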
"""
@spec all(
filter: map,
filter_not: map,
order: map,
page: non_neg_integer(),
range: map
) :: API.response()
def all(opts \\ []) do
@http_client.get(
get_path(),
[],
params: API.build_query_params(opts)
)
|> API.parse_response()
end
@doc """
Find a specific `Species` according to its `id` or `slug`.
"""
@spec find(non_neg_integer() | String.t()) :: API.response()
def find(id) do
@http_client.get(
get_path("#{id}"),
[],
params: API.build_query_params()
)
|> API.parse_response()
end
@doc """
Similar to `Trifolium.Species.all`, but you can pass an additional `query` parameter which will be used
for a full-text search across the available fields. More information can be found in the Trefle documentation.
This endpoint IS paginated, using an optional keyword parameter. By default, page 1 is returned.
You can use a `filter` or a `filter_not` like so:
```
iex()> Trifolium.Species.search(query, filter: %{year: year})
```
The same applies to the `order` and `range` parameters: just pass a map and it
will be correctly parsed into the query parameters.
"""
@spec search(
String.t(),
filter: map,
filter_not: map,
order: map,
page: non_neg_integer(),
range: map
) :: API.response()
def search(query, opts \\ []) do
@http_client.get(
get_path("search"),
[],
params: API.build_query_params(Keyword.put(opts, :q, query))
)
|> API.parse_response()
end
@doc """
Report a problem with a specific `Species`. The second parameter is optional, as per the Trefle docs,
but be aware that it doesn't make much sense to report a problem without describing it.
The `notes` parameter is a `String.t()` explaining the problem.
```
iex()> Trifolium.Species.report(id, "Should have synonym Trifolium")
```
If you want to test this endpoint, without bothering Trefle community, you can use the special notes parameter `"TEST"`:
```
iex()> Trifolium.Species.report(id, "TEST")
```
"""
@spec report(integer(), String.t()) :: API.response()
def report(id, notes \\ "") do
@http_client.post(
get_path("#{id}/report"),
Jason.encode!(%{notes: notes}),
%{"Content-Type" => "application/json"},
params: API.build_query_params()
)
|> API.parse_response()
end
@spec get_path(String.t()) :: String.t()
defp get_path(url \\ "") do
Config.base_url() <> @endpoint_path <> url
end
end
|
lib/trifolium/endpoints/species.ex
| 0.873728 | 0.822118 |
species.ex
|
starcoder
|
defmodule Remit.UsernamesFromMentions do
import Ecto.Query
alias Remit.{Comment, Commit, Repo}
# Partially from: https://github.com/shinnn/github-username-regex/blob/master/index.js
@mention_re ~r/
(?<=^|\W) # The ?<= is a lookbehind: We must be at start of string, or following a non-word character.
@
(
[a-z\d] # Starts with ASCII letter or digit.
(?:[a-z\d]|-(?=[a-z\d])){0,38} # The ?= is a lookahead: any "-" must be followed by a letter or digit.
)
\b
/ix
def call(text) do
@mention_re
|> Regex.scan(strip_code_blocks(text), capture: :all_but_first)
|> List.flatten()
|> Enum.uniq()
|> do_call()
end
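  # Illustrative example: call("Thanks @Alice and @bob!") scans out
  # ["Alice", "bob"] and returns only those that match known usernames,
  # normalised to their stored casing.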
# Private
defp do_call([]), do: []
defp do_call(mentions) do
known_usernames = get_known_usernames()
# We normalise to the known form, because it's probably from GitHub's APIs, so should have correct casing.
# (Which we probably don't *need*, but why not?)
Enum.flat_map(mentions, fn mention ->
downcased_mention = String.downcase(mention)
Enum.find_value(known_usernames, [], &(if String.downcase(&1) == downcased_mention, do: [&1]))
end)
end
# This is simplistic and doesn't e.g. account for HTML-in-Markdown or backslash-escaped backticks. But probably good enough.
defp strip_code_blocks(text) do
text
|> String.replace(~r/^ .*/m, "") # Four-space indent.
|> String.replace(~r/^```(.*?)```/ms, "") # Triple backticks.
|> String.replace(~r/`(.*?)`/, "") # Single backticks.
end
defp get_known_usernames do
# UNION ALL to minimise DB roundtrips.
# UNION ALL rather than UNION because it's faster, we can't easily get unique values anyway.
# COALESCE because empty lists would otherwise ARRAY_AGG to `[nil]`.
commenter_usernames_q = from c in Comment, select: fragment("COALESCE(ARRAY_AGG(DISTINCT ?), '{}')", c.commenter_username)
from(c in Commit, select: c.usernames, distinct: true, union_all: ^commenter_usernames_q)
|> Repo.all()
|> List.flatten()
|> Enum.uniq()
end
end
|
lib/remit/usernames_from_mentions.ex
| 0.611382 | 0.537648 |
usernames_from_mentions.ex
|
starcoder
|
defmodule CritWeb.Fomantic.ListProducing do
use Phoenix.HTML
import CritWeb.Fomantic.Helpers
@moduledoc """
This module creates HTML that delivers list values to a controller action.
For example, producing a params map like %{"ids" => ["0", "5", "10"]}.
"""
@doc """
Like `multiple_select`, but more convenient for the user.
The `tuples` argument is a list of pairs like {"name", 5}.
The first element is the text displayed next to the checkbox.
The second is the value to send to the controller action.
The `checkbox_field` is something like `:chosen_ids`. The params
delivered to the controller action will have that key bound to
an array of values (like an array of chosen ids).
The checkboxes are all part of one `class="field"`, so they
will all be on the same line.
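A sketch of a typical call (labels, values, and field name are illustrative):
    multiple_checkbox_row(f, [{"Cat", 1}, {"Dog", 2}], :chosen_ids, checked: [1])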
"""
def multiple_checkbox_row(f, [{_,_}|_]=tuples, checkbox_field, opts \\ []) do
checked = Keyword.get(opts, :checked, [])
opts = Keyword.delete(opts, :checked)
~E"""
<div class="field">
<%= for tuple <- tuples,
do: one_checkbox(f, tuple, checkbox_field, checked, opts)
%>
</div>
"""
end
@doc """
Like `multiple_checkbox_row`, except that
1. The values will be stacked horizontally.
2. Instead of tuples, structs are passed in. The `displayed_field:`
and `send_field:` options identify the keys in the
structure to use. They default to `:name` and `:id`.
"""
def multiple_checkbox_column(f, structs, checkbox_field, opts \\ []) do
defaults = %{sent_field: :id, displayed_field: :name, checked: []}
opts = Enum.into(opts, defaults)
for struct <- structs do
sent_value = Map.fetch!(struct, opts.sent_field)
label_value = Map.fetch!(struct, opts.displayed_field)
~E"""
<div class="field">
<%= one_checkbox(f, {label_value, sent_value}, checkbox_field, opts.checked) %>
</div>
"""
end
end
defp one_checkbox(f, {label_value, sent_value}, checkbox_field, all_checked, opts \\ []) do
checkbox_id = input_id(f, checkbox_field, sent_value)
checkbox_name = input_list_name(f, checkbox_field)
check_this? = Enum.member?(all_checked, sent_value)
fixed_opts = [name: checkbox_name,
id: checkbox_id,
type: "checkbox",
checked: check_this?,
value: sent_value]
checkbox_tag = tag(:input, fixed_opts ++ opts)
label_tag = content_tag(:label, label_value, for: checkbox_id)
~E"""
<div class="ui checkbox">
<%= checkbox_tag %>
<%= label_tag %>
</div>
"""
end
end
|
lib/crit_web/views/fomantic/list_producing.ex
| 0.702632 | 0.420689 |
list_producing.ex
|
starcoder
|
defmodule SurfaceBootstrap4.Table do
@moduledoc """
Bootstrap4 table component.
A table can be created by setting a source `data` to it and defining
columns using the `SurfaceBootstrap4.Table.Column` component.
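A minimal sketch, using the old-style Surface `{{ }}` syntax this module is
written in (the `user` binding and column markup are illustrative; exact slot
syntax depends on the Surface version):
```
<Table data={{ user <- @users }} striped=true>
  <Column label="Name">
    {{ user.name }}
  </Column>
</Table>
```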
"""
use Surface.Component
@doc "The data that populates the table"
prop data, :list, required: true
@doc "Additional CSS classes"
prop class, :css_class
@doc "The size of the table"
prop size, :string, values: ~w(sm md lg)
@doc "Add borders to all the cells"
prop bordered, :boolean, default: false
@doc "Add borderless to all the cells"
prop borderless, :boolean, default: false
@doc "Add stripes to the table"
prop striped, :boolean, default: false
@doc "Add hovers to the table"
prop hover, :boolean, default: false
@doc "Invert colors of the table to dark background and light text"
prop dark, :boolean, default: false
@doc """
A function that returns a class for the item's underlying `<tr>`
element. The function receives the item and index related to
the row.
"""
prop rowClass, :fun
@doc "The columns of the table"
slot cols, props: [item: ^data], required: true
def render(assigns) do
~H"""
<table class={{
:table,
@class,
"table-#{@size}": @size,
"table-bordered": @bordered,
"table-borderless": @borderless,
"table-striped": @striped,
"table-hover": @hover,
"table-dark": @dark
}}>
<thead>
<tr>
<th :for={{ col <- @cols }}>
{{ col.label }}
</th>
</tr>
</thead>
<tbody>
<tr
:for.with_index={{ {item, index} <- @data }}
class={{ row_class_fun(@rowClass).(item, index) }}>
<td :for.index={{ index <- @cols }}>
<span><slot name="cols" index={{ index }} :props={{ item: item }}/></span>
</td>
</tr>
</tbody>
</table>
"""
end
defp row_class_fun(nil), do: fn _, _ -> "" end
defp row_class_fun(rowClass), do: rowClass
end
|
lib/surface_boostrap4/table.ex
| 0.814459 | 0.463626 |
table.ex
|
starcoder
|
defmodule EctoCassandra.Query do
@types [
:ascii,
:bigint,
:blob,
:boolean,
:counter,
:date,
:decimal,
:double,
:float,
:inet,
:int,
:smallint,
:text,
:time,
:timestamp,
:timeuuid,
:tinyint,
:uuid,
:varchar,
:varint,
]
@cassandra_keys [
:if,
:allow_filtering,
:using,
]
defmacro __using__([]) do
quote do
import Ecto.Query, except: [from: 1, from: 2]
import EctoCassandra.Query
end
end
defmacro from(expr, kw \\ []) do
cassandra_kw = Keyword.take(kw, @cassandra_keys)
ecto_kw = Keyword.drop(kw, @cassandra_keys)
quote do
unquote(expr)
|> Ecto.Query.from(unquote(ecto_kw))
|> Map.merge(Enum.into(unquote(cassandra_kw), %{}))
end
end
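  # Illustrative usage (the schema and bindings are hypothetical). The extra
  # Cassandra-only keys are split off and merged onto the Ecto query struct:
  #
  #   use EctoCassandra.Query
  #   from u in User,
  #     where: u.id == ^user_id,
  #     allow_filtering: true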
defmacro token(fields) when is_list(fields) do
marks = Enum.map_join(fields, ", ", fn _ -> "?" end)
quote do: fragment(unquote("token(#{marks})"), unquote_splicing(fields))
end
defmacro token(field) do
quote do: fragment("token(?)", unquote(field))
end
defmacro cast(field, type) when type in @types do
fragment = "cast(? as #{Atom.to_string(type)})"
quote do: fragment(unquote(fragment), unquote(field))
end
defmacro uuid do
quote do: fragment("uuid()")
end
defmacro now do
quote do: fragment("now()")
end
defmacro min_timeuuid(time) do
quote do: fragment("minTimeuuid(?)", unquote(time))
end
defmacro max_timeuuid(time) do
quote do: fragment("maxTimeuuid(?)", unquote(time))
end
defmacro to_date(time) do
quote do: fragment("toDate(?)", unquote(time))
end
defmacro to_timestamp(time) do
quote do: fragment("toTimestamp(?)", unquote(time))
end
defmacro to_unix_timestamp(time) do
quote do: fragment("toUnixTimestamp(?)", unquote(time))
end
defmacro as_blob(field, type) when type in @types do
fragment = "#{Atom.to_string(type)}AsBlob(?)"
quote do: fragment(unquote(fragment), unquote(field))
end
defmacro contains(field, value) do
quote do: fragment("? CONTAINS ?", unquote(field), unquote(value))
end
end
|
lib/ecto_cassandra/query.ex
| 0.587588 | 0.416975 |
query.ex
|
starcoder
|
defmodule AdventOfCode.Day13 do
@moduledoc false
use AdventOfCode
@empty_cell " "
@filled_cell "█"
def part1(input) do
{grid, instructions} = preprocess_input(input)
grid
|> fold(Enum.at(instructions, 0))
|> Enum.count(fn {_, v} -> v == @filled_cell end)
end
def part2(input) do
{grid, instructions} = preprocess_input(input)
Enum.reduce(instructions, grid, fn instruction, acc -> fold(acc, instruction) end)
|> print_grid()
nil
end
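  # Folding along y reflects filled cells below the fold line: a cell at
  # {x, y} with y > fold_y lands at {x, 2 * fold_y - y}, written below as
  # abs(y - 2 * fold_y). Folding along x mirrors the same idea for columns.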
defp fold(grid, {"y", fold_y}) do
# split grids
{top_grid, bottom_grid} =
Enum.reduce(grid, {%{}, %{}}, fn {{x, y}, value}, {top_grid, bottom_grid} ->
if y < fold_y do
{Map.put(top_grid, {x, y}, value), bottom_grid}
else
{top_grid, Map.put(bottom_grid, {x, y}, value)}
end
end)
bottom_ones =
bottom_grid
|> Enum.filter(fn {{_x, _y}, value} -> value == @filled_cell end)
# re-calculate coordinates
|> Enum.map(fn {{x, y}, value} ->
{{x, abs(y - 2 * fold_y)}, value}
end)
|> Map.new()
Map.merge(top_grid, bottom_ones)
end
defp fold(grid, {"x", fold_x}) do
# split grids
{left_grid, right_grid} =
Enum.reduce(grid, {%{}, %{}}, fn {{x, y}, value}, {left_grid, right_grid} ->
if x < fold_x do
{Map.put(left_grid, {x, y}, value), right_grid}
else
{left_grid, Map.put(right_grid, {x, y}, value)}
end
end)
right_ones =
right_grid
|> Enum.filter(fn {{_x, _y}, value} -> value == @filled_cell end)
# re-calculate coordinates
|> Enum.map(fn {{x, y}, value} ->
{{abs(x - 2 * fold_x), y}, value}
end)
|> Map.new()
Map.merge(left_grid, right_ones)
end
defp preprocess_input(input) do
[coords, instructions] =
input
|> String.split("\n\n", trim: true)
parsed_ones =
coords
|> String.split("\n", trim: true)
|> Enum.map(&String.split(&1, ",", trim: true))
|> Enum.reduce(%{}, fn [x, y], acc ->
Map.put(acc, {String.to_integer(x), String.to_integer(y)}, @filled_cell)
end)
max_x = Enum.max_by(parsed_ones, fn {{x, _y}, _v} -> x end) |> elem(0) |> elem(0)
max_y = Enum.max_by(parsed_ones, fn {{_x, y}, _v} -> y end) |> elem(0) |> elem(1)
empty_grid =
for row <- 0..max_x,
col <- 0..max_y,
into: %{} do
{{row, col}, @empty_cell}
end
parsed_grid = Map.merge(empty_grid, parsed_ones)
parsed_instructions =
instructions
|> String.replace("fold along ", "")
|> String.split("\n", trim: true)
|> Enum.map(&String.split(&1, "=", trim: true))
|> Enum.map(fn [axis, value] -> {axis, String.to_integer(value)} end)
{parsed_grid, parsed_instructions}
end
defp print_grid(grid, sep \\ "") do
grid
|> Enum.group_by(fn {{_x, y}, _value} -> y end)
|> Enum.map(fn {_y, points} ->
Enum.map(points, fn {{x, _y}, value} -> {x, value} end)
|> Enum.sort(fn {x1, _}, {x2, _} -> x1 < x2 end)
|> Enum.map(fn {_x, v} -> v end)
|> Enum.join(sep)
end)
|> Enum.each(fn l -> IO.puts(l) end)
IO.puts("\n")
grid
end
end
|
lib/day13.ex
| 0.672547 | 0.479443 |
day13.ex
|
starcoder
|
defmodule Memento.Schema do
require Memento.Mnesia
@moduledoc """
Module to interact with the database schema.
For persisting data, Mnesia databases need to be created on disk. This
module provides an interface to create the database on the disk of the
specified nodes. Most of the time that is usually the node that the
application is running on.
```
# Create schema on current node
Memento.Schema.create([ node() ])
# Create schema on many nodes
node_list = [node(), :alice@host_x, :bob@host_y, :eve@host_z]
Memento.Schema.create(node_list)
```
The important thing to note here is that only the nodes where data has to
be persisted to disk have to be included. RAM-only nodes should be
left out. Disk schemas can also be deleted by calling `delete/1` and
you can get information about them by calling `info/0`.
## Example
```elixir
# The nodes where you want to persist
nodes = [ node() ]
# Create the schema
Memento.stop
Memento.Schema.create(nodes)
Memento.start
# Create disc copies of your tables
Memento.Table.create!(TableA, disc_copies: nodes)
Memento.Table.create!(TableB, disc_copies: nodes)
```
"""
# Public API
# ----------
@doc """
Creates a new database on disk on the specified nodes.
Calling `:mnesia.create_schema` for a custom path throws an exception
if that path does not exist. Memento's version avoids this by ensuring
that the directory exists.
Also see `:mnesia.create_schema/1`.
"""
@spec create(list(node)) :: :ok | {:error, any}
def create(nodes) do
if path = Application.get_env(:mnesia, :dir) do
:ok = File.mkdir_p!(path)
end
:create_schema
|> Memento.Mnesia.call_and_catch([nodes])
|> Memento.Mnesia.handle_result
end
@doc """
Deletes the database previously created by `create/1` on the specified
nodes.
Use this with caution, as it makes persisting data obsolete. Also see
`:mnesia.delete_schema/1`.
"""
@spec delete(list(node)) :: :ok | {:error, any}
def delete(nodes) do
:delete_schema
|> Memento.Mnesia.call_and_catch([nodes])
|> Memento.Mnesia.handle_result
end
@doc """
Prints schema information about all Tables to the console.
"""
@spec info() :: :ok
def info do
:schema
|> Memento.Mnesia.call_and_catch
|> Memento.Mnesia.handle_result
end
@doc """
Prints schema information about the specified Table to the console.
"""
@spec info(Memento.Table.name) :: :ok
def info(table) do
:schema
|> Memento.Mnesia.call_and_catch([table])
|> Memento.Mnesia.handle_result
end
end
|
lib/memento/schema.ex
| 0.744842 | 0.859487 |
schema.ex
|
starcoder
|
defmodule QRCode.Placement do
@moduledoc """
Patterns are non-data elements of the QR code that are required
by the QR code specification, such as the three finder patterns in
the corners of the QR code matrix.
It contains function patterns (finder patterns, timing patterns,
separators, alignment patterns) and reserved areas (format
information area, version information area).
- 0, 1 ... Encoding data
- 2 ... Finders
- 3 ... Separators
- 4 ... Alignments
- 5 ... Reserved areas
- 6 ... Timing
- 7 ... Dark module
"""
alias MatrixReloaded.{Matrix, Vector}
alias QRCode.QR
import QRCode.QR, only: [version: 1]
@locations [
{14, [26, 46, 66]},
{15, [26, 48, 70]},
{16, [26, 50, 74]},
{17, [30, 54, 78]},
{18, [30, 56, 82]},
{19, [30, 58, 86]},
{20, [34, 62, 90]},
{21, [28, 50, 72, 94]},
{22, [26, 50, 74, 98]},
{23, [30, 54, 78, 102]},
{24, [28, 54, 80, 106]},
{25, [32, 58, 84, 110]},
{26, [30, 58, 86, 114]},
{27, [34, 62, 90, 118]},
{28, [26, 50, 74, 98, 122]},
{29, [30, 54, 78, 102, 126]},
{30, [26, 52, 78, 104, 130]},
{31, [30, 56, 82, 108, 134]},
{32, [34, 60, 86, 112, 138]},
{33, [30, 58, 86, 114, 142]},
{34, [34, 62, 90, 118, 146]},
{35, [30, 54, 78, 102, 126, 150]},
{36, [24, 50, 76, 102, 128, 154]},
{37, [28, 54, 80, 106, 132, 158]},
{38, [32, 58, 84, 110, 136, 162]},
{39, [26, 54, 82, 110, 138, 166]},
{40, [30, 58, 86, 114, 142, 170]}
]
@finder Matrix.new(7, 2) |> elem(1)
@separator Vector.row(8, 3)
@alignment Matrix.new(5, 4) |> elem(1)
@reserved_area 5
@timing 6
@dark_module 7
@correct_finder [Matrix.new(7, 1), Matrix.new(5)]
|> Result.and_then_x(&Matrix.update(&1, &2, {1, 1}))
|> (fn mat -> [mat, Matrix.new(3, 1)] end).()
|> Result.and_then_x(&Matrix.update(&1, &2, {2, 2}))
|> elem(1)
@correct_alignment [Matrix.new(5, 1), Matrix.new(3)]
|> Result.and_then_x(&Matrix.update(&1, &2, {1, 1}))
|> Result.and_then(&Matrix.update_element(&1, 1, {2, 2}))
|> elem(1)
@correct_separator Vector.row(8)
@spec put_patterns(QR.t()) :: Result.t(String.t(), QR.t())
def put_patterns(%QR{version: version, message: message} = qr) when version(version) do
size = (version - 1) * 4 + 21
size
|> Matrix.new()
|> Result.and_then(&add_finders(&1, version, @finder))
|> Result.and_then(&add_separators(&1, version, @separator))
|> Result.and_then(&add_reserved_areas(&1, version, @reserved_area))
|> Result.and_then(&add_timings(&1, version, @timing))
|> Result.and_then(&add_alignments(&1, version, @alignment))
|> Result.and_then(&add_dark_module(&1, version, @dark_module))
|> Result.map(&fill_matrix_by_message(&1, size, message))
|> Result.map(fn matrix -> %{qr | matrix: matrix} end)
end
@spec replace_placeholders(QR.t()) :: Result.t(String.t(), QR.t())
def replace_placeholders(%QR{matrix: matrix, version: version} = qr) when version(version) do
matrix
|> add_finders(version)
|> Result.and_then(&add_separators(&1, version))
|> Result.and_then(&add_reserved_areas(&1, version))
|> Result.and_then(&add_timings(&1, version))
|> Result.and_then(&add_alignments(&1, version))
|> Result.and_then(&add_dark_module(&1, version))
|> Result.map(fn matrix -> %{qr | matrix: matrix} end)
end
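  # A version-v QR matrix is (4v + 17) x (4v + 17) modules, so the offset
  # 4 * version + 10 used below equals size - 7, i.e. the row/column where
  # the 7-module finder patterns start in the top-right and bottom-left corners.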
@spec add_finders(Matrix.t(), QR.version(), Matrix.t()) :: Result.t(String.t(), Matrix.t())
def add_finders(matrix, version, finder \\ @correct_finder) do
matrix
|> Matrix.update(finder, {0, 0})
|> Result.and_then(&Matrix.update(&1, finder, {0, 4 * version + 10}))
|> Result.and_then(&Matrix.update(&1, finder, {4 * version + 10, 0}))
end
@spec add_separators(Matrix.t(), QR.version(), Vector.t()) :: Result.t(String.t(), Matrix.t())
def add_separators(matrix, version, row \\ @correct_separator) do
col = Vector.transpose(row)
matrix
|> Matrix.update_row(row, {7, 0})
|> Result.and_then(&Matrix.update_row(&1, row, {7, 4 * version + 9}))
|> Result.and_then(&Matrix.update_row(&1, row, {4 * version + 9, 0}))
|> Result.and_then(&Matrix.update_col(&1, col, {0, 7}))
|> Result.and_then(&Matrix.update_col(&1, col, {0, 4 * version + 9}))
|> Result.and_then(&Matrix.update_col(&1, col, {4 * version + 9, 7}))
end
@spec add_reserved_areas(Matrix.t(), QR.version(), non_neg_integer()) ::
Result.t(String.t(), Matrix.t())
def add_reserved_areas(matrix, version, val \\ 0)
def add_reserved_areas(matrix, version, val) when version < 7 do
add_reserve_fia(matrix, version, val)
end
def add_reserved_areas(matrix, version, val) do
matrix
|> add_reserve_fia(version, val)
|> add_reserve_via(version, val)
end
@spec add_timings(Matrix.t(), QR.version()) :: Result.t(String.t(), Matrix.t())
def add_timings(matrix, version) do
row = get_timing_row(version)
[row |> Result.and_then(&Matrix.update_row(matrix, &1, {6, 8})), row]
|> Result.and_then_x(&Matrix.update_col(&1, Vector.transpose(&2), {8, 6}))
end
@spec add_timings(Matrix.t(), QR.version(), pos_integer()) :: Result.t(String.t(), Matrix.t())
def add_timings(matrix, version, val) do
size = 4 * version + 1
row = size |> Vector.row(val)
matrix
|> Matrix.update_row(row, {6, 8})
|> Result.and_then(&Matrix.update_col(&1, Vector.transpose(row), {8, 6}))
end
@spec add_alignments(Matrix.t(), QR.version(), Matrix.t()) :: Result.t(String.t(), Matrix.t())
def add_alignments(matrix, version, alignment \\ @correct_alignment)
def add_alignments(matrix, 1, _alignment), do: Result.ok(matrix)
def add_alignments(matrix, version, alignment) when version < 7 do
Matrix.update(matrix, alignment, {4 * version + 8, 4 * version + 8})
end
def add_alignments(matrix, version, alignment) when version < 14 do
Matrix.update_map(matrix, alignment, get_all_positions([2 * version + 8, 4 * version + 10]))
end
def add_alignments(matrix, version, alignment) do
positions =
version
|> find_positions()
|> get_all_positions()
Matrix.update_map(matrix, alignment, positions)
end
@spec add_dark_module(Matrix.t(), QR.version(), pos_integer()) ::
Result.t(String.t(), Matrix.t())
def add_dark_module(matrix, version, val \\ 1) do
Matrix.update_element(matrix, val, {4 * version + 9, 8})
end
defp fill_matrix_by_message(matrix, size, message) do
(size - 1)..7
|> Enum.take_every(2)
|> Enum.concat([5, 3, 1])
|> Enum.map_reduce({matrix, message}, fn col, acc ->
{col, make_fill(acc, [col, col - 1])}
end)
|> Kernel.elem(1)
|> Kernel.elem(0)
end
defp make_fill({matrix, acc_message}, cols) do
matrix
|> Matrix.flip_ud()
|> Enum.map_reduce(acc_message, fn row, acc_msg ->
fill_row(row, acc_msg, cols)
end)
end
defp fill_row(row, acc_msg, cols) do
Enum.reduce(cols, {row, acc_msg}, fn col, {row, msg} ->
if Enum.at(row, col) == 0 do
<<cw::size(1), rest::bitstring>> = msg
{List.update_at(row, col, fn _ -> cw end), rest}
else
{row, msg}
end
end)
end
defp reserved_area(val) do
{6, 3} |> Matrix.new(val) |> elem(1)
end
defp add_reserve_fia(matrix, version, val) do
row_left = Vector.row(6, val) ++ [0] ++ [val, val]
row_right = Vector.row(8, val)
col_top = Vector.transpose(row_left)
col_bottom = Vector.col(7, val)
matrix
|> Matrix.update_row(row_left, {8, 0})
|> Result.and_then(&Matrix.update_row(&1, row_right, {8, 4 * version + 9}))
|> Result.and_then(&Matrix.update_col(&1, col_top, {0, 8}))
|> Result.and_then(&Matrix.update_col(&1, col_bottom, {4 * version + 10, 8}))
end
defp add_reserve_via(matrix, version, val) do
transp = val |> reserved_area() |> Matrix.transpose()
matrix
|> Result.and_then(&Matrix.update(&1, reserved_area(val), {0, 4 * version + 6}))
|> Result.and_then(&Matrix.update(&1, transp, {4 * version + 6, 0}))
end
defp find_positions(version) do
Enum.reduce_while(@locations, version, fn {ver, list_center}, acc ->
if version == ver do
{:halt, list_center}
else
{:cont, acc}
end
end)
end
defp generate_positions(list) do
for x <- list, y <- list, do: {x, y}
end
defp generate_positions(list, :horizontal) do
for x <- [6], y <- Enum.drop(list, -1), do: {x, y}
end
defp generate_positions(list, :vertical) do
for x <- Enum.drop(list, -1), y <- [6], do: {x, y}
end
defp get_positions(version) when version < 14 do
[2 * version + 8, 4 * version + 10]
end
defp get_positions(version) do
find_positions(version)
end
defp get_all_positions(list) do
list
|> generate_positions()
|> Kernel.++(generate_positions(list, :horizontal))
|> Kernel.++(generate_positions(list, :vertical))
|> Enum.map(fn {row_pos, col_pos} -> {row_pos - 2, col_pos - 2} end)
end
defp get_timing_row(version) when version < 7 do
size = 4 * version + 1
size
|> Vector.row()
|> Vector.alternate_seq(1)
|> Result.ok()
end
defp get_timing_row(version) do
size = 4 * version + 1
positions =
version
|> get_positions()
|> generate_positions(:horizontal)
|> Enum.map(fn {_row_pos, col_pos} -> col_pos - 10 end)
size
|> Vector.row()
|> Vector.alternate_seq(1)
|> Vector.update_map(
[0, 0, 0, 0, 0],
positions
)
end
end
|
lib/qr_code/placement.ex
| 0.853715 | 0.688428 |
placement.ex
|
starcoder
|
defmodule DogStat do
@moduledoc """
This module provides helper functions to persist meaningful metrics to StatsD or DogStatsD servers.
Code is based on [Statix](https://github.com/lexmag/statix) library.
"""
use GenServer
alias DogStat.Packet
@type key :: iodata
@type options :: [sample_rate: float, tags: [String.t]]
@type on_send :: :ok | {:error, term}
@doc """
Starts a metric collector process.
`opts` accepts connection arguments:
* `enabled?` - enables or disables metrics reporting;
* `host` - StatsD server host;
* `port` - StatsD server port;
* `namespace` - will be used as prefix to collected metrics;
* `send_tags?` - allows to disable tags for StatsD servers that don't support them;
* `sink` - if set to list, all metrics will be stored in a process state, useful for testing;
* `name` - worker process name.
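An illustrative start (host, port, and namespace values are examples):
    DogStat.start_link(host: "127.0.0.1", port: 8125, namespace: "myapp")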
"""
def start_link(opts) do
name = Keyword.get(opts, :name, __MODULE__)
GenServer.start_link(__MODULE__, opts, name: name)
end
@doc false
def init(opts) do
{:ok, socket} = :gen_udp.open(0, [active: false])
state =
opts
|> get_config()
|> Map.put(:socket, socket)
{:ok, state}
end
@doc """
Changes the DogStat configuration at run-time. Accepts the same `opts` as `start_link/1`.
"""
def configure(opts) do
GenServer.call(__MODULE__, {:configure, opts})
end
defp get_config(opts) do
enabled? = Keyword.get(opts, :enabled?, true)
host = opts |> Keyword.get(:host, "127.0.0.1") |> String.to_charlist()
port = Keyword.get(opts, :port, 8125)
sink = Keyword.get(opts, :sink, nil)
send_tags? = Keyword.get(opts, :send_tags?, true)
namespace = Keyword.get(opts, :namespace, nil)
namespace = if namespace, do: [namespace, ?.], else: ""
{:ok, address} = :inet.getaddr(host, :inet)
header = Packet.header(address, port)
%{
enabled?: enabled?,
send_tags?: send_tags?,
header: [header | namespace],
sink: sink
}
end
@doc """
Increments the StatsD counter identified by `key` by the given `value`.
`value` is supposed to be zero or positive and `decrement/3` should be
used for negative values.
## Examples
iex> increment("hits", 1, [])
:ok
"""
@spec increment(key, value :: number, options) :: on_send
def increment(key, val \\ 1, options \\ []) when is_number(val) do
transmit(:counter, key, val, options)
end
@doc """
Decrements the StatsD counter identified by `key` by the given `value`.
Works the same as `increment/3` but subtracts `value` instead of adding it. For
this reason `value` should be zero or negative.
## Examples
iex> decrement("open_connections", 1, [])
:ok
"""
@spec decrement(key, value :: number, options) :: on_send
def decrement(key, val \\ 1, options \\ []) when is_number(val) do
transmit(:counter, key, [?-, to_string(val)], options)
end
@doc """
Writes to the StatsD gauge identified by `key`.
## Examples
iex> gauge("cpu_usage", 0.83, [])
:ok
"""
@spec gauge(key, value :: String.Chars.t, options) :: on_send
def gauge(key, val, options \\ []) do
transmit(:gauge, key, val, options)
end
@doc """
Writes `value` to the histogram identified by `key`.
Not all StatsD-compatible servers support histograms. An example of such
a server is [statsite](https://github.com/statsite/statsite).
## Examples
iex> histogram("online_users", 123, [])
:ok
"""
@spec histogram(key, value :: String.Chars.t, options) :: on_send
def histogram(key, val, options \\ []) do
transmit(:histogram, key, val, options)
end
@doc """
Writes the given `value` to the StatsD timing identified by `key`.
`value` is expected in milliseconds.
## Examples
iex> timing("rendering", 12, [])
:ok
"""
@spec timing(key, value :: String.Chars.t, options) :: on_send
def timing(key, val, options \\ []) do
transmit(:timing, key, val, options)
end
@doc """
Writes the given `value` to the StatsD set identified by `key`.
## Examples
iex> set("unique_visitors", "user1", [])
:ok
"""
@spec set(key, value :: String.Chars.t, options) :: on_send
def set(key, val, options \\ []) do
transmit(:set, key, val, options)
end
@doc """
Measures the execution time of the given `function` and writes that to the
StatsD timing identified by `key`.
This function returns the value returned by `function`, making it suitable for
easily wrapping existing code.
## Examples
iex> measure("integer_to_string", [], fn -> Integer.to_string(123) end)
"123"
"""
@spec measure(key, options, function :: (() -> result)) :: result when result: var
def measure(key, options \\ [], fun) when is_function(fun, 0) do
{elapsed, result} = :timer.tc(fun)
timing(key, div(elapsed, 1000), options)
result
end
@doc false
def transmit(type, key, val, options) when (is_binary(key) or is_list(key)) and is_list(options) do
sample_rate = Keyword.get(options, :sample_rate)
if is_nil(sample_rate) or sample_rate >= :rand.uniform() do
GenServer.cast(__MODULE__, {:transmit, type, key, to_string(val), options})
end
:ok
end
@doc false
def handle_cast({:transmit, _type, _key, _value, _options}, %{enabled?: false} = state),
do: {:noreply, state}
# Transmits message to a sink
@doc false
def handle_cast({:transmit, type, key, value, options}, %{sink: sink} = state) when is_list(sink) do
%{header: header} = state
packet = %{type: type, key: key, value: value, options: options, header: header}
{:noreply, %{state | sink: [packet | sink]}}
end
# Transmits message to a StatsD server
@doc false
def handle_cast({:transmit, type, key, value, options}, state) do
%{header: header, socket: socket, send_tags?: send_tags?} = state
packet = Packet.build(header, type, key, value, send_tags?, options)
Port.command(socket, packet)
receive do
{:inet_reply, _port, status} -> status
end
{:noreply, state}
end
@doc false
def handle_call({:configure, opts}, _from, state) do
state = Map.merge(state, get_config(opts))
{:reply, {:ok, state}, state}
end
end
|
lib/dogstat.ex
| 0.916521 | 0.531878 |
dogstat.ex
|
starcoder
|
defmodule CQL.QueryParams do
@moduledoc """
Represents the parameters of CQL query/execute statements
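An illustrative construction (values are examples):
    CQL.QueryParams.new(consistency: :quorum, values: [1, "a"], page_size: 100)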
"""
import CQL.DataTypes.Encoder
require Bitwise
defstruct [
consistency: :one,
values: nil,
skip_metadata: false,
page_size: nil,
paging_state: nil,
serial_consistency: nil,
timestamp: nil,
]
@valid_keys [
:consistency,
:values,
:skip_metadata,
:page_size,
:paging_state,
:serial_consistency,
:timestamp,
]
@flags %{
:values => 0x01,
:skip_metadata => 0x02,
:page_size => 0x04,
:with_paging_state => 0x08,
:with_serial_consistency => 0x10,
:with_default_timestamp => 0x20,
:with_names => 0x40,
}
def new(options) when is_list(options) do
if Keyword.keyword?(options) do
struct(__MODULE__, Keyword.take(options, @valid_keys))
else
struct(__MODULE__)
end
end
def new(options) when is_map(options) do
struct(__MODULE__, Map.take(options, @valid_keys))
end
def new(_) do
struct(__MODULE__)
end
def encode(q = %__MODULE__{values: values}) when is_nil(values) do
encode(q, false, false, nil)
end
def encode(q = %__MODULE__{values: values}) when is_list(values) or is_map(values) do
if Enum.empty?(values) do
encode(q, false, false, nil)
else
with {:ok, encoded} <- ok(values(values)) do
encode(q, true, is_map(values), encoded)
end
end
end
def encode(_), do: CQL.Error.new("invalud params")
defp encode(q, has_values, has_names, values) do
has_timestamp = is_integer(q.timestamp) and q.timestamp > 0
flags =
[]
|> prepend(:values, has_values)
|> prepend(:skip_metadata, q.skip_metadata)
|> prepend(:page_size, q.page_size)
|> prepend(:with_paging_state, q.paging_state)
|> prepend(:with_serial_consistency, q.serial_consistency)
|> prepend(:with_default_timestamp, has_timestamp)
|> prepend(:with_names, has_names)
|> names_to_flag(@flags)
|> byte
q.consistency
|> consistency
|> List.wrap
|> prepend(flags)
|> prepend(values, has_values)
|> prepend_not_nil(q.page_size, :int)
|> prepend_not_nil(q.paging_state, :bytes)
|> prepend_not_nil(q.serial_consistency, :consistency)
|> prepend(q.timestamp, has_timestamp)
|> Enum.reverse
|> Enum.join
end
end
|
lib/cql/query_params.ex
| 0.702428 | 0.500122 |
query_params.ex
|
starcoder
|
defmodule USGovData.Parser do
@doc """
Parses a file using a specific parser module.
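Example (the parser module, path, and error atom are hypothetical; a `.gz`
path is opened as a compressed file):
    USGovData.Parser.parse_file("data/records.txt.gz", MyApp.RecordParser, drop_errors: [:bad_line])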
"""
@spec parse_file(String.t(), atom, Keyword.t()) ::
{:ok, [term]} | {:error, non_neg_integer, atom}
def parse_file(path, parser, opts \\ []) do
file_opts = parse_file_opts(path, opts)
drop_errors = parse_drop_errors(opts)
case File.open(path, file_opts) do
{:ok, fd} ->
read_and_parse(fd, parser, 1, [], drop_errors)
error ->
error
end
end
@doc """
Parses a single line using a specific parser module.
"""
@spec parse_line(String.t(), atom, Keyword.t()) ::
{:ok, [term]} | {:error, non_neg_integer, atom}
def parse_line(line, parser, opts \\ []) do
drop_errors = parse_drop_errors(opts)
line =
if String.ends_with?(line, "\n") do
line
else
line <> "\n"
end
case apply(parser, :parse_line, [line]) do
{:ok, parsed} ->
{:ok, parsed}
{:error, reason} ->
if drop_error?(drop_errors, reason) do
{:ok, []}
else
{:error, 1, reason}
end
end
end
defp drop_error?([:all], _error), do: true
defp drop_error?(drops, error), do: Enum.member?(drops, error)
defp read_and_parse(fd, parser, linum, acc, drop_errors) do
case :file.read_line(fd) do
{:ok, line} ->
line =
if String.ends_with?(line, "\n") do
line
else
line <> "\n"
end
case apply(parser, :parse_line, [line]) do
{:ok, parsed} ->
read_and_parse(fd, parser, linum + 1, [parsed | acc], drop_errors)
{:error, reason} ->
if drop_error?(drop_errors, reason) do
read_and_parse(fd, parser, linum + 1, acc, drop_errors)
else
{:error, linum, reason}
end
end
:eof ->
{:ok, Enum.reverse(acc)}
{:error, reason} ->
{:error, linum, reason}
end
end
defp parse_file_opts(path, opts) do
fopts = [:read, :binary]
if Keyword.get(opts, :compressed) == true or String.ends_with?(path, ".gz") do
[:compressed | fopts]
else
fopts
end
end
defp parse_drop_errors(opts) do
drop_errors =
case Keyword.get(opts, :drop_errors) do
error when is_atom(error) ->
[error]
errs when is_list(errs) ->
errs
_ ->
[]
end
consolidate_drops(drop_errors)
end
defp consolidate_drops(errors) do
if Enum.member?(errors, :all) do
[:all]
else
errors |> Enum.uniq()
end
end
end
|
lib/usgov_data.ex
| 0.613352 | 0.463444 |
usgov_data.ex
|
starcoder
|
defmodule CFG do
@moduledoc """
This top-level namespace holds all the functionality for working with CFG.
You'll normally interact with configurations using the Config submodule.
"""
alias ComplexNum.Cartesian, as: Complex
defmodule Location do
defstruct line: 1, column: 1
@typedoc """
This type represents a location in the CFG source.
These are its fields:
* `line`: The source line. It must be a positive integer.
* `column`: The source column. It must be a non-negative integer. Newlines end
with a zero column; the first character in the next line would be at column 1.
"""
@type t :: %__MODULE__{
line: pos_integer(),
column: non_neg_integer()
}
@doc """
Return a location with the specified line and column.
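For example (the result is shown via this module's custom `Inspect`):
    iex> CFG.Location.new(3, 7)
    _L(3, 7)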
"""
@spec new(pos_integer(), non_neg_integer()) :: %Location{}
def new(line \\ 1, column \\ 1) do
%Location{line: line, column: column}
end
# "Return the location of the start of the next line."
@doc false
@spec next_line(%Location{}) :: %Location{}
def next_line(loc) do
%Location{line: loc.line + 1, column: 1}
end
# "Return the location of the next column."
@doc false
@spec next_col(%Location{}) :: %Location{}
def next_col(loc) do
%Location{line: loc.line, column: loc.column + 1}
end
# "Return the location of the previous column."
@doc false
@spec prev_col(%Location{}) :: %Location{}
def prev_col(loc) do
%Location{line: loc.line, column: loc.column - 1}
end
end
defimpl Inspect, for: Location do
def inspect(loc, _opts) do
"_L(#{loc.line}, #{loc.column})"
end
end
defimpl String.Chars, for: Location do
def to_string(loc) do
"(#{loc.line}, #{loc.column})"
end
end
defmodule RecognizerError do
defexception [:location, :reason, :detail]
@typedoc """
This type represents an error which occurred when processing CFG.
These are its fields:
* `location`: The optional location of the error in the source. Some errors may
have no location.
* `reason`: An atom indicating the kind of error.
* `detail`: Optional additional information about the error.
Here are the error reasons currently in use:
* `invalid_escape` - an invalid escape sequence was detected in a string.
* `unterminated_backtick` - a backtick-string is unterminated.
* `newlines_not_allowed` - newlines aren't allowed in strings other than multi-line strings.
* `unterminated_string` - a quoted string is unterminated.
* `bad_number` - a number is badly formed.
* `bad_octal_constant` - a number which looks like an octal constant is badly formed.
* `unexpected_char` - an unexpected character was encountered.
* `unexpected_token` - an unexpected token was encountered.
* `unexpected_token_for_value` - an unexpected token was encountered when looking for a value.
* `unexpected_token_for_atom` - an unexpected token was encountered when looking for an atomic value.
* `bad_key_value_separator` - a bad key/value separator was encountered.
* `unexpected_for_key` - an unexpected token was encountered when looking for a key in a mapping.
* `unexpected_token_for_container` - an unexpected token was encountered when parsing a container.
* `text_after_container` - there is trailing text following text for a valid container.
* `invalid_index` - an array or slice index is invalid.
* `unexpected_token_for_expression` - an unexpected token was encountered when looking for an expression.
* `must_be_mapping` - a top-level configuration must be a mapping.
* `invalid_path` - a CFG path is invalid.
* `invalid_path_extra` - there is text following what looks like a valid CFG path.
* `no_configuration` - no configuration has been loaded.
* `not_found` - the specified key or path was not found in this configuration.
* `invalid_step` - an invalid step (zero) was specified.
* `unexpected_path_start` - a CFG path doesn't begin as expected (with an identifier).
* `cannot_evaluate` - an expression cannot be evaluated.
* `string_expected` - a string was expected, but not found.
* `include_not_found` - an included configuration was not found.
* `cannot_add` - an addition cannot be performed.
* `cannot_negate` - a negation cannot be performed.
* `cannot_subtract` - a subtraction cannot be performed.
* `cannot_multiply` - a multiplication cannot be performed.
* `cannot_divide` - a division cannot be performed.
* `cannot_integer_divide` - an integer division cannot be performed.
* `cannot_compute_modulo` - a modulo operation cannot be performed.
* `cannot_left_shift` - a left shift cannot be performed.
* `cannot_right_shift` - a right shift cannot be performed.
* `cannot_raise_to_power` - raise to power operation cannot be performed.
* `cannot_bitwise_or` - a bitwise-or operation cannot be performed.
* `cannot_bitwise_and` - a bitwise-and operation cannot be performed.
* `cannot_bitwise_xor` - a bitwise-xor operation cannot be performed.
* `unknown_variable` - a variable is undefined or no context was provided.
* `conversion_failure` - a string conversion operation cannot be performed.
* `circular_reference` - a circular reference was detected when resolving references.
* `not_implemented` - a feature is not implemented.
"""
@type t :: %__MODULE__{
location: nil | %Location{},
reason: atom(),
detail: nil | any()
}
@doc false
def exception(reason, location, detail),
do: %__MODULE__{reason: reason, location: location, detail: detail}
@doc false
def message(exception = %__MODULE__{}),
do: CFG.format_error(exception)
end
@doc """
Format an exception for display.
"""
@spec format_error(%RecognizerError{}) :: String.t()
def format_error(exception) do
"#{inspect(exception)}"
end
defimpl String.Chars, for: ComplexNum do
def to_string(z) do
if z.real == 0 do
"#{z.imaginary}j"
else
"#{z.real} + #{z.imaginary}j"
end
end
end
defmodule Token do
# """This module represents a token in the CFG language."""
@moduledoc false
defstruct [:kind, :text, :value, :start, :end]
end
defimpl String.Chars, for: Token do
def to_string(t) do
"<Token #{t.kind} \{#{t.text}\} [#{t.value}] #{t.start}-#{t.end}>"
end
end
defmodule Tokenizer do
# """This module contains the functionality to convert CFG source code into tokens."""
@moduledoc false
use Agent
require Logger
require Map
defstruct [
:stream,
:location,
:char_location,
:pushed_back,
:escapes,
:punctuation,
:keywords,
:keyword_values
]
def new(stream) do
{:ok, pid} =
Agent.start(fn ->
%Tokenizer{
stream: stream,
location: Location.new(),
char_location: Location.new(),
pushed_back: [],
escapes: %{
"a" => "\a",
"b" => "\b",
"f" => "\f",
"n" => "\n",
"r" => "\r",
"t" => "\t",
"v" => "\v",
"\\" => "\\",
"'" => "'",
"\"" => "\""
},
punctuation: %{
"=" => :ASSIGN,
":" => :COLON,
"-" => :MINUS,
"+" => :PLUS,
"*" => :STAR,
"/" => :SLASH,
"%" => :MODULO,
"," => :COMMA,
"{" => :LCURLY,
"}" => :RCURLY,
"[" => :LBRACK,
"]" => :RBRACK,
"(" => :LPAREN,
")" => :RPAREN,
"@" => :AT,
"$" => :DOLLAR,
"<" => :LT,
">" => :GT,
"!" => :NOT,
"~" => :BITNOT,
"&" => :BITAND,
"|" => :BITOR,
"^" => :BITXOR
},
keywords: %{
"true" => :TRUE,
"false" => :FALSE,
"null" => :NONE,
"is" => :IS,
"in" => :IN,
"not" => :NOT,
"and" => :AND,
"or" => :OR
},
keyword_values: %{
:TRUE => true,
:FALSE => false,
:NONE => nil
}
}
end)
pid
end
def from_source(s) do
{:ok, stream} = StringIO.open(s)
new(stream)
end
def from_file(path) do
{:ok, stream} = File.open(path, [:read, :utf8])
new(stream)
end
defp push_back(this, c) do
if c != :eof do
state = Agent.get(this, fn state -> state end)
pb = [{c, state.char_location} | state.pushed_back]
Agent.update(this, fn state -> %{state | pushed_back: pb} end)
end
end
defp get_char(this) do
state = Agent.get(this, fn state -> state end)
pb = state.pushed_back
loc = state.location
{result, cloc, read_back} =
if Enum.empty?(pb) do
{IO.read(state.stream, 1), loc, false}
else
[h | t] = pb
Agent.update(this, fn state -> %{state | pushed_back: t} end)
{a, b} = h
{a, b, true}
end
loc =
if read_back do
cloc
else
loc
end
loc =
if !is_binary(result) do
loc
else
if result == "\n" do
Location.next_line(loc)
else
Location.next_col(loc)
end
end
if result != :eof do
Agent.update(this, fn state -> %{state | location: loc, char_location: cloc} end)
end
result
end
defp as_string(list) do
List.to_string(list)
end
defp is_digit(c) do
is_binary(c) && String.match?(c, ~r/\d/)
end
defp is_alnum(c) do
is_binary(c) && String.match?(c, ~r/[_\p{L}\p{Nd}]/u)
end
defp adjusted_loc(c, loc) do
if c == :eof do
loc
else
Location.prev_col(loc)
end
end
defp collect_ident(this, token, start) do
c = get_char(this)
if c != :eof && is_alnum(c) do
collect_ident(this, token ++ [c], start)
else
push_back(this, c)
s = as_string(token)
state = Agent.get(this, fn state -> state end)
k = Map.get(state.keywords, s, :WORD)
v = Map.get(state.keyword_values, k)
eloc = adjusted_loc(c, state.char_location)
{:ok, %Token{kind: k, text: s, value: v, start: start, end: eloc}}
end
end
defp char_loc(this) do
Agent.get(this, fn state -> state.char_location end)
end
defp error(reason, loc, detail) do
{:error, RecognizerError.exception(reason, loc, detail)}
end
defp parse_escapes(escapes, s, pos) do
parts = String.split(s, "\\", parts: 2)
if length(parts) == 1 do
{:ok, s}
else
[first, rest] = parts
newpos = pos + String.length(first)
# Logger.debug("[#{first}|#{rest}]")
c = String.first(rest)
if Map.has_key?(escapes, c) do
last = parse_escapes(escapes, String.slice(rest, 1..-1), newpos)
case last do
{:error, _} -> last
{:ok, pv} -> {:ok, first <> Map.get(escapes, c) <> pv}
end
else
c = String.first(rest)
if String.match?(c, ~r/[ux]/i) do
len =
cond do
c == "x" || c == "X" -> 2
c == "u" -> 4
true -> 8
end
if String.length(rest) < len + 1 do
error(:invalid_escape, newpos, rest)
else
hex = String.slice(rest, 1, len)
if !String.match?(hex, ~r/^[0-9a-f]+$/i) do
error(:invalid_escape, newpos, rest)
else
{esc, _} = Integer.parse(hex, 16)
{:ok, first <> List.to_string([esc]) <> String.slice(rest, (1 + len)..-1)}
end
end
else
{:ok, first <> rest}
end
end
end
end
defp collect_backtick(this, token, start) do
c = get_char(this)
cond do
c == :eof ->
error(:unterminated_backtick, char_loc(this), as_string(token))
c == "\n" ->
error(:newlines_not_allowed, char_loc(this), as_string(token))
c == "`" ->
state = Agent.get(this, fn state -> state end)
s = as_string(token ++ [c])
n = 1 + length(token)
pe = parse_escapes(state.escapes, String.slice(s, 1, n - 2), 0)
case pe do
{:error, %RecognizerError{reason: r, detail: d}} ->
error(r, start, d)
{:ok, pv} ->
{:ok,
%Token{kind: :BACKTICK, text: s, value: pv, start: start, end: state.char_location}}
end
true ->
collect_backtick(this, token ++ [c], start)
end
end
defp collect_string(this, token, quoter, multi_line, escaped, start) do
c = get_char(this)
# Logger.debug("[#{token}|#{c}]")
cond do
c == :eof ->
error(:unterminated_string, char_loc(this), as_string(token))
c == "\\" ->
collect_string(this, token ++ [c], quoter, multi_line, !escaped, start)
c == "\n" || c == "\r" ->
if !multi_line do
error(:newlines_not_allowed, char_loc(this), as_string(token))
else
addend =
if c == "\n" do
["\n"]
else
nc = get_char(this)
if nc == "\n" do
["\r", "\n"]
else
# perhaps handle unexpected \r not followed by \n
["\r", nc]
end
end
collect_string(this, token ++ addend, quoter, multi_line, false, start)
end
c == String.first(quoter) && !escaped ->
s = as_string(token ++ [c])
n = 1 + length(token)
qn = String.length(quoter)
# Logger.debug("[#{s}|#{n}|#{qn}|#{String.slice(s, -qn..-1)}|#{quoter}]")
if n >= qn * 2 && String.slice(s, -qn..-1) == quoter do
state = Agent.get(this, fn state -> state end)
# Have to be careful to use a range here - on Windows, \r\n is counted
# as one grapheme, but we've counted it as two since we've been using
# a list rather than a string to collect the token.
pe = parse_escapes(state.escapes, String.slice(s, qn..(-qn - 1)), 0)
case pe do
{:error, %RecognizerError{reason: r, detail: d}} ->
error(r, start, d)
{:ok, pv} ->
r = %Token{
kind: :STRING,
text: s,
value: pv,
start: start,
end: state.char_location
}
{:ok, r}
end
else
collect_string(this, token ++ [c], quoter, multi_line, false, start)
end
true ->
token = token ++ [c]
# IO.puts(as_string(token))
collect_string(this, token, quoter, multi_line, false, start)
end
end
defp collect_string(this, quote, start) do
c = get_char(this)
c1_loc = char_loc(this)
quoter =
if c != quote do
push_back(this, c)
quote
else
c = get_char(this)
if c != quote do
push_back(this, c)
Agent.update(this, fn state -> %{state | char_location: c1_loc} end)
push_back(this, quote)
quote
else
"#{quote}#{quote}#{quote}"
end
end
collect_string(
this,
String.split(quoter, "", trim: true),
quoter,
String.length(quoter) > 1,
false,
start
)
end
defp has_exponent(token) do
s = as_string(token)
String.contains?(s, "E") || String.contains?(s, "e")
end
defp number_done(this, token, start, radix, c) do
s = as_string(token)
if String.match?(String.last(s), ~r/[eoxb_-]/i) do
error(:bad_number, char_loc(this), s)
else
kind =
cond do
String.ends_with?(s, ["j", "J"]) ->
:COMPLEX
String.contains?(s, ".") || has_exponent(token) ->
:FLOAT
true ->
:INTEGER
end
ss =
cond do
String.slice(s, 0, 2) == "-." ->
"-0#{String.slice(s, 1..-1)}"
String.slice(s, 0, 1) == "." ->
"0#{s}"
true ->
s
end
ss =
cond do
kind == :COMPLEX ->
String.slice(ss, 0..-2)
radix != 10 ->
String.slice(ss, 2..-1)
true ->
ss
end
ss = String.replace(ss, ~r/_/, "")
# At this point, we could have a number with a leading zero,
# which should be treated as an octal constant, even though
# the radix would be 10 here as we didn't start with 0[xob].
# We check for that specifically before the final parse to
# a number.
if kind == :INTEGER and radix == 10 && String.match?(ss, ~r/^0[0-9]+$/) &&
!String.match?(ss, ~r/^0[0-7]+$/) do
error(:bad_octal_constant, char_loc(this), ss)
else
radix =
if radix == 10 && String.match?(ss, ~r/^0[0-7]+$/) do
8
else
radix
end
{v, _} =
if kind == :INTEGER do
Integer.parse(ss, radix)
else
Float.parse(ss)
end
state = Agent.get(this, fn state -> state end)
eloc = adjusted_loc(c, state.char_location)
# IO.puts("#{kind}, #{v}")
v =
if kind != :COMPLEX do
v
else
Complex.new(0, v)
end
{:ok, %Token{kind: kind, text: s, value: v, start: start, end: eloc}}
end
end
end
defp last_index(haystack, needle) do
result = Enum.find_index(Enum.reverse(haystack), fn x -> x == needle end)
result =
if result == nil do
nil
else
length(haystack) - result - 1
end
# IO.puts("last_index(#{haystack}, #{needle}) -> #{result}")
result
end
defp is_valid_digit(c, radix) do
cond do
radix == 16 ->
String.match?(c, ~r/[0-9a-f]/i)
radix == 8 ->
c >= "0" && c <= "7"
radix == 2 ->
c == "0" || c == "1"
true ->
is_digit(c)
end
end
defp collect_number(this, token, radix, start) do
c = get_char(this)
cond do
c == :eof ->
number_done(this, token, start, radix, c)
is_valid_digit(c, radix) ->
collect_number(this, token ++ [c], radix, start)
c == "_" ->
if is_valid_digit(List.last(token), radix) do
collect_number(this, token ++ [c], radix, start)
else
error(:bad_number, char_loc(this), "#{token}#{c}")
end
c == "." ->
if radix != 10 || String.contains?(as_string(token), ".") do
error(:bad_number, char_loc(this), "#{token}#{c}")
else
collect_number(this, token ++ [c], radix, start)
end
c == "e" || c == "E" ->
if has_exponent(token) || radix != 10 do
error(:bad_number, char_loc(this), "#{token}#{c}")
else
collect_number(this, token ++ [c], radix, start)
end
c == "-" ->
lv = last_index(token, "-")
# existing minus after exponent
if !has_exponent(token) || (lv != nil && lv > 0) do
error(:bad_number, char_loc(this), "#{token}#{c}")
else
collect_number(this, token ++ [c], radix, start)
end
c == "j" || c == "J" ->
if radix != 10 do
error(:bad_number, char_loc(this), "#{token}#{c}")
else
nc = get_char(this)
if nc != :eof && is_alnum(nc) do
error(:bad_number, char_loc(this), "#{token}#{c}#{nc}")
else
push_back(this, nc)
number_done(this, token ++ [c], start, radix, nc)
end
end
# We flag up an alphanumeric char adjacent to a number
is_alnum(c) ->
error(:bad_number, char_loc(this), "#{token}#{c}")
true ->
push_back(this, c)
number_done(this, token, start, radix, c)
end
end
defp punctuation(kind, s, start) do
eloc = %{start | column: start.column + String.length(s) - 1}
{:ok, %Token{kind: kind, text: s, value: nil, start: start, end: eloc}}
end
defp get_radix(c) do
cond do
c == "x" || c == "X" ->
16
c == "o" || c == "O" ->
8
c == "b" || c == "B" ->
2
true ->
10
end
end
def collect_punct(this, c, kind, start) do
case c do
"=" ->
nc = get_char(this)
if nc == "=" do
punctuation(:EQ, "==", start)
else
push_back(this, nc)
punctuation(kind, c, start)
end
"!" ->
nc = get_char(this)
if nc == "=" do
punctuation(:NEQ, "!=", start)
else
push_back(this, nc)
punctuation(kind, c, start)
end
"*" ->
nc = get_char(this)
if nc == "*" do
punctuation(:POWER, "**", start)
else
push_back(this, nc)
punctuation(kind, c, start)
end
"/" ->
nc = get_char(this)
if nc == "/" do
punctuation(:SLASHSLASH, "//", start)
else
push_back(this, nc)
punctuation(kind, c, start)
end
"<" ->
nc = get_char(this)
case nc do
"=" ->
punctuation(:LE, "<=", start)
">" ->
punctuation(:ALT_NEQ, "<>", start)
"<" ->
punctuation(:LSHIFT, "<<", start)
_ ->
push_back(this, nc)
punctuation(kind, c, start)
end
">" ->
nc = get_char(this)
case nc do
"=" ->
punctuation(:GE, ">=", start)
">" ->
punctuation(:RSHIFT, ">>", start)
_ ->
push_back(this, nc)
punctuation(kind, c, start)
end
_ ->
punctuation(kind, c, start)
end
end
defp collect_newline(s, start) do
{:ok,
%Token{
kind: :NEWLINE,
text: s,
value: nil,
start: start,
end: %Location{line: start.line + 1, column: 0}
}}
end
def get_token(this) do
c = get_char(this)
state = Agent.get(this, fn state -> state end)
start_location = state.char_location
# end_location = state.char_location
cond do
c == :eof ->
{:ok,
%Token{kind: :EOF, text: "", value: nil, start: state.location, end: state.location}}
String.match?(c, ~r/[ \t]/) ->
get_token(this)
c == "\r" || c == "\n" ->
if c == "\r" do
c = get_char(this)
if c != "\n" do
push_back(this, c)
end
end
collect_newline("\n", start_location)
c == "#" ->
s = IO.read(state.stream, :line)
loc = Location.new(start_location.line + 1, 1)
Agent.update(this, fn state -> %{state | location: loc} end)
collect_newline("\##{String.trim_trailing(s)}", start_location)
c == "\\" ->
c = get_char(this)
if c != "\r" && c != "\n" do
error(:unexpected_char, state.char_location, "\\")
else
if c == "\r" do
c = get_char(this)
if c != "\n" do
push_back(this, c)
end
end
get_token(this)
end
c == "'" || c == "\"" ->
collect_string(this, c, start_location)
c == "`" ->
collect_backtick(this, [c], start_location)
String.match?(c, ~r/[_\p{L}]/u) ->
collect_ident(this, [c], start_location)
c == "0" ->
c = get_char(this)
radix = get_radix(c)
token =
if radix == 10 do
push_back(this, c)
["0"]
else
["0", c]
end
collect_number(this, token, radix, start_location)
is_digit(c) ->
collect_number(this, [c], 10, start_location)
c == "." ->
c = get_char(this)
if is_digit(c) do
collect_number(this, [".", c], 10, start_location)
else
push_back(this, c)
punctuation(:DOT, ".", start_location)
end
c == "-" ->
c = get_char(this)
cond do
c == "0" ->
c = get_char(this)
radix = get_radix(c)
token =
if radix == 10 do
["-", "0"]
else
["-", "0", c]
end
collect_number(this, token, radix, start_location)
is_digit(c) || c == "." ->
collect_number(this, ["-", c], 10, start_location)
true ->
push_back(this, c)
punctuation(:MINUS, "-", start_location)
end
Map.has_key?(state.punctuation, c) ->
collect_punct(this, c, Map.get(state.punctuation, c), start_location)
true ->
error(:unexpected_char, char_loc(this), c)
end
end
end
defmodule UnaryNode do
# """ This module represents an AST node for a unary expression."""
@moduledoc false
defstruct [:kind, :operand, :start]
@doc """
Return a new unary node.
"""
@spec new(atom(), struct(), %Location{}) :: %UnaryNode{}
def new(kind, operand, start) do
%UnaryNode{kind: kind, operand: operand, start: start}
end
end
defmodule BinaryNode do
# """This module represents an AST node for a binary expression."""
@moduledoc false
defstruct [:kind, :lhs, :rhs, :start]
@doc """
Return a new binary node.
"""
@spec new(atom(), struct(), struct(), %Location{}) :: %BinaryNode{}
def new(kind, lhs, rhs, start) do
%BinaryNode{kind: kind, lhs: lhs, rhs: rhs, start: start}
end
end
defimpl Inspect, for: Token do
def inspect(tok, _opts) do
v =
if tok.kind == :WORD do
tok.text
else
tok.value
end
"_T[#{inspect(tok.kind)}|#{v}|#{tok.start}]"
end
end
defimpl Inspect, for: UnaryNode do
def inspect(un, _opts) do
"_U[#{inspect(un.kind)}|#{inspect(un.operand)}]"
end
end
defimpl Inspect, for: BinaryNode do
def inspect(bn, _opts) do
"_B[#{inspect(bn.kind)}|#{inspect(bn.lhs)}|#{inspect(bn.rhs)}]"
end
end
defmodule SliceNode do
# """This module represents an AST node for a slice expression (start, stop, step)."""
@moduledoc false
defstruct [:start_index, :stop_index, :step, :start]
@doc """
Return a new slice node.
"""
@spec new(any(), any(), any(), %Location{}) :: %SliceNode{}
def new(start, stop, step, loc) do
%SliceNode{start_index: start, stop_index: stop, step: step, start: loc}
end
end
defmodule ListNode do
# """This module represents an AST node for a list."""
@moduledoc false
defstruct kind: :LBRACK, elements: [], start: nil
def new(elements, start) do
%ListNode{elements: elements, start: start}
end
end
defmodule MappingNode do
# """This module represents an AST node for a mapping."""
@moduledoc false
defstruct kind: :LCURLY, elements: [], start: nil
def new(elements, start) do
%MappingNode{elements: elements, start: start}
end
end
defmodule Parser do
# """This module contains the functionality to convert CFG source code into an AST (abstract syntax tree)."""
@moduledoc false
use Agent
require Logger
defstruct [
:tokenizer,
:next_token,
:expression_starters,
:value_starters,
:comparison_operators
]
def new(stream) do
tokenizer = Tokenizer.new(stream)
v = Tokenizer.get_token(tokenizer)
case v do
{:error, _} ->
v
{:ok, t} ->
Agent.start(fn ->
%Parser{
tokenizer: tokenizer,
next_token: t,
expression_starters:
MapSet.new([
:LCURLY,
:LBRACK,
:LPAREN,
:AT,
:DOLLAR,
:BACKTICK,
:PLUS,
:MINUS,
:BITNOT,
:INTEGER,
:FLOAT,
:COMPLEX,
:TRUE,
:FALSE,
:NONE,
:NOT,
:STRING,
:WORD
]),
value_starters:
MapSet.new([
:BACKTICK,
:INTEGER,
:FLOAT,
:COMPLEX,
:TRUE,
:FALSE,
:NONE,
:STRING,
:WORD
]),
comparison_operators:
MapSet.new([
:LT,
:LE,
:GT,
:GE,
:EQ,
:NEQ,
:ALT_NEQ,
:IS,
:IN,
:NOT
])
}
end)
end
end
def from_source(s) do
{:ok, stream} = StringIO.open(s)
new(stream)
end
def from_file(path) do
{:ok, stream} = File.open(path, [:read, :utf8])
new(stream)
end
def at_end(this) do
Agent.get(this, fn state -> state.next_token.kind == :EOF end)
end
defp advance(this) do
tokenizer = Agent.get(this, fn state -> state.tokenizer end)
v = Tokenizer.get_token(tokenizer)
case v do
{:error, _} ->
v
{:ok, nt} ->
Agent.update(this, fn state -> %{state | next_token: nt} end)
{:ok, nt.kind}
end
end
# public for debugging
def next_token(this) do
Agent.get(this, fn state -> state.next_token end)
end
defp next_token_start(this) do
Agent.get(this, fn state -> state.next_token.start end)
end
defp error(reason, loc, detail) do
{:error, RecognizerError.exception(reason, loc, detail)}
end
defp expect(this, kind) do
nt = next_token(this)
if nt.kind != kind do
error(:unexpected_token, nt.start, {kind, nt.kind})
else
advance(this)
{:ok, nt}
end
end
defp consume_newlines(this) do
nt = next_token(this)
case nt.kind do
:NEWLINE ->
v = advance(this)
case v do
{:ok, :NEWLINE} ->
consume_newlines(this)
_ ->
v
end
_ ->
{:ok, nt.kind}
end
end
defp collect_strings(this, pid) do
nt = next_token(this)
Agent.update(pid, fn state ->
%{
state
| texts: state.texts ++ [nt.text],
values: state.values ++ [nt.value],
end: nt.end
}
end)
v = advance(this)
case v do
{:error, _} ->
v
{:ok, :STRING} ->
collect_strings(this, pid)
{:ok, _} ->
state = Agent.get(pid, fn state -> state end)
merged = %Token{
kind: :STRING,
text: Enum.join(state.texts),
value: Enum.join(state.values),
start: state.start,
end: state.end
}
{:ok, merged}
end
end
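# Merge a run of adjacent STRING tokens into a single STRING token
# (implicit string concatenation, e.g. 'foo' 'bar' -> 'foobar').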
def strings(this) do
result = next_token(this)
v = advance(this)
case v do
{:error, _} ->
v
{:ok, :STRING} ->
{:ok, pid} =
Agent.start(fn ->
%{
texts: [result.text],
values: [result.value],
start: result.start,
end: result.end
}
end)
v = collect_strings(this, pid)
Agent.stop(pid)
v
{:ok, _} ->
{:ok, result}
end
end
def value(this) do
state = Agent.get(this, fn state -> state end)
kind = state.next_token.kind
if !MapSet.member?(state.value_starters, kind) do
error(:unexpected_token_for_value, state.next_token.start, kind)
else
if kind == :STRING do
strings(this)
else
result = state.next_token
v = advance(this)
case v do
{:error, _} ->
v
_ ->
{:ok, result}
end
end
end
end
def atom(this) do
state = Agent.get(this, fn state -> state end)
nt = state.next_token
case nt.kind do
:LCURLY ->
mapping(this)
:LBRACK ->
list(this)
:LPAREN ->
v = advance(this)
case v do
{:error, _} ->
v
_ ->
expr = expression(this)
case expr do
{:error, _} ->
expr
_ ->
v = expect(this, :RPAREN)
case v do
{:error, _} ->
v
_ ->
expr
end
end
end
:DOLLAR ->
v = advance(this)
case v do
{:error, _} ->
v
_ ->
v = expect(this, :LCURLY)
case v do
{:error, _} ->
v
_ ->
start = Agent.get(this, fn state -> state.next_token.start end)
expr = primary(this)
case expr do
{:error, _} ->
expr
{:ok, expr} ->
v = expect(this, :RCURLY)
case v do
{:error, _} ->
v
_ ->
{:ok, UnaryNode.new(:DOLLAR, expr, start)}
end
end
end
end
# Can't use MapSet.member?(state.value_starters, k) :-(
k
when k in [
:BACKTICK,
:INTEGER,
:FLOAT,
:COMPLEX,
:TRUE,
:FALSE,
:NONE,
:STRING,
:WORD
] ->
value(this)
_ ->
error(:unexpected_token_for_atom, nt.start, nt.kind)
end
end
defp mapping_key(this, nt) do
if nt.kind == :STRING do
strings(this)
else
v = advance(this)
case v do
{:error, _} -> v
_ -> {:ok, nt}
end
end
end
defp advance_and_consume_newlines(this) do
v = advance(this)
case v do
{:error, _} ->
v
_ ->
consume_newlines(this)
end
end
defp collect_mapping_elements(this, nt, elements) do
v = mapping_key(this, nt)
case v do
{:error, _} ->
v
{:ok, key} ->
nt = next_token(this)
if nt.kind != :COLON && nt.kind != :ASSIGN do
error(:bad_key_value_separator, nt.start, nt.kind)
else
v = advance_and_consume_newlines(this)
case v do
{:error, _} ->
v
_ ->
v = expression(this)
case v do
{:error, _} ->
v
{:ok, expr} ->
kvp = {key, expr}
nt = next_token(this)
case nt.kind do
k when k in [:NEWLINE, :COMMA] ->
v = advance_and_consume_newlines(this)
case v do
{:error, _} ->
v
{:ok, kind} ->
if kind in [:EOF, :RCURLY] do
{:ok, elements ++ [kvp]}
else
nt = next_token(this)
if kind != :WORD && kind != :STRING do
error(:unexpected_for_key, nt.start, kind)
else
collect_mapping_elements(this, nt, elements ++ [kvp])
end
end
end
k when k in [:EOF, :RCURLY] ->
{:ok, elements ++ [kvp]}
k when k in [:WORD, :STRING] ->
collect_mapping_elements(this, nt, elements ++ [kvp])
_ ->
error(:unexpected_for_key, nt.start, nt.kind)
end
end
end
end
end
end
def mapping_body(this) do
v = consume_newlines(this)
case v do
{:error, _} ->
v
_ ->
nt = next_token(this)
kind = nt.kind
start = nt.start
if kind == :EOF || kind == :RCURLY do
{:ok, MappingNode.new([], start)}
else
if kind != :WORD && kind != :STRING do
error(:unexpected_for_key, start, kind)
else
v = collect_mapping_elements(this, nt, [])
case v do
{:error, _} ->
v
{:ok, elements} ->
{:ok, MappingNode.new(elements, start)}
end
end
end
end
end
def mapping(this) do
v = expect(this, :LCURLY)
case v do
{:error, _} ->
v
_ ->
v = mapping_body(this)
case v do
{:error, _} ->
v
_ ->
ev = expect(this, :RCURLY)
case ev do
{:error, _} -> ev
_ -> v
end
end
end
end
defp collect_list_elements(this, elements) do
v = expression(this)
case v do
{:error, _} ->
v
{:ok, expr} ->
{es, kind} =
Agent.get(this, fn state -> {state.expression_starters, state.next_token.kind} end)
if kind in [:NEWLINE, :COMMA] do
v = advance_and_consume_newlines(this)
case v do
{:error, _} ->
v
{:ok, kind} ->
if !MapSet.member?(es, kind) do
{:ok, elements ++ [expr]}
else
collect_list_elements(this, elements ++ [expr])
end
end
else
{:ok, elements ++ [expr]}
end
end
end
def list_body(this) do
v = consume_newlines(this)
case v do
{:error, _} ->
v
_ ->
{es, nt} =
Agent.get(this, fn state -> {state.expression_starters, state.next_token} end)
kind = nt.kind
start = nt.start
elements = []
if MapSet.member?(es, kind) do
v = collect_list_elements(this, elements)
case v do
{:error, _} ->
v
{:ok, elements} ->
{:ok, ListNode.new(elements, start)}
end
else
{:ok, ListNode.new(elements, start)}
end
end
end
def list(this) do
v = expect(this, :LBRACK)
case v do
{:error, _} ->
v
_ ->
v = list_body(this)
case v do
{:error, _} ->
v
_ ->
ev = expect(this, :RBRACK)
case ev do
{:error, _} -> ev
_ -> v
end
end
end
end
def container(this, check_end \\ true) do
v = consume_newlines(this)
result =
case v do
{:error, _} ->
v
{:ok, :LCURLY} ->
mapping(this)
{:ok, :LBRACK} ->
list(this)
{:ok, k} ->
if k in [:WORD, :STRING, :EOF] do
mapping_body(this)
else
nt = next_token(this)
error(:unexpected_token_for_container, nt.start, k)
end
end
case result do
{:error, _} ->
result
_ ->
v = consume_newlines(this)
case v do
{:error, _} ->
v
_ ->
if !check_end || at_end(this) do
result
else
nt = next_token(this)
error(:text_after_container, nt.start, nt)
end
end
end
end
defp get_slice_element(this) do
v = list_body(this)
case v do
{:error, _} ->
v
{:ok, lb} ->
n = length(lb.elements)
if n != 1 do
error(:invalid_index, lb.start, n)
else
{:ok, List.first(lb.elements)}
end
end
end
defp trailer(this, kind) do
if kind == :DOT do
v = advance(this)
case v do
{:error, _} ->
v
_ ->
v = expect(this, :WORD)
case v do
{:error, _} -> v
{:ok, w} -> {:ok, :DOT, w}
end
end
else
start = next_token_start(this)
v = advance(this)
case v do
{:error, _} ->
v
_ ->
{:ok, pid} =
Agent.start(fn ->
%{is_slice: false, start_index: nil, stop_index: nil, step: nil, result: nil}
end)
# After this point we just store errors in the state.result, and
# decide what to do at the end
kind = Agent.get(this, fn state -> state.next_token.kind end)
if kind == :COLON do
Agent.update(pid, fn state -> %{state | is_slice: true} end)
else
v = get_slice_element(this)
case v do
{:error, _} ->
Agent.update(pid, fn state -> %{state | result: v} end)
{:ok, elem} ->
kind = Agent.get(this, fn state -> state.next_token.kind end)
if kind != :COLON do
Agent.update(pid, fn state -> %{state | result: elem} end)
else
Agent.update(pid, fn state -> %{state | start_index: elem, is_slice: true} end)
end
end
end
state = Agent.get(pid, fn state -> state end)
if state.is_slice do
# at this point start_index is either nil (if foo[:xyz]) or a
# value representing the start. We are pointing at the COLON
# after the start value
v = advance(this)
case v do
{:error, _} ->
Agent.update(pid, fn state -> %{state | result: v} end)
{:ok, kind} ->
case kind do
# no stop, but there might be a step
:COLON ->
v = advance(this)
case v do
{:error, _} ->
Agent.update(pid, fn state -> %{state | result: v} end)
{:ok, kind} ->
if kind != :RBRACK do
v = get_slice_element(this)
case v do
{:error, _} ->
Agent.update(pid, fn state -> %{state | result: v} end)
{:ok, elem} ->
Agent.update(pid, fn state -> %{state | step: elem} end)
end
end
end
:RBRACK ->
{}
_ ->
v = get_slice_element(this)
case v do
{:error, _} ->
Agent.update(pid, fn state -> %{state | result: v} end)
{:ok, elem} ->
Agent.update(pid, fn state -> %{state | stop_index: elem} end)
kind = Agent.get(this, fn state -> state.next_token.kind end)
if kind == :COLON do
v = advance(this)
case v do
{:error, _} ->
Agent.update(pid, fn state -> %{state | result: v} end)
{:ok, kind} ->
if kind != :RBRACK do
v = get_slice_element(this)
case v do
{:error, _} ->
Agent.update(pid, fn state -> %{state | result: v} end)
{:ok, elem} ->
Agent.update(pid, fn state -> %{state | step: elem} end)
end
end
end
end
end
end
end
end
v = expect(this, :RBRACK)
case v do
{:error, _} -> Agent.update(pid, fn state -> %{state | result: v} end)
_ -> {}
end
state = Agent.get(pid, fn state -> state end)
Agent.stop(pid)
case state.result do
{:error, _} ->
state.result
_ ->
if state.is_slice do
{:ok, :COLON,
SliceNode.new(state.start_index, state.stop_index, state.step, start)}
else
{:ok, :LBRACK, state.result}
end
end
end
end
end
defp collect_trailers(this, kind, lhs, start) do
t = trailer(this, kind)
case t do
{:error, _} ->
t
{:ok, op, v} ->
new_lhs = BinaryNode.new(op, lhs, v, start)
kind = Agent.get(this, fn state -> state.next_token.kind end)
if kind == :DOT || kind == :LBRACK do
collect_trailers(this, kind, new_lhs, start)
else
{:ok, new_lhs}
end
end
end
def primary(this) do
start = next_token_start(this)
result = atom(this)
case result do
{:error, _} ->
result
{:ok, lhs} ->
kind = Agent.get(this, fn state -> state.next_token.kind end)
if kind == :DOT || kind == :LBRACK do
collect_trailers(this, kind, lhs, start)
else
result
end
end
end
defp collect_power(this, lhs, start) do
v = advance(this)
case v do
{:error, _} ->
v
_ ->
v = unary_expr(this)
case v do
{:error, _} ->
v
{:ok, expr} ->
result = BinaryNode.new(:POWER, lhs, expr, start)
nt = next_token(this)
if nt.kind == :POWER do
collect_power(this, result, start)
else
{:ok, result}
end
end
end
end
def power(this) do
start = next_token_start(this)
result = primary(this)
case result do
{:error, _} ->
result
{:ok, expr} ->
nt = next_token(this)
if nt.kind != :POWER do
result
else
collect_power(this, expr, start)
end
end
end
def unary_expr(this) do
nt = next_token(this)
if nt.kind not in [:PLUS, :MINUS, :BITNOT, :AT] do
power(this)
else
v = advance(this)
case v do
{:error, _} ->
v
_ ->
v = unary_expr(this)
case v do
{:error, _} ->
v
{:ok, expr} ->
{:ok, UnaryNode.new(nt.kind, expr, nt.start)}
end
end
end
end
def collect_mul_expr(this, kind, lhs, start) do
v = advance(this)
case v do
{:error, _} ->
v
_ ->
v = unary_expr(this)
case v do
{:error, _} ->
v
{:ok, expr} ->
result = BinaryNode.new(kind, lhs, expr, start)
nt = next_token(this)
if nt.kind in [:STAR, :SLASH, :SLASHSLASH, :MODULO] do
collect_mul_expr(this, nt.kind, result, start)
else
{:ok, result}
end
end
end
end
def mul_expr(this) do
start = next_token_start(this)
result = unary_expr(this)
case result do
{:error, _} ->
result
{:ok, expr} ->
nt = next_token(this)
if nt.kind not in [:STAR, :SLASH, :SLASHSLASH, :MODULO] do
result
else
collect_mul_expr(this, nt.kind, expr, start)
end
end
end
def collect_add_expr(this, kind, lhs, start) do
v = advance(this)
case v do
{:error, _} ->
v
_ ->
v = mul_expr(this)
case v do
{:error, _} ->
v
{:ok, expr} ->
result = BinaryNode.new(kind, lhs, expr, start)
nt = next_token(this)
if nt.kind in [:PLUS, :MINUS] do
collect_add_expr(this, nt.kind, result, start)
else
{:ok, result}
end
end
end
end
def add_expr(this) do
start = next_token_start(this)
result = mul_expr(this)
case result do
{:error, _} ->
result
{:ok, expr} ->
nt = next_token(this)
if nt.kind not in [:PLUS, :MINUS] do
result
else
collect_add_expr(this, nt.kind, expr, start)
end
end
end
def collect_shift_expr(this, kind, lhs, start) do
v = advance(this)
case v do
{:error, _} ->
v
_ ->
v = add_expr(this)
case v do
{:error, _} ->
v
{:ok, expr} ->
result = BinaryNode.new(kind, lhs, expr, start)
nt = next_token(this)
if nt.kind in [:LSHIFT, :RSHIFT] do
collect_shift_expr(this, nt.kind, result, start)
else
{:ok, result}
end
end
end
end
def shift_expr(this) do
start = next_token_start(this)
result = add_expr(this)
case result do
{:error, _} ->
result
{:ok, expr} ->
nt = next_token(this)
if nt.kind not in [:LSHIFT, :RSHIFT] do
result
else
collect_shift_expr(this, nt.kind, expr, start)
end
end
end
def collect_bitand_expr(this, lhs, start) do
v = advance(this)
case v do
{:error, _} ->
v
_ ->
v = shift_expr(this)
case v do
{:error, _} ->
v
{:ok, expr} ->
result = BinaryNode.new(:BITAND, lhs, expr, start)
nt = next_token(this)
if nt.kind == :BITAND do
collect_bitand_expr(this, result, start)
else
{:ok, result}
end
end
end
end
def bitand_expr(this) do
start = next_token_start(this)
result = shift_expr(this)
case result do
{:error, _} ->
result
{:ok, expr} ->
nt = next_token(this)
if nt.kind != :BITAND do
result
else
collect_bitand_expr(this, expr, start)
end
end
end
def collect_bitxor_expr(this, lhs, start) do
v = advance(this)
case v do
{:error, _} ->
v
_ ->
v = bitand_expr(this)
case v do
{:error, _} ->
v
{:ok, expr} ->
result = BinaryNode.new(:BITXOR, lhs, expr, start)
nt = next_token(this)
if nt.kind == :BITXOR do
collect_bitxor_expr(this, result, start)
else
{:ok, result}
end
end
end
end
def bitxor_expr(this) do
start = next_token_start(this)
result = bitand_expr(this)
case result do
{:error, _} ->
result
{:ok, expr} ->
nt = next_token(this)
if nt.kind != :BITXOR do
result
else
collect_bitxor_expr(this, expr, start)
end
end
end
def collect_bitor_expr(this, lhs, start) do
v = advance(this)
case v do
{:error, _} ->
v
_ ->
v = bitxor_expr(this)
case v do
{:error, _} ->
v
{:ok, expr} ->
result = BinaryNode.new(:BITOR, lhs, expr, start)
nt = next_token(this)
if nt.kind == :BITOR do
collect_bitor_expr(this, result, start)
else
{:ok, result}
end
end
end
end
def bitor_expr(this) do
start = next_token_start(this)
result = bitxor_expr(this)
case result do
{:error, _} ->
result
{:ok, expr} ->
nt = next_token(this)
if nt.kind != :BITOR do
result
else
collect_bitor_expr(this, expr, start)
end
end
end
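# Read a comparison operator, merging the two-token forms
# `is not` -> :ISNOT and `not in` -> :NOTIN.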
def comparison_op(this) do
nt = next_token(this)
v = advance(this)
case v do
{:error, _} ->
v
{:ok, kind} ->
{kind, should_advance} =
cond do
nt.kind == :IS && kind == :NOT ->
{:ISNOT, true}
nt.kind == :NOT && kind == :IN ->
{:NOTIN, true}
true ->
{nt.kind, false}
end
if !should_advance do
{:ok, kind}
else
v = advance(this)
case v do
{:error, _} -> v
_ -> {:ok, kind}
end
end
end
end
def comparison(this) do
start = next_token_start(this)
result = bitor_expr(this)
case result do
{:error, _} ->
result
{:ok, expr} ->
{co, nt} =
Agent.get(this, fn state -> {state.comparison_operators, state.next_token} end)
if !MapSet.member?(co, nt.kind) do
result
else
v = comparison_op(this)
case v do
{:error, _} ->
v
{:ok, kind} ->
rhs = bitor_expr(this)
case rhs do
{:error, _} -> rhs
{:ok, erhs} -> {:ok, BinaryNode.new(kind, expr, erhs, start)}
end
end
end
end
end
def not_expr(this) do
nt = next_token(this)
if nt.kind != :NOT do
comparison(this)
else
v = advance(this)
case v do
{:error, _} ->
v
_ ->
v = not_expr(this)
case v do
{:error, _} ->
v
{:ok, expr} ->
{:ok, UnaryNode.new(:NOT, expr, nt.start)}
end
end
end
end
def collect_and_expr(this, lhs, start) do
v = advance(this)
case v do
{:error, _} ->
v
_ ->
v = not_expr(this)
case v do
{:error, _} ->
v
{:ok, expr} ->
result = BinaryNode.new(:AND, lhs, expr, start)
nt = next_token(this)
if nt.kind == :AND do
collect_and_expr(this, result, start)
else
{:ok, result}
end
end
end
end
def and_expr(this) do
start = next_token_start(this)
result = not_expr(this)
case result do
{:error, _} ->
result
{:ok, expr} ->
nt = next_token(this)
if nt.kind != :AND do
result
else
collect_and_expr(this, expr, start)
end
end
end
def collect_or_expr(this, lhs, start) do
v = advance(this)
case v do
{:error, _} ->
v
_ ->
v = and_expr(this)
case v do
{:error, _} ->
v
{:ok, expr} ->
result = BinaryNode.new(:OR, lhs, expr, start)
nt = next_token(this)
if nt.kind == :OR do
collect_or_expr(this, result, start)
else
{:ok, result}
end
end
end
end
def or_expr(this) do
start = next_token_start(this)
result = and_expr(this)
case result do
{:error, _} ->
result
{:ok, expr} ->
nt = next_token(this)
if nt.kind != :OR do
result
else
collect_or_expr(this, expr, start)
end
end
end
def expression(this) do
{es, nt} = Agent.get(this, fn state -> {state.expression_starters, state.next_token} end)
if !MapSet.member?(es, nt.kind) do
error(:unexpected_token_for_expression, nt.start, nt.kind)
else
or_expr(this)
end
end
end
defmodule MapUtils do
# """A utility module to merge maps recursively."""
@moduledoc false
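# Illustrative example:
#   deep_merge(%{a: %{b: 1, c: 2}}, %{a: %{b: 3}})
#   #=> %{a: %{b: 3, c: 2}}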
def deep_merge(left, right) do
Map.merge(left, right, &deep_resolve/3)
end
# Key exists in both maps, and both values are maps as well.
# These can be merged recursively.
defp deep_resolve(_key, left = %{}, right = %{}) do
deep_merge(left, right)
end
# Key exists in both maps, but at least one of the values is
# NOT a map. We fall back to standard merge behavior, preferring
# the value on the right.
defp deep_resolve(_key, _left, right) do
right
end
end
defmodule Pow do
# """A utility module to raise integers to powers."""
@moduledoc false
require Integer
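# Exponentiation by squaring: O(log n) multiplications, assuming an
# integer exponent n >= 0 (the base may be an integer or a float).
# Illustrative: pow(2, 10) #=> 1024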
def pow(_, 0), do: 1
def pow(x, n) when Integer.is_odd(n), do: x * pow(x, n - 1)
def pow(x, n) do
result = pow(x, div(n, 2))
result * result
end
end
defmodule Config do
@moduledoc """
This module contains top-level functionality for working with CFG. Client code will usually just interact with this module.
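## Example

A minimal usage sketch (the CFG source and keys shown are illustrative):

```elixir
{:ok, cfg} = Config.from_source("a: 1, b: 'hello'")
{:ok, 1} = Config.get(cfg, "a")
{:ok, "hello"} = Config.get(cfg, "b")
{:ok, :missing} = Config.get(cfg, "absent", :missing)
```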
"""
use Agent
use Bitwise
require Logger
defstruct data: nil,
no_duplicates: true,
strict_conversions: true,
context: nil,
include_path: [],
path: nil,
root_dir: nil,
parent: nil,
cache: nil,
error: nil,
scalar_tokens:
MapSet.new([
:STRING,
:INTEGER,
:FLOAT,
:COMPLEX,
:FALSE,
:TRUE,
:NONE
]),
refs_seen: nil,
string_converter: nil
@typedoc """
This type represents a configuration loaded from CFG source.
These are its fields:
* `no_duplicates` - Whether duplicate keys are rejected (the default). If duplicates are allowed,
newer values for a given key silently overwrite older ones; if they are rejected and a duplicate is seen, an error is returned.
* `strict_conversions` - Whether a failed backtick-string conversion is reported as an error. If not
strict, a failure results in the original special string being returned unchanged.
* `context` - An optional map containing a variable name-to-value mapping.
* `include_path` - A list of directories which is searched for included configurations. The directory
of the including configuration is searched first.
* `path` - The absolute path from where the configuration was loaded.
* `root_dir` - The directory containing `path`.
* `parent` - The parent configuration of an included configuration.
* `string_converter` - A function which is called with a string and the configuration to perform
backtick-string conversion.
"""
@type t :: %__MODULE__{
data: nil | map(),
no_duplicates: boolean(),
strict_conversions: boolean(),
context: nil | map(),
include_path: list(),
path: nil | binary(),
root_dir: nil | binary(),
parent: nil | pid(),
cache: nil | map(),
string_converter: function()
}
defp error(reason, loc, detail) do
{:error, RecognizerError.exception(reason, loc, detail)}
end
# Public for testing only
@doc false
def is_identifier(s) do
Regex.match?(~r/^(?!\d)(\w+)$/u, s)
end
defp tuple_to_string(t) do
parts =
Enum.reduce(Tuple.to_list(t), [], fn item, parts ->
s =
case item do
nt when is_tuple(nt) -> tuple_to_string(nt)
m when is_map(m) -> map_to_string(m)
_ -> to_string(item)
end
parts ++ [s]
end)
"[#{Enum.join(parts, ", ")}]"
end
defp map_to_string(m) do
parts =
Enum.reduce(m, [], fn {k, v}, parts ->
s =
case v do
nt when is_tuple(nt) -> tuple_to_string(nt)
m when is_map(m) -> map_to_string(m)
_ -> to_string(v)
end
parts ++ ["#{k}: #{s}"]
end)
"{#{Enum.join(parts, ", ")}}"
end
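# Default converter for backtick-strings. Recognized forms, tried in order:
# ISO-8601 dates/datetimes, environment lookups of the form `$NAME|default`,
# zero-arity calls of the form `Module.Path:function`, and `${path}`
# interpolation against this configuration. Anything else is returned
# unchanged (which, under strict conversions, is reported as a failure).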
defp default_string_converter(s, cfg) do
iso_datetime_pattern =
~r/^(\d{4})-(\d{2})-(\d{2})(([ T])(((\d{2}):(\d{2}):(\d{2}))(\.\d{1,6})?(([+-])(\d{2}):(\d{2})(:(\d{2})(\.\d{1,6})?)?)?))?$/
env_value_pattern = ~r/^\$(\w+)(\|(.*))?$/
colon_object_pattern = ~r/^([A-Za-z_]\w*(\.[A-Za-z_]\w*)*)(:([A-Za-z_]\w*))?$/
interpolation_pattern = ~r/\$\{([^}]+)\}/
m = Regex.run(iso_datetime_pattern, s)
if !is_nil(m) do
mlen = length(m)
m = List.to_tuple(m)
has_time = mlen > 4
{year, _} = Integer.parse(elem(m, 1))
{month, _} = Integer.parse(elem(m, 2))
{day, _} = Integer.parse(elem(m, 3))
{:ok, date} = Date.new(year, month, day)
if !has_time do
date
else
{hour, _} = Integer.parse(elem(m, 8))
{minute, _} = Integer.parse(elem(m, 9))
{second, _} = Integer.parse(elem(m, 10))
microsecond =
if mlen < 12 || elem(m, 11) == "" do
0
else
{f, _} = Float.parse("0" <> elem(m, 11))
round(f * 1.0e6)
end
has_offset = mlen > 13
{:ok, time} = Time.new(hour, minute, second, microsecond)
{:ok, ndt} = NaiveDateTime.new(date, time)
{:ok, date_time} = DateTime.from_naive(ndt, "Etc/UTC")
if !has_offset do
date_time
else
sign =
if elem(m, 13) == "-" do
-1
else
1
end
{ohour, _} = Integer.parse(elem(m, 14))
{ominute, _} = Integer.parse(elem(m, 15))
osecond =
if mlen < 17 do
0
else
{os, _} = Integer.parse(elem(m, 17))
os
end
offset = osecond + 60 * ominute + 3600 * ohour
%{date_time | utc_offset: sign * offset}
end
end
else
m = Regex.run(env_value_pattern, s)
if !is_nil(m) do
mlen = length(m)
m = List.to_tuple(m)
has_pipe = mlen > 2
dv =
if !has_pipe do
nil
else
elem(m, 3)
end
System.get_env(elem(m, 1), dv)
else
m = Regex.run(colon_object_pattern, s)
# Logger.debug("tested colon object pattern: #{inspect m}")
if !is_nil(m) do
m = List.to_tuple(m)
try do
mod = String.to_existing_atom(elem(m, 1))
func = String.to_existing_atom(elem(m, 4))
apply(mod, func, [])
rescue
_ -> s
end
else
m = Regex.match?(interpolation_pattern, s)
if !m do
s
else
m = Regex.scan(interpolation_pattern, s, return: :index)
# Logger.debug("#{__ENV__.line}: #{inspect m}")
{pos, parts, failed} =
Enum.reduce_while(m, {0, [], false}, fn m, {pos, parts, _} ->
[{ostart, olen}, {istart, ilen}] = m
parts =
if pos < ostart do
parts ++ [String.slice(s, pos, ostart - pos)]
else
parts
end
expr = String.slice(s, istart, ilen)
v = Config.get(cfg, expr)
case v do
{:error, _e} ->
# Logger.debug("#{__ENV__.line}: #{expr}: #{inspect(_e)}")
{:halt, {pos, parts, true}}
{:ok, v} ->
sv =
case v do
t when is_tuple(t) ->
tuple_to_string(t)
m when is_map(m) ->
map_to_string(m)
_ ->
to_string(v)
end
{:cont, {ostart + olen, parts ++ [sv], false}}
end
end)
parts =
if !failed && pos < String.length(s) do
parts ++ [String.slice(s, pos..-1)]
else
parts
end
# Logger.debug("#{__ENV__.line}: #{inspect({pos, parts, failed})}")
if failed do
s
else
Enum.join(parts)
end
end
end
end
end
end
@doc "Return a new, empty configuration with specified options."
@spec new(map()) :: {atom(), pid()}
def new(options \\ %{}) do
Agent.start(fn ->
%Config{
no_duplicates: Map.get(options, :no_duplicates, true),
strict_conversions: Map.get(options, :strict_conversions, true),
context: Map.get(options, :context),
include_path: Map.get(options, :include_path, []),
string_converter: Map.get(options, :string_converter, &default_string_converter/2)
}
end)
end
# defp name(this) do
# Path.basename(Agent.get(this, fn state -> state.path end))
# end
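# Convert a MappingNode's key/value AST pairs into a map of raw
# (not yet evaluated) values, erroring on duplicate keys when
# no_duplicates is set.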
defp wrap_mapping(this, mn) do
no_dupes = Agent.get(this, fn state -> state.no_duplicates end)
{:ok, pid} = Agent.start(fn -> %{error: nil, data: %{}} end)
_ =
Enum.reduce_while(mn.elements, pid, fn elem, pid ->
{key, value} = elem
data = Agent.get(pid, fn state -> state.data end)
kv =
if key.kind == :WORD do
key.text
else
key.value
end
if no_dupes && Map.has_key?(data, kv) do
e = RecognizerError.exception(:duplicate_key, key.start, kv)
Agent.update(pid, fn state -> %{state | error: e} end)
{:halt, pid}
else
data = Map.put(data, kv, value)
Agent.update(pid, fn state -> %{state | data: data} end)
{:cont, pid}
end
end)
state = Agent.get(pid, fn state -> state end)
Agent.stop(pid)
# Logger.debug("#{__ENV__.line} #{inspect(state)}")
if is_nil(state.error) do
{:ok, state.data}
else
{:error, state.error}
end
end
defp load(this, stream) do
{:ok, p} = Parser.new(stream)
v = Parser.container(p)
case v do
{:error, _} ->
v
{:ok, mn = %MappingNode{}} ->
v = wrap_mapping(this, mn)
case v do
{:error, _} ->
v
{:ok, data} ->
Agent.update(this, fn state -> %{state | data: data} end)
{:ok, this}
end
{:ok, other} ->
error(:must_be_mapping, other.start, other)
end
end
@doc "Load this configuration from a file, given its path."
@spec load_file(pid(), binary()) :: {atom(), any()}
def load_file(this, path) do
{:ok, stream} = File.open(path, [:read, :utf8])
v = load(this, stream)
case v do
{:error, _} ->
v
{:ok, this} ->
Config.set_path(this, path)
v
end
end
defp from_stream(stream) do
{:ok, this} = new()
load(this, stream)
end
@doc "Return a configuration from its source."
@spec from_source(binary()) :: {atom(), any()}
def from_source(s) do
{:ok, stream} = StringIO.open(s)
from_stream(stream)
end
@doc "Return a configuration from a file, given its path."
@spec from_file(binary()) :: {atom(), any()}
def from_file(path) do
# Logger.debug("About to load: #{path}")
{:ok, stream} = File.open(path, [:read, :utf8])
v = from_stream(stream)
# Logger.debug("#{__ENV__.line}: #{inspect v}")
case v do
{:error, _} ->
v
{:ok, this} ->
Config.set_path(this, path)
v
end
end
@doc """
Set the path from which this configuration was loaded.
This is also used to determine the directory searched for included configurations.
"""
@spec set_path(pid(), binary()) :: atom()
def set_path(this, p) do
rd = Path.dirname(p)
Agent.update(this, fn state -> %{state | path: p, root_dir: rd} end)
end
@doc "See whether this configuration allows duplicates."
@spec get_no_duplicates(pid()) :: boolean()
def get_no_duplicates(this) do
Agent.get(this, fn state -> state.no_duplicates end)
end
@doc "Set whether this configuration allows duplicates."
@spec set_no_duplicates(pid(), boolean()) :: atom()
def set_no_duplicates(this, no_dupes) do
Agent.update(this, fn state -> %{state | no_duplicates: no_dupes} end)
end
@doc "Append or prepend a list of directories to the include path of this configuration."
@spec add_include(pid(), [binary()], boolean()) :: atom()
def add_include(this, path, append \\ true) when is_list(path) do
ip = Agent.get(this, fn state -> state.include_path end)
new_ip =
if append do
ip ++ path
else
path ++ ip
end
Agent.update(this, fn state -> %{state | include_path: new_ip} end)
end
@doc "Get the include path of this configuration."
@spec get_include(pid()) :: [binary()]
def get_include(this) do
Agent.get(this, fn state -> state.include_path end)
end
@doc "Set the include path of this configuration to the specified list of directories."
@spec set_include(pid(), [binary()]) :: atom()
def set_include(this, path) when is_list(path) do
Agent.update(this, fn state -> %{state | include_path: path} end)
end
# public for tests only
@doc false
def parse_path(s) do
v = Parser.from_source(s)
case v do
{:error, _} ->
v
{:ok, p} ->
t = Parser.next_token(p)
if t.kind != :WORD do
error(:invalid_path, t.start, s)
else
v = Parser.primary(p)
case v do
{:error, _} ->
v
_ ->
if Parser.at_end(p) do
v
else
t = Parser.next_token(p)
error(:invalid_path_extra, t.start, s)
end
end
end
end
end
# public for tests only
@doc false
def to_source(node) do
case node do
%Token{} ->
if node.kind == :WORD do
node.text
else
to_string(node.value)
end
%BinaryNode{} ->
path = unpack_path(node)
first = List.first(path)
parts = [List.last(first).text]
parts =
if length(path) == 1 do
parts
else
[_ | rest] = path
Enum.reduce(rest, parts, fn item, parts ->
case item do
[:DOT, t = %Token{kind: :WORD}] ->
parts ++ [".", t.text]
[:LBRACK, indexpr] ->
parts ++ ["[", to_source(indexpr), "]"]
[:COLON, sn = %SliceNode{}] ->
addend =
["["] ++
if is_nil(sn.start_index) do
[":"]
else
[to_source(sn.start_index), ":"]
end
addend =
addend ++
if is_nil(sn.stop_index) do
[]
else
[to_source(sn.stop_index)]
end
addend =
addend ++
if is_nil(sn.step) do
[]
else
[":", to_source(sn.step)]
end
parts ++ addend ++ ["]"]
end
end)
end
Enum.join(parts)
_ ->
to_string(node)
end
end
defp visit(pid, node) do
case node do
t = %Token{} ->
Agent.update(pid, fn state -> state ++ [[:DOT, t]] end)
%UnaryNode{operand: o} ->
visit(pid, o)
%BinaryNode{kind: k, lhs: lhs, rhs: rhs} ->
visit(pid, lhs)
Agent.update(pid, fn state -> state ++ [[k, rhs]] end)
end
end
# public for tests only
@doc false
def unpack_path(node) do
{:ok, pid} = Agent.start(fn -> [] end)
visit(pid, node)
result = Agent.get(pid, fn state -> state end)
Agent.stop(pid)
result
end
@doc "See whether this configuration uses a cache."
@spec is_cached(pid()) :: boolean()
def is_cached(this) do
Agent.get(this, fn state -> !is_nil(state.cache) end)
end
@doc "Set whether this configuration uses a cache."
@spec set_cached(pid(), boolean()) :: atom()
def set_cached(this, cached) do
state = Agent.get(this, fn state -> state end)
cond do
cached && is_nil(state.cache) ->
Agent.update(this, fn state -> %{state | cache: Map.new()} end)
!cached && !is_nil(state.cache) ->
Agent.update(this, fn state -> %{state | cache: nil} end)
true ->
# already in the requested state; nothing to change
:ok
end
end
@doc """
Get a value from this configuration by key or path, with an optional default value if not found.
If no default is specified and a value isn't found, an error will be returned.
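
For example (the key and path shown are illustrative):

    {:ok, level} = Config.get(cfg, "logging.level")
    {:ok, :missing} = Config.get(cfg, "absent_key", :missing)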
"""
@spec get(pid(), binary(), any()) :: tuple()
def get(this, key, default \\ :MISSING) do
state =
Agent.get_and_update(this, fn state -> {state, %{state | refs_seen: MapSet.new()}} end)
result =
cond do
!is_nil(state.cache) && Map.has_key?(state.cache, key) ->
{:ok, Map.get(state.cache, key)}
is_nil(state.data) ->
error(:no_configuration, %Location{}, this)
Map.has_key?(state.data, key) ->
evaluated(this, Map.get(state.data, key))
is_identifier(key) ->
if default == :MISSING do
# Logger.debug("#{__ENV__.line}: #{key} not in #{inspect(Map.keys(state.data))}")
error(:not_found, %Location{}, key)
else
{:ok, default}
end
true ->
v = get_from_path(this, key)
case v do
{:ok, _} ->
v
{:error, e} ->
if default == :MISSING do
v
else
case e do
%RecognizerError{reason: :not_found} -> {:ok, default}
_ -> v
end
end
end
end
# Logger.debug("#{__ENV__.line}: get: #{name(this)}: #{key} -> #{inspect(result)}")
result
end
defp shallow_unwrap(this, v) do
result =
case v do
%MappingNode{} = mn ->
wrap_mapping(this, mn)
%ListNode{} = ln ->
{:ok, List.to_tuple(ln.elements)}
%Token{} = t ->
evaluate(this, t)
%UnaryNode{} ->
evaluate(this, v)
%BinaryNode{} ->
evaluate(this, v)
_ ->
{:ok, v}
end
# Logger.debug("#{__ENV__.line}: s_u: #{name(this)}: #{inspect(result)}")
result
end
defp map_access(this, map, key, start) do
result =
if !Map.has_key?(map, key) do
# Logger.debug("#{__ENV__.line}: #{key} not in #{inspect(Map.keys(map))}")
error(:not_found, start, key)
else
shallow_unwrap(this, Map.get(map, key))
end
# Logger.debug("#{__ENV__.line}: m_a: #{name(this)}: #{key} -> #{inspect(result)}")
result
end
defp flag_error(pid, e) do
Agent.update(pid, fn state -> %{state | error: e} end)
{:halt, pid}
end
defp flag_error(pid, reason, loc, detail) do
flag_error(pid, RecognizerError.exception(reason, loc, detail))
end
defp evaluate_optional(this, expr, default) do
if is_nil(expr) do
{:ok, default}
else
evaluate(this, expr)
end
end
defp collect_slice(container, start, stop, step, result) do
done =
if step > 0 do
start >= stop
else
start <= stop
end
# Logger.debug("#{__ENV__.line}: #{start}, #{stop}, #{step}, #{inspect result}, #{done}")
if done do
result
else
collect_slice(container, start + step, stop, step, result ++ [elem(container, start)])
end
end
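# Evaluate a slice over a tuple with Python-style semantics: negative
# start/stop indices count from the end, and a negative step walks the
# tuple in reverse. A step of 0 is an error.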
defp get_slice(this, container, sn) when is_tuple(container) do
start_index = evaluate_optional(this, sn.start_index, 0)
case start_index do
{:error, _} ->
start_index
{:ok, start} ->
size = tuple_size(container)
start =
if start < 0 do
if start >= -size do
start + size
else
0
end
else
start
end
stop_index = evaluate_optional(this, sn.stop_index, size)
case stop_index do
{:error, _} ->
stop_index
{:ok, stop} ->
stop =
if stop > size do
size
else
stop
end
stop =
if stop < 0 do
if stop >= -size do
stop + size
else
0
end
else
stop
end
step = evaluate_optional(this, sn.step, 1)
case step do
{:error, _} ->
step
{:ok, 0} ->
error(:invalid_step, sn.step.start, 0)
{:ok, step} ->
{start, stop} =
if step < 0 && start < stop do
stop =
if stop >= size do
size - 1
else
stop
end
start =
if start == 0 do
-1
else
start
end
{stop, start}
else
{start, stop}
end
{:ok, List.to_tuple(collect_slice(container, start, stop, step, []))}
end
end
end
end
@doc "See if the specified value is a configuration."
@spec is_config(any()) :: boolean()
def is_config(v) do
if !is_pid(v) do
false
else
state = Agent.get(v, fn state -> state end)
case state do
%Config{} -> true
_ -> false
end
end
end
defp get_from_path(this, key) when is_binary(key) do
v = parse_path(key)
case v do
{:error, _} ->
v
{:ok, node} ->
get_from_path(this, node)
end
end
defp get_from_path(this, node) do
path = unpack_path(node)
first = List.first(path)
kind = List.first(first)
if kind != :DOT do
error(:unexpected_path_start, node.start, first)
else
data = Agent.get(this, fn state -> state.data end)
{:ok, pid} = Agent.start(fn -> %{error: nil, current: data, config: this} end)
# iterate over the path elements
_ =
Enum.reduce_while(path, pid, fn item, pid ->
{current, config} = Agent.get(pid, fn state -> {state.current, state.config} end)
# Logger.debug(
# "#{__ENV__.line}: pit: #{name(config)}: item = #{inspect(item)}, current = #{
# inspect(current)
# }"
# )
case item do
[:DOT, t = %Token{kind: :WORD}] ->
# attribute access
{key, start} = {t.text, t.start}
case current do
%{} ->
v = map_access(config, current, key, start)
case v do
{:error, e} ->
flag_error(pid, e)
{:ok, v} ->
cfg =
if is_config(v) do
v
else
config
end
Agent.update(pid, fn state -> %{state | current: v, config: cfg} end)
{:cont, pid}
end
_ ->
if !is_config(current) do
flag_error(pid, :invalid_container, start, {key, current})
else
v = Config.get(current, key)
case v do
{:error, e} ->
flag_error(pid, e)
{:ok, v} ->
cfg =
if is_config(v) do
v
else
config
end
Agent.update(pid, fn state -> %{state | current: v, config: cfg} end)
{:cont, pid}
end
end
end
[:LBRACK, indexpr] ->
# indexed access
v = evaluate(config, indexpr)
case v do
{:error, e} ->
flag_error(pid, e)
{:ok, iv} ->
cond do
is_binary(iv) ->
case current do
%{} ->
v = map_access(config, current, iv, indexpr.start)
case v do
{:error, e} ->
flag_error(pid, e)
{:ok, v} ->
Agent.update(pid, fn state -> %{state | current: v} end)
{:cont, pid}
end
_ ->
if !is_config(current) do
flag_error(pid, :invalid_container, indexpr.start, {iv, current})
else
v = Config.get(current, iv)
case v do
{:error, e} ->
flag_error(pid, e)
{:ok, v} ->
Agent.update(pid, fn state -> %{state | current: v} end)
{:cont, pid}
end
end
end
is_integer(iv) ->
case current do
t when is_tuple(t) ->
size = tuple_size(t)
index =
if iv < 0 do
size + iv
else
iv
end
if index < 0 || index >= size do
flag_error(pid, :invalid_index, indexpr.start, {index, current})
else
v = shallow_unwrap(config, elem(t, index))
case v do
{:error, e} ->
flag_error(pid, e)
{:ok, v} ->
Agent.update(pid, fn state -> %{state | current: v} end)
{:cont, pid}
end
end
_ ->
# Logger.debug("#{__ENV__.line}: #{inspect(current)}")
flag_error(pid, :invalid_container, indexpr.start, {iv, current})
end
true ->
flag_error(pid, :invalid_index, indexpr.start, v)
end
end
[:COLON, sn = %SliceNode{}] ->
# slice access
case current do
t when is_tuple(t) ->
v = get_slice(this, t, sn)
case v do
{:error, e} ->
flag_error(pid, e)
{:ok, v} ->
Agent.update(pid, fn state -> %{state | current: v} end)
{:cont, pid}
end
_ ->
flag_error(pid, :invalid_container, sn.start, {nil, current})
end
_ ->
flag_error(pid, :not_implemented, __ENV__.line, item)
end
end)
state = Agent.get(pid, fn state -> state end)
Agent.stop(pid)
Agent.update(this, fn state -> %{state | refs_seen: MapSet.new()} end)
if is_nil(state.error) do
evaluated(state.config, state.current)
else
{:error, state.error}
end
end
end
defp evaluated(this, node) do
v =
case node do
%Token{} ->
evaluate(this, node)
%UnaryNode{} ->
v = evaluate(this, node)
# Logger.debug("#{__ENV__.line}: #{inspect v}")
v
%BinaryNode{} ->
evaluate(this, node)
%ComplexNum{} ->
{:ok, node}
%MappingNode{} ->
wrap_mapping(this, node)
%ListNode{} ->
as_list(this, node.elements)
t when is_list(t) ->
as_list(this, t)
_ ->
if is_integer(node) || is_float(node) || is_binary(node) || is_tuple(node) ||
is_map(node) || is_boolean(node) do
{:ok, node}
else
error(:cannot_evaluate, %Location{}, node)
end
end
result =
case v do
{:error, _} -> v
{:ok, v} -> unwrap(this, v)
end
result
end
defp same_file(p1, p2) do
s1 = Path.expand(p1)
s2 = Path.expand(p2)
case :os.type() do
{:unix, _} ->
String.equivalent?(s1, s2)
{:win32, _} ->
String.equivalent?(String.downcase(s1), String.downcase(s2))
end
end
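# Resolve the file name of an include: absolute paths are used as-is;
# relative paths are tried against this configuration's root_dir (or the
# current directory), then against each include_path entry in turn.
# Including a configuration from itself is an error.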
defp find_include(this, fname, start) do
state = Agent.get(this, fn state -> state end)
{found, path} =
if Path.type(fname) == :absolute do
{File.exists?(fname), fname}
else
d =
if is_nil(state.root_dir) do
File.cwd!()
else
state.root_dir
end
p = Path.join(d, fname)
if File.exists?(p) do
{true, p}
else
# Logger.debug("#{__ENV__.line}: not found: #{p}")
Enum.reduce_while(state.include_path, {false, fname}, fn d, acc ->
p = Path.join(d, fname)
if File.exists?(p) do
{:halt, {true, p}}
else
# Logger.debug("#{__ENV__.line}: not found: #{p}")
{:cont, acc}
end
end)
end
end
if !found do
error(:include_not_found, start, fname)
else
if state.path != nil and same_file(state.path, path) do
error(:cannot_include_self, start, Path.basename(path))
else
v = Parser.from_file(path)
case v do
{:error, _} ->
v
{:ok, p} ->
v = Parser.container(p)
case v do
{:error, _} ->
v
{:ok, node} ->
case node do
mn = %MappingNode{} ->
# Create a new child config
{:ok, child} = Config.new(state)
# Logger.debug("#{__ENV__.line}: created child #{inspect child}")
set_path(child, path)
if is_cached(this) do
set_cached(child, true)
end
v = wrap_mapping(child, mn)
# Logger.debug("#{__ENV__.line}: #{inspect v}")
case v do
{:error, _} ->
v
{:ok, data} ->
Agent.update(child, fn state -> %{state | parent: this, data: data} end)
{:ok, child}
end
_ ->
v
end
end
end
end
end
end
defp eval_at(this, node) do
v = evaluate(this, node)
case v do
{:error, _} ->
v
{:ok, key} ->
if !is_binary(key) do
error(:string_expected, node.start, key)
else
v = find_include(this, key, node.start)
# Logger.debug("#{__ENV__.line}: #{inspect v}")
case v do
{:error, nil} ->
error(:include_not_found, node.start, key)
_ ->
# Logger.debug("#{__ENV__.line}: #{inspect v}")
v
end
end
end
end
defp is_complex(v) do
case v do
%ComplexNum{} -> true
_ -> false
end
end
defp eval_add(this, node = %UnaryNode{}) do
evaluate(this, node.operand)
end
defp eval_add(this, node = %BinaryNode{}) do
v = evaluate(this, node.lhs)
case v do
{:error, _} ->
v
{:ok, lhs} ->
# Logger.debug("#{__ENV__.line}: #{inspect node.rhs}")
v = evaluate(this, node.rhs)
case v do
{:error, _} ->
v
{:ok, rhs} ->
cond do
is_number(lhs) && is_number(rhs) ->
{:ok, lhs + rhs}
is_binary(lhs) && is_binary(rhs) ->
{:ok, lhs <> rhs}
is_complex(lhs) || is_complex(rhs) ->
cond do
is_complex(lhs) && is_complex(rhs) ->
{:ok, Complex.add(lhs, rhs)}
is_number(lhs) ->
{:ok, Complex.add(Complex.new(lhs, 0), rhs)}
is_number(rhs) ->
{:ok, Complex.add(lhs, Complex.new(rhs, 0))}
true ->
error(:cannot_add, node.start, {lhs, rhs})
end
is_tuple(lhs) && is_tuple(rhs) ->
{:ok, List.to_tuple(Tuple.to_list(lhs) ++ Tuple.to_list(rhs))}
is_map(lhs) && is_map(rhs) ->
{:ok, MapUtils.deep_merge(lhs, rhs)}
true ->
error(:cannot_add, node.start, {lhs, rhs})
end
end
end
end
defp eval_subtract(this, node = %UnaryNode{}) do
v = evaluate(this, node.operand)
case v do
{:error, _} ->
v
{:ok, operand} ->
cond do
is_number(operand) -> {:ok, -operand}
is_complex(operand) -> {:ok, Complex.minus(operand)}
true -> error(:cannot_negate, node.start, operand)
end
end
end
defp eval_subtract(this, node = %BinaryNode{}) do
# Logger.debug("#{__ENV__.line}: #{inspect node}")
v = evaluate(this, node.lhs)
case v do
{:error, _} ->
v
{:ok, lhs} ->
v = evaluate(this, node.rhs)
case v do
{:error, _} ->
v
{:ok, rhs} ->
# Logger.debug("#{__ENV__.line}: #{inspect lhs} - #{inspect rhs}")
cond do
is_number(lhs) && is_number(rhs) ->
{:ok, lhs - rhs}
is_complex(lhs) || is_complex(rhs) ->
cond do
is_complex(lhs) && is_complex(rhs) ->
# Logger.debug("#{__ENV__.line}: #{inspect lhs} - #{inspect rhs}")
{:ok, Complex.sub(lhs, rhs)}
is_number(lhs) ->
{:ok, Complex.sub(Complex.new(lhs, 0), rhs)}
is_number(rhs) ->
{:ok, Complex.sub(lhs, Complex.new(rhs, 0))}
true ->
error(:cannot_subtract, node.start, {lhs, rhs})
end
is_map(lhs) && is_map(rhs) ->
{:ok, Map.drop(lhs, Map.keys(rhs))}
true ->
error(:cannot_subtract, node.start, {lhs, rhs})
end
end
end
end
defp eval_multiply(this, node = %BinaryNode{}) do
v = evaluate(this, node.lhs)
case v do
{:error, _} ->
v
{:ok, lhs} ->
v = evaluate(this, node.rhs)
case v do
{:error, _} ->
v
{:ok, rhs} ->
cond do
is_number(lhs) && is_number(rhs) ->
{:ok, lhs * rhs}
is_complex(lhs) || is_complex(rhs) ->
cond do
is_complex(lhs) && is_complex(rhs) ->
{:ok, Complex.mult(lhs, rhs)}
is_number(lhs) ->
{:ok, Complex.mult(Complex.new(lhs, 0), rhs)}
is_number(rhs) ->
{:ok, Complex.mult(lhs, Complex.new(rhs, 0))}
true ->
error(:cannot_multiply, node.start, {lhs, rhs})
end
true ->
error(:not_implemented, __ENV__.line, {lhs, rhs})
end
end
end
end
defp eval_divide(this, node = %BinaryNode{}) do
v = evaluate(this, node.lhs)
case v do
{:error, _} ->
v
{:ok, lhs} ->
v = evaluate(this, node.rhs)
case v do
{:error, _} ->
v
{:ok, rhs} ->
cond do
is_number(lhs) && is_number(rhs) ->
{:ok, lhs / rhs}
is_complex(lhs) || is_complex(rhs) ->
cond do
is_complex(lhs) && is_complex(rhs) ->
{:ok, Complex.div(lhs, rhs)}
is_number(lhs) ->
{:ok, Complex.div(Complex.new(lhs, 0), rhs)}
is_number(rhs) ->
{:ok, Complex.div(lhs, Complex.new(rhs, 0))}
true ->
error(:cannot_divide, node.start, {lhs, rhs})
end
true ->
error(:not_implemented, __ENV__.line, {lhs, rhs})
end
end
end
end
defp eval_integer_divide(this, node = %BinaryNode{}) do
v = evaluate(this, node.lhs)
case v do
{:error, _} ->
v
{:ok, lhs} ->
v = evaluate(this, node.rhs)
case v do
{:error, _} ->
v
{:ok, rhs} ->
if is_integer(lhs) && is_integer(rhs) do
{:ok, div(lhs, rhs)}
else
error(:cannot_integer_divide, node.start, {lhs, rhs})
end
end
end
end
defp eval_modulo(this, node = %BinaryNode{}) do
v = evaluate(this, node.lhs)
case v do
{:error, _} ->
v
{:ok, lhs} ->
v = evaluate(this, node.rhs)
case v do
{:error, _} ->
v
{:ok, rhs} ->
if is_integer(lhs) && is_integer(rhs) do
{:ok, Integer.mod(lhs, rhs)}
else
error(:cannot_compute_modulo, node.start, {lhs, rhs})
end
end
end
end
defp eval_left_shift(this, node = %BinaryNode{}) do
v = evaluate(this, node.lhs)
case v do
{:error, _} ->
v
{:ok, lhs} ->
v = evaluate(this, node.rhs)
case v do
{:error, _} ->
v
{:ok, rhs} ->
if is_integer(lhs) && is_integer(rhs) do
{:ok, lhs <<< rhs}
else
error(:cannot_left_shift, node.start, {lhs, rhs})
end
end
end
end
defp eval_right_shift(this, node = %BinaryNode{}) do
v = evaluate(this, node.lhs)
case v do
{:error, _} ->
v
{:ok, lhs} ->
v = evaluate(this, node.rhs)
case v do
{:error, _} ->
v
{:ok, rhs} ->
if is_integer(lhs) && is_integer(rhs) do
{:ok, lhs >>> rhs}
else
error(:cannot_right_shift, node.start, {lhs, rhs})
end
end
end
end
defp eval_power(this, node = %BinaryNode{}) do
v = evaluate(this, node.lhs)
case v do
{:error, _} ->
v
{:ok, lhs} ->
v = evaluate(this, node.rhs)
case v do
{:error, _} ->
v
{:ok, rhs} ->
cond do
is_number(lhs) && is_number(rhs) ->
{:ok, Pow.pow(lhs, rhs)}
is_complex(lhs) && is_integer(rhs) ->
{:ok, Complex.pow(lhs, rhs)}
true ->
error(:cannot_raise_to_power, node.start, {lhs, rhs})
end
end
end
end
defp eval_bitor(this, node = %BinaryNode{}) do
v = evaluate(this, node.lhs)
case v do
{:error, _} ->
v
{:ok, lhs} ->
v = evaluate(this, node.rhs)
case v do
{:error, _} ->
v
{:ok, rhs} ->
cond do
is_integer(lhs) && is_integer(rhs) ->
{:ok, lhs ||| rhs}
is_map(lhs) && is_map(rhs) ->
{:ok, MapUtils.deep_merge(lhs, rhs)}
true ->
error(:cannot_bitwise_or, node.start, {lhs, rhs})
end
end
end
end
defp eval_bitand(this, node = %BinaryNode{}) do
v = evaluate(this, node.lhs)
case v do
{:error, _} ->
v
{:ok, lhs} ->
v = evaluate(this, node.rhs)
case v do
{:error, _} ->
v
{:ok, rhs} ->
cond do
is_integer(lhs) && is_integer(rhs) ->
{:ok, lhs &&& rhs}
is_map(lhs) && is_map(rhs) ->
{:ok, Map.drop(lhs, Map.keys(rhs))}
true ->
error(:cannot_bitwise_and, node.start, {lhs, rhs})
end
end
end
end
defp eval_bitxor(this, node = %BinaryNode{}) do
v = evaluate(this, node.lhs)
case v do
{:error, _} ->
v
{:ok, lhs} ->
v = evaluate(this, node.rhs)
case v do
{:error, _} ->
v
{:ok, rhs} ->
cond do
is_integer(lhs) && is_integer(rhs) ->
{:ok, bxor(lhs, rhs)}
is_map(lhs) && is_map(rhs) ->
{:ok, Map.drop(lhs, Map.keys(rhs))}
true ->
error(:cannot_bitwise_xor, node.start, {lhs, rhs})
end
end
end
end
defp eval_logical_or(this, node = %BinaryNode{}) do
v = evaluate(this, node.lhs)
case v do
{:error, _} ->
v
{:ok, lhs} ->
if lhs do
{:ok, true}
else
v = evaluate(this, node.rhs)
case v do
{:error, _} ->
v
{:ok, rhs} ->
{:ok, !!rhs}
end
end
end
end
defp eval_logical_and(this, node = %BinaryNode{}) do
v = evaluate(this, node.lhs)
case v do
{:error, _} ->
v
{:ok, lhs} ->
if !lhs do
{:ok, false}
else
v = evaluate(this, node.rhs)
case v do
{:error, _} ->
v
{:ok, rhs} ->
{:ok, !!rhs}
end
end
end
end
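# Evaluate a ${...} reference, tracking every reference node seen during
# the current lookup so that circular reference chains are detected and
# reported (sorted by source location).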
defp eval_reference(this, node) do
refs_seen = Agent.get(this, fn state -> state.refs_seen end)
if MapSet.member?(refs_seen, node) do
sorted =
Enum.sort(refs_seen, fn node1, node2 ->
cond do
node1.start.line > node2.start.line ->
false
node1.start.line == node2.start.line && node1.start.column > node2.start.column ->
false
true ->
true
end
end)
sorted =
Enum.map(sorted, fn node ->
{node.start, to_source(node.operand)}
end)
error(:circular_reference, node.start, sorted)
else
refs_seen = MapSet.put(refs_seen, node)
# Logger.debug("#{__ENV__.line}: #{inspect(refs_seen)}")
Agent.update(this, fn state -> %{state | refs_seen: refs_seen} end)
get_from_path(this, node.operand)
end
end
defp evaluate(this, node) do
# Logger.debug("#{__ENV__.line}: eva? #{name(this)}: #{inspect(node)}")
state = Agent.get(this, fn state -> state end)
result =
case node do
%Token{} ->
if MapSet.member?(state.scalar_tokens, node.kind) do
{:ok, node.value}
else
case node.kind do
:WORD ->
key = node.text
if !is_nil(state.context) && Map.has_key?(state.context, key) do
{:ok, Map.get(state.context, key)}
else
error(:unknown_variable, node.start, key)
end
:BACKTICK ->
convert_string(this, node)
end
end
%MappingNode{} ->
v = wrap_mapping(this, node)
case v do
{:error, _} -> v
{:ok, v} -> as_dict(this, v)
end
%ListNode{} ->
as_list(this, node.elements)
_ ->
case node.kind do
:AT ->
eval_at(this, node.operand)
:PLUS ->
eval_add(this, node)
:MINUS ->
eval_subtract(this, node)
:STAR ->
eval_multiply(this, node)
:SLASH ->
eval_divide(this, node)
:SLASHSLASH ->
eval_integer_divide(this, node)
:MODULO ->
eval_modulo(this, node)
:POWER ->
eval_power(this, node)
:DOLLAR ->
eval_reference(this, node)
:BITOR ->
eval_bitor(this, node)
:BITAND ->
eval_bitand(this, node)
:BITXOR ->
eval_bitxor(this, node)
:LSHIFT ->
eval_left_shift(this, node)
:RSHIFT ->
eval_right_shift(this, node)
:AND ->
eval_logical_and(this, node)
:OR ->
eval_logical_or(this, node)
_ ->
error(:not_implemented, __ENV__.line, node)
end
end
# Logger.debug("#{__ENV__.line}: eva! #{name(this)}: #{inspect(result)}")
result
end
defp convert_string(this, node) do
state = Agent.get(this, fn state -> state end)
s = node.value
v = state.string_converter.(s, this)
# Logger.debug("convert_string: #{s} -> #{inspect(v)}")
if s == v && state.strict_conversions do
error(:conversion_failure, node.start, s)
else
{:ok, v}
end
end
defp unwrap(this, v) do
# Logger.debug("#{__ENV__.line}: unw? #{name(this)}: #{inspect(v, structs: false)}")
result =
case v do
s when is_binary(s) ->
{:ok, s}
i when is_integer(i) ->
{:ok, i}
f when is_float(f) ->
{:ok, f}
b when is_boolean(b) ->
{:ok, b}
%DateTime{} ->
{:ok, v}
c = %ComplexNum{} ->
{:ok, c}
mn = %MappingNode{} ->
v = wrap_mapping(this, mn)
case v do
{:error, _} -> v
{:ok, v} -> as_dict(this, v)
end
ln = %ListNode{} ->
as_list(this, ln.elements)
%{} ->
as_dict(this, v)
t when is_tuple(t) ->
as_list(this, t)
t when is_list(t) ->
as_list(this, t)
_ ->
if is_config(v) do
{:ok, v}
else
error(:not_implemented, __ENV__.line, v)
end
end
# Logger.debug("#{__ENV__.line}: unw! #{name(this)}: #{inspect result}")
result
end
defp as_list(this, elements) when is_list(elements) do
{:ok, pid} = Agent.start(fn -> %{data: [], error: nil} end)
_ =
Enum.reduce_while(elements, pid, fn value, pid ->
ev = evaluated(this, value)
case ev do
{:error, e} ->
Agent.update(pid, fn state -> %{state | error: e} end)
{:halt, pid}
{:ok, v} ->
rv = unwrap(this, v)
case rv do
{:error, e} ->
Agent.update(pid, fn state -> %{state | error: e} end)
{:halt, pid}
{:ok, result} ->
data = Agent.get(pid, fn state -> state.data end)
new_data = data ++ [result]
Agent.update(pid, fn state -> %{state | data: new_data} end)
{:cont, pid}
end
end
end)
state = Agent.get(pid, fn state -> state end)
Agent.stop(pid)
if is_nil(state.error) do
{:ok, List.to_tuple(state.data)}
else
{:error, state.error}
end
end
defp as_list(this, elements) when is_tuple(elements) do
as_list(this, Tuple.to_list(elements))
end
defp as_dict(this, d) when is_map(d) do
{:ok, pid} = Agent.start(fn -> %{data: %{}, error: nil} end)
_ =
Enum.reduce_while(d, pid, fn {key, value}, pid ->
ev = evaluated(this, value)
# Logger.debug("#{__ENV__.line}: #{key} -> #{inspect ev}")
case ev do
{:error, e} ->
Agent.update(pid, fn state -> %{state | error: e} end)
{:halt, pid}
{:ok, v} ->
rv = unwrap(this, v)
case rv do
{:error, e} ->
Agent.update(pid, fn state -> %{state | error: e} end)
{:halt, pid}
{:ok, result} ->
data = Agent.get(pid, fn state -> state.data end)
new_data = Map.put(data, key, result)
Agent.update(pid, fn state -> %{state | data: new_data} end)
{:cont, pid}
end
end
end)
state = Agent.get(pid, fn state -> state end)
Agent.stop(pid)
if is_nil(state.error) do
{:ok, state.data}
else
{:error, state.error}
end
end
@doc "Return this configuration as a map, recursing into included configurations."
@spec as_dict(pid()) :: {atom(), any()}
def as_dict(this) do
state = Agent.get(this, fn state -> state end)
as_dict(this, state.data)
end
end
end
|
lib/cfg.ex
| 0.930443 | 0.77193 |
cfg.ex
|
starcoder
|
defmodule CastParams do
@moduledoc """
Plug for casting request params to defined types.
## Usage
```elixir
defmodule AccountController do
use AppWeb, :controller
use CastParams
# define param types
# :category_id - required integer param (raises CastParams.NotFound if missing)
# :weight - float param, set to nil if missing
cast_params category_id: :integer!, weight: :float
# defined only for the show action
# :name - a required string param
# :terms - a boolean param
cast_params name: :string!, terms: :boolean when action == :show
# receives prepared params
def index(conn, %{"category_id" => category_id, "weight" => weight} = params) do
end
# receives prepared params
def show(conn, %{"category_id" => category_id, "terms" => terms, "weight" => weight} = params) do
end
end
```
## Supported Types
Each type can end with a `!` to mark the parameter as required.
* *`:boolean`*
* *`:integer`*
* *`:string`*
* *`:float`*
* *`:decimal`*
"""
alias CastParams.{Schema, Plug, Config}
@typedoc """
Options for `use CastParams`.
"""
@type options :: [
nulify: boolean()
]
@spec __using__(options) :: no_return()
defmacro __using__(opts \\ []) do
quote do
@config Config.init(unquote(opts))
import CastParams
end
end
@doc """
Stores a plug to be executed as part of the plug pipeline.
"""
@spec cast_params(Schema.t()) :: Macro.t()
defmacro cast_params(schema)
defmacro cast_params({:when, _, [options, guards]}) do
cast_params(options, guards)
end
defmacro cast_params(options) do
{params, guards} = detect_attached_guards(options)
cast_params(params, guards)
end
defp cast_params(options, guards) do
schema = Schema.init(options)
result =
if guards do
quote location: :keep do
plug(Plug, {unquote(Macro.escape(schema)), @config} when unquote(guards))
end
else
quote location: :keep do
plug(Plug, {unquote(Macro.escape(schema)), @config})
end
end
# result
# |> IO.inspect
# |> Macro.to_string
# |> IO.puts
result
end
# detect attached guard to the end of options list
# `cast_params id: :integer when action == :index`
defp detect_attached_guards(args) do
Enum.reduce(args, {[], nil}, fn
{key, {:when, _env, [value, condition]}}, {ast, _guard} ->
{[{key, value} | ast], condition}
{key, value}, {ast, guard} ->
{[{key, value} | ast], guard}
end)
end
end
|
lib/cast_params.ex
| 0.821403 | 0.688403 |
cast_params.ex
|
starcoder
|
defmodule Cryptozaur.Utils do
alias Cryptozaur.{Repo, Connector}
@max_substitutions_in_prepare_statement 65535
defmacro amazing_success(value) do
quote do
{:ok, unquote(value)}
end
end
defmacro extreme_failure(reason) do
quote do
{:error, unquote(reason)}
end
end
def drop_milliseconds(datetime) do
with amazing_success(datetime_without_milliseconds) <- NaiveDateTime.from_erl(NaiveDateTime.to_erl(datetime)) do
datetime_without_milliseconds
else
:error -> :error
end
end
def seconds_to_millis(seconds) do
round(seconds * 1000)
end
def now do
NaiveDateTime.utc_now() |> drop_milliseconds()
end
def epoch do
~N[1970-01-01 00:00:00.000000]
end
def from_unix(timestamp) do
DateTime.to_naive(DateTime.from_unix!(timestamp))
end
def to_unix(datetime) do
DateTime.to_unix(DateTime.from_naive!(datetime, "Etc/UTC"))
end
def from_now(period) do
to = now()
from = NaiveDateTime.add(to, -period)
{from, to}
end
def get_strategy_module_by_type(type) do
String.to_existing_atom("Elixir.Cryptozaur.Strategies.#{type}")
end
def save_bulk(objects, insert_all_opts \\ [], transaction_opts \\ [timeout: :infinity, pool_timeout: :infinity])
def save_bulk([], _insert_all_opts, _transaction_opts), do: amazing_success(0)
def save_bulk([%{__struct__: _model} | _] = objects, insert_all_opts, transaction_opts) do
Repo.transaction(fn -> do_save_bulk(objects, insert_all_opts) end, transaction_opts)
end
def do_save_bulk([%{__struct__: model} | _] = objects, insert_all_opts) do
objects
|> Enum.map(&to_map_without_id/1)
|> Enum.chunk_every(chunk_size(model))
|> Enum.map(&Repo.insert_all(model, &1, insert_all_opts))
|> Enum.reduce(0, &(&2 + elem(&1, 0)))
end
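# PostgreSQL caps a single prepared statement at 65535 bind parameters;
# each inserted row consumes one parameter per field, so rows are chunked
# accordingly before insert_all.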
def chunk_size(model) do
Integer.floor_div(@max_substitutions_in_prepare_statement, length(model.fields))
end
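# Worked example: a schema with 13 fields can be inserted in chunks of
# Integer.floor_div(65535, 13) == 5041 rows, keeping each insert_all call
# under the adapter's cap on bind parameters per prepared statement.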
def align_to_resolution(naive_datetime, resolution) do
{:ok, datetime} = DateTime.from_naive(naive_datetime, "Etc/UTC")
timestamp = DateTime.to_unix(datetime)
remainder = rem(timestamp, resolution)
{:ok, datetime} = DateTime.from_unix(timestamp - remainder)
DateTime.to_naive(datetime)
end
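# Worked example: align_to_resolution(~N[2018-01-01 10:07:42], 300)
# returns ~N[2018-01-01 10:05:00], the start of the enclosing
# 5-minute (300-second) bucket.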
def milliseconds_from_beginning_of_day(datetime) do
timestamp = to_unix(datetime)
rem(timestamp, 24 * 60 * 60) * 1000
end
def max_date(a, b) do
if NaiveDateTime.compare(a, b) == :gt, do: a, else: b
end
def min_date(a, b) do
if NaiveDateTime.compare(a, b) == :lt, do: a, else: b
end
def date_gte(a, b) do
NaiveDateTime.compare(a, b) in [:gt, :eq]
end
def date_lte(a, b) do
NaiveDateTime.compare(a, b) in [:lt, :eq]
end
def date_gt(a, b) do
NaiveDateTime.compare(a, b) == :gt
end
def date_lt(a, b) do
NaiveDateTime.compare(a, b) == :lt
end
def closest_to_zero(a, b) do
if abs(a) < abs(b), do: a, else: b
end
def precise_amount_without_dust(symbol, amount, price) do
[exchange, base, _quote] = to_list(symbol)
amount = precise_amount(symbol, amount)
dust = Connector.get_min_amount(exchange, base, price)
if abs(amount) >= dust do
amount
else
0.0
end
end
def precise_amount(symbol, amount) do
[exchange, base, quote] = to_list(symbol)
precise_amount(exchange, base, quote, amount)
end
def precise_amount(exchange, base, quote, amount) do
precision = Connector.get_amount_precision(exchange, base, quote)
cond do
amount > 0.0 -> Float.floor(amount, precision)
amount < 0.0 -> Float.ceil(amount, precision)
amount == 0.0 -> 0.0
end
end
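# Worked example: with an amount precision of 2, 1.237 floors to 1.23 and
# -1.237 ceils to -1.23, i.e. the magnitude is always trimmed toward zero,
# so precision trimming never increases the order size.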
def precise_price(symbol, price) do
[exchange, base, quote] = to_list(symbol)
precise_price(exchange, base, quote, price)
end
def precise_price(exchange, base, quote, price) do
precision = Connector.get_price_precision(exchange, base, quote)
Float.round(price, precision)
end
def to_pair(symbol) do
[_exchange, base, quote] = to_list(symbol)
"#{base}:#{quote}"
end
def to_exchange(symbol) do
[exchange, _base, _quote] = to_list(symbol)
exchange
end
def to_list(symbol) do
symbol |> String.split(":")
end
def get_base(symbol) do
to_list(symbol) |> Enum.at(1)
end
def get_quote(symbol) do
to_list(symbol) |> Enum.at(2)
end
def as_maps([%{__struct__: struct} | _] = structs) do
structs |> Enum.map(&Map.take(&1, apply(struct, :fields, [])))
end
def to_maps(structs) do
structs |> Enum.map(&to_map(&1))
end
def to_map(list) when is_list(list) do
list |> Enum.into(%{})
end
def to_map(%{__meta__: __meta__} = struct) do
association_fields = struct.__struct__.__schema__(:associations)
waste_fields = association_fields ++ [:__meta__]
struct |> Map.from_struct() |> Map.drop(waste_fields)
end
def to_map_without_id(struct) do
struct |> to_map() |> Map.drop([:id])
end
def to_float(term) when is_number(term) do
# yeah, that's the official way
term / 1
end
def to_float(term) when is_binary(term) do
{result, _} = Float.parse(term)
result
end
def to_integer(term) when is_float(term) do
round(term)
end
def to_integer(term) when is_binary(term) do
{result, _} = Integer.parse(term)
result
end
def defaults(map, defaults) do
Map.merge(defaults, map, fn _key, default, value -> if is_empty(value), do: default, else: value end)
end
def default(value, default) do
if value, do: value, else: default
end
def is_empty(value) when is_binary(value), do: String.length(value) == 0
def is_empty(value) when is_integer(value), do: value == 0
def is_empty(value) when is_float(value), do: value == 0.0
# normalize return value for OK
def mkdir(path) do
case File.mkdir(path) do
:ok -> {:ok, path}
{:error, :eexist} -> {:ok, path}
{:error, reason} -> {:error, reason}
end
end
def format_float(float, precision) do
:erlang.float_to_binary(float, decimals: precision)
end
def format_amount(exchange, base, quote, amount) do
format_float(amount, Connector.get_amount_precision(exchange, base, quote))
end
def format_price(exchange, base, quote, price) do
format_float(price, Connector.get_price_precision(exchange, base, quote))
end
def all_ok(enum, default) do
Enum.find(enum, amazing_success(default), &match?(extreme_failure(_), &1))
end
def from_satoshi(price, quote) do
if quote in ["BTC", "ETH"] and price > 1.0, do: price * 0.00000001, else: price
end
def bangify(result) do
case result do
{:ok, value} -> value
{:error, error} -> raise error
end
end
def parse!(json) do
try do
Poison.Parser.parse!(json)
rescue
e in Poison.SyntaxError -> raise %{e | message: "#{e.message} (trying to parse \"#{json}\")"}
end
end
def parse(json) do
try do
amazing_success(Poison.Parser.parse!(json))
rescue
e in Poison.SyntaxError -> extreme_failure(e.message)
end
end
def execute_for_symbols(module, args) do
Application.get_env(:cryptozaur, :symbols, [])
|> Enum.map(&Task.async(module, :execute, [&1 | args]))
|> Enum.map(&Task.await(&1, :infinity))
end
def difference_normalized_by_midpoint(a, b) do
(b - a) / ((a + b) / 2)
end
def difference_normalized_by_startpoint(a, b) do
(b - a) / a
end
def sign(value) do
if value > 0.0, do: 1.0, else: -1.0
end
def identity(value) do
value
end
def between(low, current, high) do
low <= current and current <= high
end
def not_more_than(a, b) do
min(a, b)
end
def not_more_than_with_dust(a, b, symbol, price) do
[exchange, base, _quote] = to_list(symbol)
dust = Connector.get_min_amount(exchange, base, price)
if a - b < dust, do: a, else: min(a, b)
end
def not_less_than(a, b) do
max(a, b)
end
def not_less_than_with_dust(a, b, symbol, price) do
[exchange, base, _quote] = to_list(symbol)
dust = Connector.get_min_amount(exchange, base, price)
if a - b > -dust, do: a, else: max(a, b)
end
def key(map, value) when is_map(map) do
map
|> Enum.find(fn {_key, val} -> val == value end)
|> elem(0)
end
def value(list, key, default \\ nil) when is_list(list) do
value = List.keyfind(list, key, 0)
if value, do: elem(value, 1), else: default
end
def pluck(enumerable, key) do
Enum.map(enumerable, &Map.get(&1, key))
end
def pluck_all(enumerable, keys) do
Enum.map(enumerable, &Map.take(&1, keys))
end
def apply_mfa({module, function, arguments}, extra_arguments) do
apply(module, function, arguments ++ extra_arguments)
end
def metacall(module, name, data, params, now) do
module = Map.get(params, "#{name}_module", Atom.to_string(module)) |> String.to_atom()
function = Map.get(params, "#{name}_function") |> String.to_atom()
params = Map.get(params, "#{name}_params", %{})
module = (Code.ensure_loaded(module) && function_exported?(module, function, 3) && module) || Cryptozaur.Strategy
apply(module, function, [data, params, now])
end
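# Illustrative call (names are hypothetical):
#
#     metacall(MyStrategy, :entry, data, %{"entry_function" => "decide"}, now)
#
# resolves to MyStrategy.decide(data, %{}, now); if MyStrategy.decide/3 is
# not loaded/exported, the call falls back to Cryptozaur.Strategy.decide/3.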
def atomize_keys(map) do
for {key, val} <- map, into: %{}, do: {String.to_atom(key), val}
end
def ensure_atoms_map([]), do: []
def ensure_atoms_map(%{__struct__: _} = value), do: value
def ensure_atoms_map(value) do
if is_map(value) || Keyword.keyword?(value) do
Enum.into(value, %{}, fn {k, v} ->
{ensure_atom(k), ensure_atoms_map(v)}
end)
else
value
end
end
def ensure_atom(value) when is_bitstring(value), do: String.to_atom(value)
def ensure_atom(value) when is_atom(value), do: value
def pluralize(string, count, suffix \\ "s"), do: string <> if(count == 1, do: "", else: suffix)
def unwrap(tuple) do
{:ok, result} = tuple
result
end
def wrap(result) do
{:ok, result}
end
def check_success(tuple, error) do
case tuple do
amazing_success(result) -> amazing_success(result)
_ -> extreme_failure(error)
end
end
def check_success_true(tuple, error) do
case tuple do
amazing_success(true) -> amazing_success(true)
_ -> extreme_failure(error)
end
end
# Integer.parse, Map.fetch, ...
def check_success_unwrapped(result, error) do
case result do
:error -> extreme_failure(error)
result -> amazing_success(result)
end
end
def check_if(value, error) do
if value, do: amazing_success(value), else: extreme_failure(error)
end
def is_struct(map) do
Map.has_key?(map, :__struct__)
end
def increment_nonce(state) do
Map.get_and_update(state, :nonce, fn nonce -> {nonce, nonce + 1} end)
end
def is_backtest() do
Application.get_env(:cryptozaur, :env) == :test or Application.get_env(:cryptozaur, Cryptozaur.Backtester) != nil
end
def ohlc4(candle) do
(candle.open + candle.high + candle.low + candle.close) / 4
end
def amount_from_capital(symbol, capital, price) do
precise_amount_without_dust(symbol, capital / price, price)
end
def embed_with_key(map, key) do
map |> Enum.map(&{"#{key}_#{elem(&1, 0)}", elem(&1, 1)}) |> Enum.into(%{})
end
def ensure_all_candles_present(list, resolution) do
[head | tail] = list
[head | tail |> Enum.flat_map_reduce(head, &ensure_candle_present(&1, &2, resolution)) |> elem(0)]
end
def ensure_candle_present(%{__struct__: struct_module} = current_candle, previous_candle, resolution) do
gap = NaiveDateTime.diff(current_candle.timestamp, previous_candle.timestamp) / resolution
if gap - Float.round(gap) != 0.0, do: raise("Gap is not a whole number (#{gap})")
gap = to_integer(gap)
candles =
if gap > 1 do
for i <- 2..gap do
struct(
struct_module,
open: previous_candle.close,
high: previous_candle.close,
low: previous_candle.close,
close: previous_candle.close,
timestamp: previous_candle.timestamp |> NaiveDateTime.add((i - 1) * resolution)
)
end ++ [current_candle]
else
[current_candle]
end
{candles, current_candle}
end
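# Illustrative example: with a 60-second resolution and candles at 10:00:00
# and 10:03:00, the two missing minutes are filled with flat candles
# (open/high/low/close all equal to the previous close) so the series stays
# contiguous.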
end
defmodule Cryptozaur.Utils.Stream do
def time(from, to, resolution \\ 1, comparator \\ &Cryptozaur.Utils.date_lte/2) do
from = Cryptozaur.Utils.align_to_resolution(from, resolution)
to = Cryptozaur.Utils.align_to_resolution(to, resolution)
Stream.unfold(from, fn timestamp ->
if comparator.(timestamp, to) do
{timestamp, NaiveDateTime.add(timestamp, resolution)}
else
nil
end
end)
end
end
|
lib/cryptozaur/utils.ex
| 0.583203 | 0.443781 |
utils.ex
|
starcoder
|
defmodule AWS.PinpointSMSVoiceV2 do
@moduledoc """
Welcome to the *Amazon Pinpoint SMS and Voice, version 2 API Reference*.
This guide provides information about Amazon Pinpoint SMS and Voice, version 2
API resources, including supported HTTP methods, parameters, and schemas.
Amazon Pinpoint is an Amazon Web Services service that you can use to engage
with your recipients across multiple messaging channels. The Amazon Pinpoint SMS
and Voice, version 2 API provides programmatic access to options that are unique
to the SMS and voice channels and supplements the resources provided by the
Amazon Pinpoint API.
If you're new to Amazon Pinpoint, it's also helpful to review the [ Amazon Pinpoint Developer
Guide](https://docs.aws.amazon.com/pinpoint/latest/developerguide/welcome.html).
The *Amazon Pinpoint Developer Guide* provides tutorials, code samples, and
procedures that demonstrate how to use Amazon Pinpoint features programmatically
and how to integrate Amazon Pinpoint functionality into mobile apps and other
types of applications. The guide also provides key information, such as Amazon
Pinpoint integration with other Amazon Web Services services, and the quotas
that apply to use of the service.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2022-03-31",
content_type: "application/x-amz-json-1.0",
credential_scope: nil,
endpoint_prefix: "sms-voice",
global?: false,
protocol: "json",
service_id: "Pinpoint SMS Voice V2",
signature_version: "v4",
signing_name: "sms-voice",
target_prefix: "PinpointSMSVoiceV2"
}
end
@doc """
Associates the specified origination identity with a pool.
If the origination identity is a phone number and is already associated with
another pool, an Error is returned. A sender ID can be associated with multiple
pools.
If the origination identity configuration doesn't match the pool's
configuration, an Error is returned.
"""
def associate_origination_identity(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AssociateOriginationIdentity", input, options)
end
@doc """
Creates a new configuration set.
After you create the configuration set, you can add one or more event
destinations to it.
A configuration set is a set of rules that you apply to the SMS and voice
messages that you send.
When you send a message, you can optionally specify a single configuration set.
"""
def create_configuration_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateConfigurationSet", input, options)
end
@doc """
Creates a new event destination in a configuration set.
An event destination is a location where you send message events. The event
options are Amazon CloudWatch, Amazon Kinesis Data Firehose, or Amazon SNS. For
example, when a message is delivered successfully, you can send information
about that event to an event destination, or send notifications to endpoints
that are subscribed to an Amazon SNS topic.
Each configuration set can contain between 0 and 5 event destinations. Each
event destination can contain a reference to a single destination, such as a
CloudWatch or Kinesis Data Firehose destination.
"""
def create_event_destination(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateEventDestination", input, options)
end
@doc """
Creates a new opt-out list.
If the opt-out list name already exists, an Error is returned.
An opt-out list is a list of phone numbers that are opted out, meaning you can't
send SMS or voice messages to them. If end user replies with the keyword "STOP,"
an entry for the phone number is added to the opt-out list. In addition to STOP,
your recipients can use any supported opt-out keyword, such as CANCEL or OPTOUT.
For a list of supported opt-out keywords, see [ SMS opt out
](https://docs.aws.amazon.com/pinpoint/latest/userguide/channels-sms-manage.html#channels-sms-manage-optout)
in the *Amazon Pinpoint User Guide*.
"""
def create_opt_out_list(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateOptOutList", input, options)
end
@doc """
Creates a new pool and associates the specified origination identity to the
pool.
A pool can include one or more phone numbers and SenderIds that are associated
with your Amazon Web Services account.
The new pool inherits its configuration from the specified origination identity.
This includes keywords, message type, opt-out list, two-way configuration, and
self-managed opt-out configuration. Deletion protection isn't inherited from the
origination identity and defaults to false.
If the origination identity is a phone number and is already associated with
another pool, an Error is returned. A sender ID can be associated with multiple
pools.
"""
def create_pool(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreatePool", input, options)
end
@doc """
Deletes an existing configuration set.
A configuration set is a set of rules that you apply to voice and SMS messages
that you send. In a configuration set, you can specify a destination for
specific types of events related to voice and SMS messages.
"""
def delete_configuration_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteConfigurationSet", input, options)
end
@doc """
Deletes an existing default message type on a configuration set.
A message type is a type of messages that you plan to send. If you send
account-related messages or time-sensitive messages such as one-time passcodes,
choose **Transactional**. If you plan to send messages that contain marketing
material or other promotional content, choose **Promotional**. This setting
applies to your entire Amazon Web Services account.
"""
def delete_default_message_type(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDefaultMessageType", input, options)
end
@doc """
Deletes an existing default sender ID on a configuration set.
A default sender ID is the identity that appears on recipients' devices when
they receive SMS messages. Support for sender ID capabilities varies by country
or region.
"""
def delete_default_sender_id(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDefaultSenderId", input, options)
end
@doc """
Deletes an existing event destination.
An event destination is a location where you send response information about the
messages that you send. For example, when a message is delivered successfully,
you can send information about that event to an Amazon CloudWatch destination,
or send notifications to endpoints that are subscribed to an Amazon SNS topic.
"""
def delete_event_destination(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteEventDestination", input, options)
end
@doc """
Deletes an existing keyword from an origination phone number or pool.
A keyword is a word that you can search for on a particular phone number or
pool. It is also a specific word or phrase that an end user can send to your
number to elicit a response, such as an informational message or a special
offer. When your number receives a message that begins with a keyword, Amazon
Pinpoint responds with a customizable message.
Keywords "HELP" and "STOP" can't be deleted or modified.
"""
def delete_keyword(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteKeyword", input, options)
end
@doc """
Deletes an existing opt-out list.
All opted out phone numbers in the opt-out list are deleted.
If the specified opt-out list name doesn't exist or is in-use by an origination
phone number or pool, an Error is returned.
"""
def delete_opt_out_list(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteOptOutList", input, options)
end
@doc """
Deletes an existing opted out destination phone number from the specified
opt-out list.
Each destination phone number can only be deleted once every 30 days.
If the specified destination phone number doesn't exist or if the opt-out list
doesn't exist, an Error is returned.
"""
def delete_opted_out_number(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteOptedOutNumber", input, options)
end
@doc """
Deletes an existing pool.
Deleting a pool disassociates all origination identities from that pool.
If the pool status isn't active or if deletion protection is enabled, an Error
is returned.
A pool is a collection of phone numbers and SenderIds. A pool can include one or
more phone numbers and SenderIds that are associated with your Amazon Web
Services account.
"""
def delete_pool(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeletePool", input, options)
end
@doc """
Deletes an account-level monthly spending limit override for sending text
messages.
Deleting a spend limit override will set the `EnforcedLimit` to equal the
`MaxLimit`, which is controlled by Amazon Web Services. For more information on
spend limits (quotas) see [Amazon Pinpoint quotas
](https://docs.aws.amazon.com/pinpoint/latest/developerguide/quotas.html) in the
*Amazon Pinpoint Developer Guide*.
"""
def delete_text_message_spend_limit_override(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DeleteTextMessageSpendLimitOverride",
input,
options
)
end
@doc """
Deletes an account level monthly spend limit override for sending voice
messages.
Deleting a spend limit override sets the `EnforcedLimit` equal to the
`MaxLimit`, which is controlled by Amazon Web Services. For more information on
spending limits (quotas) see [Amazon Pinpoint quotas](https://docs.aws.amazon.com/pinpoint/latest/developerguide/quotas.html)
in the *Amazon Pinpoint Developer Guide*.
"""
def delete_voice_message_spend_limit_override(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DeleteVoiceMessageSpendLimitOverride",
input,
options
)
end
@doc """
Describes attributes of your Amazon Web Services account.
The supported account attributes include account tier, which indicates whether
your account is in the sandbox or production environment. When you're ready to
move your account out of the sandbox, create an Amazon Web Services Support case
for a service limit increase request.
New Amazon Pinpoint accounts are placed into an SMS or voice sandbox. The
sandbox protects both Amazon Web Services end recipients and SMS or voice
recipients from fraud and abuse.
"""
def describe_account_attributes(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAccountAttributes", input, options)
end
@doc """
Describes the current Amazon Pinpoint SMS Voice V2 resource quotas for your
account.
The description for a quota includes the quota name, current usage toward that
quota, and the quota's maximum value.
When you establish an Amazon Web Services account, the account has initial
quotas on the maximum number of configuration sets, opt-out lists, phone
numbers, and pools that you can create in a given Region. For more information
see [ Amazon Pinpoint quotas
](https://docs.aws.amazon.com/pinpoint/latest/developerguide/quotas.html) in the
*Amazon Pinpoint Developer Guide*.
"""
def describe_account_limits(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAccountLimits", input, options)
end
@doc """
Describes the specified configuration sets or all in your account.
If you specify configuration set names, the output includes information for only
the specified configuration sets. If you specify filters, the output includes
information for only those configuration sets that meet the filter criteria. If
you don't specify configuration set names or filters, the output includes
information for all configuration sets.
If you specify a configuration set name that isn't valid, an error is returned.
"""
def describe_configuration_sets(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeConfigurationSets", input, options)
end
@doc """
Describes the specified keywords or all keywords on your origination phone
number or pool.
A keyword is a word that you can search for on a particular phone number or
pool. It is also a specific word or phrase that an end user can send to your
number to elicit a response, such as an informational message or a special
offer. When your number receives a message that begins with a keyword, Amazon
Pinpoint responds with a customizable message.
If you specify a keyword that isn't valid, an Error is returned.
"""
def describe_keywords(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeKeywords", input, options)
end
@doc """
Describes the specified opt-out list or all opt-out lists in your account.
If you specify opt-out list names, the output includes information for only the
specified opt-out lists. If you specify filters, the output includes information
for only those opt-out lists that meet the filter criteria. If you don't specify
opt-out list names or filters, the output
includes information for all opt-out lists.
If you specify an opt-out list name that isn't valid, an Error is returned.
"""
def describe_opt_out_lists(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeOptOutLists", input, options)
end
@doc """
Describes the specified opted out destination numbers or all opted out
destination numbers in an opt-out list.
If you specify opted out numbers, the output includes information for only the
specified opted out numbers. If you specify filters, the output includes
information for only those opted out numbers that meet the filter criteria. If
you don't specify opted out numbers or filters, the output includes information
for all opted out destination numbers in your opt-out list.
If you specify an opted out number that isn't valid, an Error is returned.
"""
def describe_opted_out_numbers(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeOptedOutNumbers", input, options)
end
@doc """
Describes the specified origination phone number, or all the phone numbers in
your account.
If you specify phone number IDs, the output includes information for only the
specified phone numbers. If you specify filters, the output includes information
for only those phone numbers that meet the filter criteria. If you don't specify
phone number IDs or filters, the output includes information for all phone
numbers.
If you specify a phone number ID that isn't valid, an Error is returned.
"""
def describe_phone_numbers(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribePhoneNumbers", input, options)
end
@doc """
Retrieves the specified pools or all pools associated with your Amazon Web
Services account.
If you specify pool IDs, the output includes information for only the specified
pools. If you specify filters, the output includes information for only those
pools that meet the filter criteria. If you don't specify pool IDs or filters,
the output includes information for all pools.
If you specify a pool ID that isn't valid, an Error is returned.
A pool is a collection of phone numbers and SenderIds. A pool can include one or
more phone numbers and SenderIds that are associated with your Amazon Web
Services account.
"""
def describe_pools(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribePools", input, options)
end
@doc """
Describes the specified SenderIds or all SenderIds associated with your Amazon
Web Services account.
If you specify SenderIds, the output includes information for only the specified
SenderIds. If you specify filters, the output includes information for only
those SenderIds that meet the filter criteria. If you don't specify SenderIds or
filters, the output includes information for all SenderIds.
If you specify a sender ID that isn't valid, an Error is returned.
"""
def describe_sender_ids(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeSenderIds", input, options)
end
@doc """
Describes the current Amazon Pinpoint monthly spend limits for sending voice and
text messages.
When you establish an Amazon Web Services account, the account has initial
monthly spend limit in a given Region. For more information on increasing your
monthly spend limit, see [ Requesting increases to your monthly SMS spending quota for Amazon Pinpoint
](https://docs.aws.amazon.com/pinpoint/latest/userguide/channels-sms-awssupport-spend-threshold.html)
in the *Amazon Pinpoint User Guide*.
"""
def describe_spend_limits(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeSpendLimits", input, options)
end
@doc """
Removes the specified origination identity from an existing pool.
If the origination identity isn't associated with the specified pool, an Error
is returned.
"""
def disassociate_origination_identity(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisassociateOriginationIdentity", input, options)
end
@doc """
Lists all associated origination identities in your pool.
If you specify filters, the output includes information for only those
origination identities that meet the filter criteria.
"""
def list_pool_origination_identities(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListPoolOriginationIdentities", input, options)
end
@doc """
List all tags associated with a resource.
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForResource", input, options)
end
@doc """
Creates or updates a keyword configuration on an origination phone number or
pool.
A keyword is a word that you can search for on a particular phone number or
pool. It is also a specific word or phrase that an end user can send to your
number to elicit a response, such as an informational message or a special
offer. When your number receives a message that begins with a keyword, Amazon
Pinpoint responds with a customizable message.
If you specify a keyword that isn't valid, an Error is returned.
"""
def put_keyword(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutKeyword", input, options)
end
@doc """
Creates an opted out destination phone number in the opt-out list.
If the destination phone number isn't valid or if the specified opt-out list
doesn't exist, an Error is returned.
"""
def put_opted_out_number(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutOptedOutNumber", input, options)
end
@doc """
Releases an existing origination phone number in your account.
Once released, a phone number is no longer available for sending messages.
If the origination phone number has deletion protection enabled or is associated
with a pool, an Error is returned.
"""
def release_phone_number(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ReleasePhoneNumber", input, options)
end
@doc """
Request an origination phone number for use in your account.
For more information on phone number request see [ Requesting a number
](https://docs.aws.amazon.com/pinpoint/latest/userguide/settings-sms-request-number.html)
in the *Amazon Pinpoint User Guide*.
"""
def request_phone_number(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RequestPhoneNumber", input, options)
end
@doc """
Creates a new text message and sends it to a recipient's phone number.
SMS throughput limits are measured in Message Parts per Second (MPS). Your MPS
limit depends on the destination country of your messages, as well as the type
of phone number (origination number) that you use to send the message. For more
information, see [Message Parts per Second (MPS) limits](https://docs.aws.amazon.com/pinpoint/latest/userguide/channels-sms-limitations-mps.html)
in the *Amazon Pinpoint User Guide*.
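A minimal usage sketch; the input keys follow the service's JSON schema, and
the credentials and phone number below are placeholders:

    client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")

    {:ok, result, _http_response} =
      AWS.PinpointSMSVoiceV2.send_text_message(client, %{
        "DestinationPhoneNumber" => "+14255550123",
        "MessageBody" => "Your verification code is 123456",
        "MessageType" => "TRANSACTIONAL"
      })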
"""
def send_text_message(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "SendTextMessage", input, options)
end
@doc """
Allows you to send a request that sends a voice message through Amazon Pinpoint.
This operation uses [Amazon Polly](http://aws.amazon.com/polly/) to convert a
text script into a voice message.
"""
def send_voice_message(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "SendVoiceMessage", input, options)
end
@doc """
Sets the default message type on a configuration set.
Choose the category of SMS messages that you plan to send from this account. If
you send account-related messages or time-sensitive messages such as one-time
passcodes, choose **Transactional**. If you plan to send messages that contain
marketing material or other promotional content, choose **Promotional**. This
setting applies to your entire Amazon Web Services account.
"""
def set_default_message_type(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "SetDefaultMessageType", input, options)
end
@doc """
Sets default sender ID on a configuration set.
When sending a text message to a destination country that supports sender IDs,
the default sender ID on the configuration set specified will be used if no
dedicated origination phone numbers or registered sender IDs are available in
your account.
"""
def set_default_sender_id(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "SetDefaultSenderId", input, options)
end
@doc """
Sets an account level monthly spend limit override for sending text messages.
The requested spend limit must be less than or equal to the `MaxLimit`, which is
set by Amazon Web Services.
"""
def set_text_message_spend_limit_override(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "SetTextMessageSpendLimitOverride", input, options)
end
@doc """
Sets an account level monthly spend limit override for sending voice messages.
The requested spend limit must be less than or equal to the `MaxLimit`, which is
set by Amazon Web Services.
"""
def set_voice_message_spend_limit_override(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "SetVoiceMessageSpendLimitOverride", input, options)
end
@doc """
Adds or overwrites only the specified tags for the specified Amazon Pinpoint SMS
Voice, version 2 resource.
When you specify an existing tag key, the value is overwritten with the new
value. Each resource can have a maximum of 50 tags. Each tag consists of a key
and an optional value. Tag keys must be unique per resource. For more
information about tags, see [ Tagging Amazon Pinpoint resources](https://docs.aws.amazon.com/pinpoint/latest/developerguide/tagging-resources.html)
in the *Amazon Pinpoint Developer Guide*.
"""
def tag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TagResource", input, options)
end
@doc """
Removes the association of the specified tags from an Amazon Pinpoint SMS Voice
V2 resource.
For more information on tags see [ Tagging Amazon Pinpoint resources](https://docs.aws.amazon.com/pinpoint/latest/developerguide/tagging-resources.html)
in the *Amazon Pinpoint Developer Guide*.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UntagResource", input, options)
end
@doc """
Updates an existing event destination in a configuration set.
You can update the IAM role ARN for CloudWatch Logs and Kinesis Data Firehose.
You can also enable or disable the event destination.
You may want to update an event destination to change its matching event types
or to update the destination resource ARN. You can't change an event
destination's type between CloudWatch Logs, Kinesis Data Firehose, and Amazon
SNS.
"""
def update_event_destination(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateEventDestination", input, options)
end
@doc """
Updates the configuration of an existing origination phone number.
You can update the opt-out list, enable or disable two-way messaging, change the
TwoWayChannelArn, enable or disable self-managed opt-outs, and enable or disable
deletion protection.
If the origination phone number is associated with a pool, an Error is returned.
"""
def update_phone_number(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdatePhoneNumber", input, options)
end
@doc """
Updates the configuration of an existing pool.
You can update the opt-out list, enable or disable two-way messaging, change the
`TwoWayChannelArn`, enable or disable self-managed opt-outs, enable or disable
deletion protection, and enable or disable shared routes.
"""
def update_pool(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdatePool", input, options)
end
end
|
lib/aws/generated/pinpoint_sms_voice_v2.ex
| 0.844905 | 0.550184 |
pinpoint_sms_voice_v2.ex
|
starcoder
|
defmodule Square.Payments do
@moduledoc """
Documentation for `Square.Payments`.
"""
@doc """
Retrieves a list of payments taken by the account making the request.
```
def list_payments(client,
[
begin_time: nil,
end_time: nil,
sort_order: nil,
cursor: nil,
location_id: nil,
total: nil,
last_4: nil,
card_brand: nil
])
```
### Parameters
| Parameter | Type | Tags | Description |
| --- | --- | --- | --- |
| `begin_time` | `String` | Query, Optional | Timestamp for the beginning of the reporting period, in RFC 3339 format.<br>Inclusive. Default: The current time minus one year. |
| `end_time` | `String` | Query, Optional | Timestamp for the end of the requested reporting period, in RFC 3339 format.<br><br>Default: The current time. |
| `sort_order` | `String` | Query, Optional | The order in which results are listed.<br>- `ASC` - oldest to newest<br>- `DESC` - newest to oldest (default). |
| `cursor` | `String` | Query, Optional | A pagination cursor returned by a previous call to this endpoint.<br>Provide this to retrieve the next set of results for the original query.<br><br>See [Pagination](https://developer.squareup.com/docs/basics/api101/pagination) for more information. |
| `location_id` | `String` | Query, Optional | Limit results to the location supplied. By default, results are returned<br>for all locations associated with the merchant. |
| `total` | `Float` | Query, Optional | The exact amount in the total_money for a `Payment`. |
| `last_4` | `String` | Query, Optional | The last 4 digits of `Payment` card. |
| `card_brand` | `String` | Query, Optional | The brand of `Payment` card. For example, `VISA` |
### Response Type
[`List Payments Response Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/list-payments-response.md)
### Example Usage
iex> Square.client |> Square.Payments.list_payments()
"""
@spec list_payments(Tesla.Client.t(), list) ::
{:error, any} | {:ok, Tesla.Env.t()}
def list_payments(client, params \\ []),
do: Tesla.get(client, "payments", query: params)
@doc """
Retrieves details for a specific Payment.
```
def get_payment(payment_id)
```
### Parameters
| Parameter | Type | Tags | Description |
| --- | --- | --- | --- |
| `payment_id` | `String` | Template, Required | Unique ID for the desired `Payment`. |
### Response Type
[`Get Payment Response Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/get-payment-response.md)
### Example Usage
iex> payment_id = "payment_id0"
iex> Square.client |> Square.Payments.get_payment(payment_id)
"""
@spec get_payment(Tesla.Client.t(), binary) :: {:error, any} | {:ok, Tesla.Env.t()}
def get_payment(client, payment_id), do: Tesla.get(client, "payments/#{payment_id}")
@doc """
Charges a payment source, for example, a card
represented by customer's card on file or a card nonce. In addition
to the payment source, the request must also include the
amount to accept for the payment.
There are several optional parameters that you can include in the request.
For example, tip money, whether to autocomplete the payment, or a reference ID
to correlate this payment with another system.
For more information about these
payment options, see [Take Payments](https://developer.squareup.com/docs/payments-api/take-payments).
The `PAYMENTS_WRITE_ADDITIONAL_RECIPIENTS` OAuth permission is required
to enable application fees.
```
def create_payment(client, body)
```
### Parameters
| Parameter | Type | Tags | Description |
| --- | --- | --- | --- |
| `body` | [`Create Payment Request Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/create-payment-request.md) | Body,Required | A map containing the fields to POST for the request.<br><br>See the corresponding map definition for field details. |
### Response Type
[`Create Payment Response Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/create-payment-response.md)
### Example Usage
iex> body = %{
source_id: "ccof:uIbfJXhXETSP197M3GB",
idempotency_key: "<KEY>",
amount_money: %{
amount: 200,
currency: "USD"
},
app_fee_money: %{
amount: 10,
currency: "USD",
},
autocomplete: true,
customer_id: "VDKXEEKPJN48QDG3BGGFAK05P8",
location_id: "XK3DBG77NJBFX",
reference_id: "123456",
note: "Brief description",
}
iex> Square.client |> Square.Payments.create_payment(body)
"""
@spec create_payment(Tesla.Client.t(), map) :: {:error, any} | {:ok, Tesla.Env.t()}
def create_payment(client, body \\ %{}), do: Tesla.post(client, "payments", body)
@doc """
Cancels (voids) a payment. If you set `autocomplete` to false when creating a payment,
you can cancel the payment using this endpoint. For more information, see
[Delayed Payments](https://developer.squareup.com/docs/payments-api/take-payments#delayed-payments).
```
def cancel_payment(client, payment_id)
```
### Parameters
| Parameter | Type | Tags | Description |
| --- | --- | --- | --- |
| `payment_id` | `String` | Template, Required | `payment_id` identifying the payment to be canceled. |
### Response Type
[`Cancel Payment Response Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/cancel-payment-response.md)
### Example Usage
iex> payment_id = "payment_id0"
iex> Square.client |> Square.Payments.cancel_payment(payment_id)
"""
@spec cancel_payment(Tesla.Client.t(), binary) :: {:error, any} | {:ok, Tesla.Env.t()}
def cancel_payment(client, payment_id),
do: Tesla.post(client, "payments/#{payment_id}/cancel", %{})
@doc """
Cancels (voids) a payment identified by the idempotency key that is specified in the
request.
Use this method when the status of a CreatePayment request is unknown. For example, after you send
a CreatePayment request, a network error occurs and you don't receive a response. In this case, you
can direct Square to cancel the payment using this endpoint. In the request, you provide the same
idempotency key that you provided in the CreatePayment request that you want to cancel. After
canceling the payment, you can submit your CreatePayment request again.
Note that if no payment with the specified idempotency key is found, no action is taken and the
endpoint returns successfully.
```
def cancel_payment_by_idempotency_key(client, body)
```
### Parameters
| Parameter | Type | Tags | Description |
| --- | --- | --- | --- |
| `body` | [`Cancel Payment by Idempotency Key Request Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/cancel-payment-by-idempotency-key-request.md) | Body, Required | A map containing the fields to POST for the request.<br><br>See the corresponding map definition for field details. |
### Response Type
[`Cancel Payment by Idempotency Key Response Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/cancel-payment-by-idempotency-key-response.md)
### Example Usage
iex> body = %{idempotency_key: "a7e36d40-d24b-11e8-b568-0800200c9a66"}
iex> Square.client |> Square.Payments.cancel_payment_by_idempotency_key(body)
"""
def cancel_payment_by_idempotency_key(client, body \\ %{}),
do: Tesla.post(client, "payments/cancel", body)
@doc """
Completes (captures) a payment.
By default, payments are set to complete immediately after they are created.
If you set autocomplete to false when creating a payment, you can complete (capture)
the payment using this endpoint. For more information, see
[Delayed Payments](https://developer.squareup.com/docs/payments-api/take-payments#delayed-payments).
```
def complete_payment(payment_id, body)
```
### Parameters
| Parameter | Type | Tags | Description |
| --- | --- | --- | --- |
| `payment_id` | `String` | Template, Required | Unique ID identifying the payment to be completed. |
| `body` | `Map` | Body, Required | A map containing the fields to POST for the request.<br><br>See the corresponding map definition for field details. |
### Response Type
[`Complete Payment Response Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/complete-payment-response.md)
### Example Usage
iex> payment_id = "payment_id0"
iex> body = %{ key1: "val1", key2: "val2" }
iex> Square.client |> Square.Payments.complete_payment(payment_id, body)
"""
def complete_payment(client, payment_id, body \\ %{}),
do: Tesla.post(client, "payments/#{payment_id}/complete", body)
end
|
lib/api/payments_api.ex
| 0.932829 | 0.877529 |
payments_api.ex
|
starcoder
|
defmodule TwitterStream do
@moduledoc """
TwitterStream is a GenServer that can be invoked manually via `start_link/1` or
added as a `child_spec/1` to a supervision tree.
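
A sketch of running it under a supervisor (the tracked term, sink, and name
are illustrative):

```elixir
children = [
  {TwitterStream, params: %{"track" => "elixir"}, sink: MyTweetConsumer, name: ElixirStream}
]

Supervisor.start_link(children, strategy: :one_for_one)
```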
"""
use GenServer
alias TwitterStream.{Auth, Decoder}
@doc """
Start a twitter stream process given [Twitter Streaming API](https://developer.twitter.com/en/docs/tweets/filter-realtime/api-reference/post-statuses-filter) parameters and a process to sink tweets to.
Returns `{:ok, pid}`.
## Available keyword options
Parameters to send to the [Twitter Streaming API](https://developer.twitter.com/en/docs/tweets/filter-realtime/api-reference/post-statuses-filter).
```elixir
params: %{"track" => "developer", "language" => "en", "filter_level" => "low"}
```
Process to send all decoded tweets to.
```elixir
sink: self()
```
GenServer registration name, optional and defaults to `TwitterStream`.
```elixir
name: DeveloperTwitterStream
```
## Examples
iex> opts = [params: %{"track" => "developer"}, sink: self()]
iex> {:ok, pid} = TwitterStream.start_link(opts)
iex> flush()
{:tweet, #PID<0.219.0>,
 %{
   "text" => "...",
   ...
 }
}
"""
def start_link(opts) do
GenServer.start_link(__MODULE__, opts, name: opts[:name] || __MODULE__)
end
@doc false
def init(opts) do
http = Application.get_env(:twitter_stream, :http) || :hackney
url = "https://stream.twitter.com/1.1/statuses/filter.json"
headers = ["Authorization": Auth.oauth_header("post", url, opts[:params])]
params = Map.to_list(opts[:params])
stream_opts = [
{:async, :once}, {:stream_to, self()},
{:recv_timeout, :timer.minutes(3)}
]
case http.post(url, headers, {:form, params}, stream_opts) do
{:ok, _ref} -> {:ok, %{sink: opts[:sink]}}
error -> {:stop, error}
end
end
def handle_info({:hackney_response, ref, {:status, 200, "OK"}}, state) do
:hackney.stream_next(ref)
{:noreply, state}
end
def handle_info({:hackney_response, ref, {:status, 420, _}}, _state) do
:hackney.close(ref)
{:stop, :normal, %{}}
end
def handle_info({:hackney_response, ref, {:headers, _headers}}, state) do
:hackney.stream_next(ref)
{:noreply, state}
end
def handle_info({:hackney_response, ref, chunk}, state) when is_binary(chunk) do
state =
with {:json, true} <- {:json, Decoder.json?(chunk)},
%{"id" => _} = tweet <- Decoder.decode(chunk, state) do
send(state.sink, {:tweet, self(), tweet})
Map.delete(state, :decoder)
else
{:json, false} -> Map.delete(state, :decoder)
{:incomplete, decoder} -> Map.put(state, :decoder, decoder)
%{"limit" => _} -> Map.delete(state, :decoder)
end
:hackney.stream_next(ref)
{:noreply, state}
end
end
|
lib/twitter_stream.ex
| 0.894578 | 0.806396 |
twitter_stream.ex
|
starcoder
|
defmodule RateTheDub.ETagCache do
@moduledoc """
A Tesla middleware for caching requests based on ETags. Cached data is stored
in the database as `CachedPages`.
This is primarily to reduce the load on Jikan and to be a good neighbor and
all that, as well as to speed up the site by reducing API calls.
CachedPages are stored in the database to remain persistent across restarts
and to be shared between different replicas when scaling.
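
A sketch of plugging the middleware into a Tesla client (the client module is
illustrative; per-request caching can be bypassed with `opts: [cache: false]`):

    defmodule RateTheDub.Jikan.Client do
      use Tesla

      plug Tesla.Middleware.BaseUrl, "https://api.jikan.moe/v3"
      plug RateTheDub.ETagCache
      plug Tesla.Middleware.JSON
    end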
"""
@behaviour Tesla.Middleware
require Logger
import Ecto.Query, warn: false
alias RateTheDub.Repo
alias RateTheDub.ETagCache.CachedPage
@doc """
Gets a single `CachedPage` based on its URL. This URL should include the
query parameters.
## Examples
iex> get_cached_page_by_url("https://example.com?a=1")
%CachedPage{}
iex> get_cached_page_by_url("https://fake.net")
nil
"""
def get_cached_page_by_url(url) do
Repo.get_by(CachedPage, url: url)
end
@doc """
Creates a cached page.
## Examples
iex> create_cached_page(%{field: value})
{:ok, %CachedPage{}}
iex> create_cached_page(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_cached_page(attrs \\ %{}) do
%CachedPage{}
|> CachedPage.changeset(attrs)
|> Repo.insert(on_conflict: :replace_all, conflict_target: :url)
end
@doc """
Deletes all cached pages that are not for a search URL to cut down on
duplication in the database.
To be used as a manual cleanup task when needed. The `:cache` option should be
used to make this less necessary.
"""
def delete_non_searches() do
CachedPage
|> where([p], not like(p.url, "%api.jikan.moe/v3/search%"))
|> Repo.delete_all()
end
@impl Tesla.Middleware
def call(%{opts: [cache: false]} = env, next, _), do: Tesla.run(env, next)
@impl Tesla.Middleware
def call(%{method: :get} = env, next, _) do
cached =
Tesla.build_url(env.url, env.query)
|> get_cached_page_by_url()
env
|> set_etag(cached)
|> Tesla.run(next)
|> process_resp(cached)
end
@impl Tesla.Middleware
def call(env, next, _), do: Tesla.run(env, next)
defp set_etag(env, cached) do
case cached do
%CachedPage{etag: etag} ->
Tesla.put_header(env, "If-None-Match", etag)
nil ->
env
end
end
defp process_resp({:error, _} = error, _), do: error
defp process_resp({:ok, %{status: 200} = env}, _) do
etag = Tesla.get_header(env, "etag")
full_url = Tesla.build_url(env.url, env.query)
attrs = %{url: full_url, etag: etag, body: env.body}
{:ok, _} = create_cached_page(attrs)
{:ok, env}
end
defp process_resp({:ok, %{status: 304} = env}, %{body: body}) do
Logger.info("Serving cached request for #{Tesla.build_url(env.url, env.query)}")
{:ok, Map.put(env, :body, body)}
end
defp process_resp({:ok, _} = resp, _), do: resp
end
|
lib/ratethedub/etag_cache.ex
| 0.847274 | 0.448547 |
etag_cache.ex
|
starcoder
|
defmodule Absinthe.Execution.Variables do
# Handles the logic around building and validating variable values for an
# execution.
@moduledoc false
alias Absinthe.Type
alias Absinthe.Language
alias Absinthe.Execution
defstruct raw: %{}, processed: %{}
# Build a variables map from the variable definitions in the selected operation
# and the variable values provided to the execution.
@doc false
@spec build(Execution.t) :: {:ok | :error, Execution.t}
def build(execution) do
execution.selected_operation.variable_definitions
|> Enum.reduce({:ok, execution}, &build_definition/2)
end
def build_definition(definition, {status, execution}) do
case validate_definition_type(definition.type, execution) do
{:ok, schema_type, type_stack} ->
process_variable(definition, schema_type, type_stack, execution, status)
:error ->
inner_type = definition.type |> unwrap
execution = Execution.put_error(execution, :variable, inner_type.name, "Type `#{inner_type.name}': Not present in schema", at: definition.type)
{:error, execution}
end
end
defp unwrap(%{type: inner_type}), do: unwrap(inner_type)
defp unwrap(type), do: type
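# e.g. a variable declared as `[Int]!` arrives as
# %Language.NonNullType{type: %Language.ListType{type: %Language.NamedType{name: "Int"}}}
# and unwrap/1 recurses through the wrappers, returning the innermost
# %Language.NamedType{name: "Int"}.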
defp validate_definition_type(type, execution) do
validate_definition_type(type, [], execution)
end
defp validate_definition_type(%Language.NonNullType{type: inner_type}, acc, execution) do
validate_definition_type(inner_type, acc, execution)
end
defp validate_definition_type(%Language.ListType{type: inner_type}, acc, execution) do
validate_definition_type(inner_type, [Type.List | acc], execution)
end
defp validate_definition_type(%Language.NamedType{name: name}, acc, execution) do
case execution.schema.__absinthe_type__(name) do
nil -> :error
type -> {:ok, type, [name | acc]}
end
end
defp process_variable(definition, schema_type, type_stack, execution, status) do
case Execution.Variable.build(definition, schema_type, type_stack, execution) do
{:ok, variable, execution} ->
{status, put_variable(execution, definition.variable.name, variable)}
error ->
error
end
end
defp put_variable(execution, _, %{value: nil}) do
execution
end
defp put_variable(execution, name, variable) do
variables = execution.variables
|> Map.update!(:processed, &Map.put(&1, name, variable))
%{execution | variables: variables}
end
end
|
lib/absinthe/execution/variables.ex
| 0.703855 | 0.42922 |
variables.ex
|
starcoder
|
defmodule Bonny.Server.Reconciler do
@moduledoc """
Creates a stream that, when run, streams a list of resources and calls `reconcile/1`
on the given controller for each resource in the stream in parallel.
## Example
reconciliation_stream = Bonny.Server.Reconciler.get_stream(controller)
Task.async(fn -> Stream.run(reconciliation_stream) end)
"""
@doc """
Takes a controller module together with a connection and a list operation and
returns a (prepared) stream.
* `conn` - a `K8s.Conn.t()` used to run the operation
* `reconcile_operation` - a `K8s.Operation.t()` list operation that produces the stream of resources
* the controller must implement `reconcile/1`, which takes a map and processes it
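
A sketch of a conforming controller and of running the stream (the module,
connection source, and operation are illustrative):

    defmodule MyOperator.PodController do
      @behaviour Bonny.Server.Reconciler

      @impl true
      def reconcile(pod) do
        # handle one resource; return :ok, {:ok, _} or {:error, _}
        :ok
      end
    end

    # from_file/1 returns {:ok, conn} in recent versions of the k8s library
    {:ok, conn} = K8s.Conn.from_file("~/.kube/config")
    operation = K8s.Client.list("v1", "Pod", namespace: "default")

    Bonny.Server.Reconciler.get_stream(MyOperator.PodController, conn, operation)
    |> Stream.run()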
"""
require Logger
@callback reconcile(map()) :: :ok | {:ok, any()} | {:error, any()}
@spec get_stream(module(), K8s.Conn.t(), K8s.Operation.t(), keyword()) :: Enumerable.t()
def get_stream(controller, conn, reconcile_operation, opts \\ []) do
{:ok, reconciliation_stream} = K8s.Client.stream(conn, reconcile_operation, opts)
reconcile_all(reconciliation_stream, controller)
end
defp reconcile_all(resource_stream, controller) do
resource_stream
|> Task.async_stream(
fn
resource when is_map(resource) ->
reconcile_single_resource(resource, controller)
metadata = %{module: controller}
Logger.debug("Reconciler fetch succeeded", metadata)
resource
{:error, error} ->
metadata = %{module: controller, error: error}
Logger.debug("Reconciler fetch failed", metadata)
error
end,
ordered: false
)
end
defp reconcile_single_resource(resource, controller) do
metadata = %{
module: controller,
name: K8s.Resource.name(resource),
namespace: K8s.Resource.namespace(resource),
kind: K8s.Resource.kind(resource),
api_version: resource["apiVersion"]
}
:telemetry.span([:reconciler, :reconcile], metadata, fn ->
case controller.reconcile(resource) do
:ok ->
Logger.debug("Reconciler reconciliation succeeded", metadata)
{:ok, metadata}
{:ok, _} ->
Logger.debug("Reconciler reconciliation succeeded", metadata)
{:ok, metadata}
{:error, error} ->
metadata = Map.put(metadata, :error, error)
Logger.error("Reconciler reconciliation failed", metadata)
{:error, metadata}
end
end)
end
end
|
lib/bonny/server/reconciler.ex
| 0.865039 | 0.415373 |
reconciler.ex
|
starcoder
|
defmodule Mix.Config do
# TODO: Convert them to hard deprecations on v1.13
@moduledoc deprecated: "Use Config and Config.Reader instead"
@moduledoc ~S"""
A simple configuration API and functions for managing config files.
This module is deprecated, use the modules `Config` and `Config.Reader`
from Elixir's standard library instead.
## Setting configuration
Most commonly, this module is used to define your own configuration:
use Mix.Config
config :root_key,
key1: "value1",
key2: "value2"
import_config "#{Mix.env()}.exs"
`use Mix.Config` will import the functions `config/2`, `config/3`
and `import_config/1` to help you manage your configuration.
## Evaluating configuration
Once a configuration is written to a file, the functions in this
module can be used to read and merge said configuration. The `eval!/2`
function allows you to evaluate a given configuration file and the `merge/2`
function allows you to deep merge the results of multiple configurations. Those
functions should not be invoked by users writing configurations but
rather by library authors.
## Examples
The most common use of `Mix.Config` is to define application
configuration so that `Application.get_env/3` and other `Application`
functions can be used to retrieve or further change them.
Application config files are typically placed in the `config/`
directory of your Mix projects. For example, the following config
# config/config.exs
config :my_app, :key, "value"
will be automatically loaded by Mix and persisted into the
`:my_app`'s application environment, which can be accessed in
its source code as follows:
"value" = Application.fetch_env!(:my_app, :key1)
"""
@doc false
defmacro __using__(_) do
quote do
import Mix.Config, only: [config: 2, config: 3, import_config: 1]
end
end
@doc """
Configures the given `root_key`.
Keyword lists are always deep merged.
## Examples
The given `opts` are merged into the existing configuration
for the given `root_key`. Conflicting keys are overridden by the
ones specified in `opts`. For example, the application
configuration below
config :logger,
level: :warn,
backends: [:console]
config :logger,
level: :info,
truncate: 1024
will have a final configuration for `:logger` of:
[level: :info, backends: [:console], truncate: 1024]
"""
@doc deprecated: "Use the Config module instead"
defdelegate config(root_key, opts), to: Config
@doc """
Configures the given `key` for the given `root_key`.
Keyword lists are always deep merged.
## Examples
The given `opts` are merged into the existing values for `key`
in the given `root_key`. Conflicting keys are overridden by the
ones specified in `opts`. For example, the application
configuration below
config :ecto, Repo,
log_level: :warn,
adapter: Ecto.Adapters.Postgres
config :ecto, Repo,
log_level: :info,
pool_size: 10
will have a final value of the configuration for the `Repo`
key in the `:ecto` application of:
[log_level: :info, pool_size: 10, adapter: Ecto.Adapters.Postgres]
"""
@doc deprecated: "Use the Config module instead"
defdelegate config(root_key, key, opts), to: Config
@doc ~S"""
Imports configuration from the given file or files.
If `path_or_wildcard` is a wildcard, then all the files
matching that wildcard will be imported; if no file matches
the wildcard, no errors are raised. If `path_or_wildcard` is
not a wildcard but a path to a single file, then that file is
imported; in case the file doesn't exist, an error is raised.
If path/wildcard is a relative path/wildcard, it will be expanded
relatively to the directory the current configuration file is in.
## Examples
This is often used to emulate configuration across environments:
import_config "#{Mix.env()}.exs"
Or to import files from children in umbrella projects:
import_config "../apps/*/config/config.exs"
"""
@doc deprecated: "Use the Config module instead"
defmacro import_config(path_or_wildcard) do
quote do
Mix.Config.__import__!(unquote(path_or_wildcard), __DIR__)
end
end
@doc false
def __import__!(path_or_wildcard, dir) do
path_or_wildcard = Path.expand(path_or_wildcard, dir)
paths =
if String.contains?(path_or_wildcard, ~w(* ? [ {)) do
Path.wildcard(path_or_wildcard)
else
[path_or_wildcard]
end
for path <- paths do
Config.__import__!(path)
end
:ok
end
## Mix API
@doc """
Evaluates the given configuration file.
It accepts a list of `imported_paths` that should raise if attempted
to be imported again (to avoid recursive imports).
It returns a tuple with the configuration and the imported paths.
"""
@doc deprecated: "Use Config.Reader.read_imports!/2 instead"
def eval!(file, imported_paths \\ []) do
Config.Reader.read_imports!(file,
imports: imported_paths,
env: Mix.env(),
target: Mix.target()
)
end
@doc """
Reads the configuration file.
The same as `eval!/2` but only returns the configuration
in the given file, without returning the imported paths.
It exists for convenience purposes. For example, you could
invoke it inside your `mix.exs` to read some external data
you decided to move to a configuration file:
subsystem: Mix.Config.read!("rel/subsystem.exs")
"""
@doc deprecated: "Use Config.Reader.read!/2 instead"
@spec read!(Path.t(), [Path.t()]) :: keyword
def read!(file, imported_paths \\ []) do
Config.Reader.read!(file, imports: imported_paths, env: Mix.env(), target: Mix.target())
end
@doc """
Merges two configurations.
The configurations are merged together with the values in
the second one having higher preference than the first in
case of conflicts. In case both values are set to keyword
lists, it deep merges them.
## Examples
iex> Mix.Config.merge([app: [k: :v1]], [app: [k: :v2]])
[app: [k: :v2]]
iex> Mix.Config.merge([app: [k: [v1: 1, v2: 2]]], [app: [k: [v2: :a, v3: :b]]])
[app: [k: [v1: 1, v2: :a, v3: :b]]]
iex> Mix.Config.merge([app1: []], [app2: []])
[app1: [], app2: []]
"""
@doc deprecated: "Use Config.Reader.merge/2 instead"
def merge(config1, config2) do
Config.__merge__(config1, config2)
end
@doc """
Persists the given configuration by modifying
the configured applications environment.
`config` should be a list of `{app, app_config}` tuples or a
`%{app => app_config}` map where `app` are the applications to
be configured and `app_config` are the configuration (as key-value
pairs) for each of those applications.
Returns the configured applications.
## Examples
Mix.Config.persist(logger: [level: :error], my_app: [my_config: 1])
#=> [:logger, :my_app]
"""
@doc deprecated: "Use Application.put_all_env/2 instead"
def persist(config) do
Application.put_all_env(config, persistent: true)
end
@doc false
@deprecated "Use eval!/2 instead"
def read_wildcard!(path, loaded_paths \\ []) do
paths =
if String.contains?(path, ~w(* ? [ {)) do
Path.wildcard(path)
else
[path]
end
Enum.reduce(paths, [], &merge(&2, read!(&1, loaded_paths)))
end
@doc false
@deprecated "Manually validate the data instead"
def validate!(config) do
validate!(config, "runtime")
end
defp validate!(config, file) do
if is_list(config) do
Enum.all?(config, fn
{app, value} when is_atom(app) ->
if Keyword.keyword?(value) do
true
else
raise ArgumentError,
"expected #{Path.relative_to_cwd(file)} config for app #{inspect(app)} " <>
"to return keyword list, got: #{inspect(value)}"
end
_ ->
false
end)
else
raise ArgumentError,
"expected #{Path.relative_to_cwd(file)} config to return " <>
"keyword list, got: #{inspect(config)}"
end
config
end
end
|
lib/mix/lib/mix/config.ex
| 0.615897 | 0.499512 |
config.ex
|
starcoder
|
defmodule MatrixReloaded do
@moduledoc """
Documentation for Matrix Reloaded library.
This library focuses only on updating, rearranging, and getting/dropping
rows/columns of a matrix. It also contains a few matrix operations like addition,
subtraction or multiplication. If you need fast operations on
matrices, please use the [Matrex](https://hexdocs.pm/matrex/Matrex.html) library instead.
Each matrix is represented as a "list of lists" and functions mostly return
[Result](https://hexdocs.pm/result/api-reference.html). It means either tuple
of `{:ok, object}` or `{:error, "msg"}` where `object` is either `matrix` or
`submatrix`, `vector` or `number`.
Numbering of the rows and columns of a matrix starts at `0` and goes up to `m - 1`
and `n - 1`, where `{m, n}` is the dimension (size) of the matrix. The same holds
for row and column vectors.
If you want to save your matrix to a file, you can use the [CSVlixir](https://hexdocs.pm/csvlixir/api-reference.html) package and define a function like
```elixir
def save_csv(matrix, file_name \\\\ "matrix.csv") do
file_name
|> File.open([:write], fn file ->
matrix
|> CSVLixir.write()
|> Enum.each(&IO.write(file, &1))
end)
end
```
For example, you can choose where to save your matrix (here, the `/tmp` directory):
```elixir
MatrixReloaded.Matrix.new(3, 1)
|> Result.and_then(&MatrixReloaded.Matrix.save_csv(&1, "/tmp/matrix.csv"))
# {:ok, :ok}
```
## Examples:
iex> alias MatrixReloaded.Matrix
iex> up = Matrix.diag([2, 2, 2], 1)
iex> down = Matrix.diag([2, 2, 2], -1)
iex> diag = Matrix.diag([3, 3, 3, 3])
iex> band_mat = Result.and_then_x([up, down], &Matrix.add(&1, &2))
iex> band_mat = Result.and_then_x([band_mat, diag], &Matrix.add(&1, &2))
{:ok,
[
[3, 2, 0, 0],
[2, 3, 2, 0],
[0, 2, 3, 2],
[0, 0, 2, 3]
]
}
iex> ones = Matrix.new(2, 1)
iex> mat = Result.and_then_x([band_mat, ones], &Matrix.update(&1, &2, {1, 1}))
{:ok,
[
[3, 2, 0, 0],
[2, 1, 1, 0],
[0, 1, 1, 2],
[0, 0, 2, 3]
]
}
iex> mat |> Result.and_then(&Matrix.get_row(&1, 4))
{:error, "You can not get row from the matrix. The row number 4 is outside of matrix!"}
iex> mat |> Result.and_then(&Matrix.get_row(&1, 3))
{:ok, [0, 0, 2, 3]}
iex> mat |> Result.and_then(&Matrix.drop_col(&1, 3))
{:ok,
[
[3, 2, 0],
[2, 3, 2],
[0, 2, 3],
[0, 0, 2]
]
}
"""
end
|
lib/matrix_reloaded.ex
| 0.8789 | 0.928279 |
matrix_reloaded.ex
|
starcoder
|
defmodule UnblockMeSolver.Move.Helper do
alias UnblockMeSolver.Move.Helper
@moduledoc false
@doc """
Moves a block right and returns a tuple {blocked_block, updated_problem}
of the block in the way (nil if no block is in the way) and the updated
problem (assuming it was successful)
## Examples
iex> UnblockMeSolver.Move.Helper.right_with_next([
...> ['C', 'C', nil],
...> ['A', 'A', nil],
...> ['D', 'D', nil],
...>], 'A')
{nil, [
['C', 'C', nil],
[nil, 'A', 'A'],
['D', 'D', nil],
]}
iex> UnblockMeSolver.Move.Helper.right_with_next([
...> ['A', 'A', 'B'],
...> [nil, nil, 'B'],
...> [nil, nil, nil],
...>], 'A')
{'B', [
['A', 'A', 'B'],
[nil, nil, 'B'],
[nil, nil, nil],
]}
iex> UnblockMeSolver.Move.Helper.right_with_next([['A', 'A']], 'A')
{nil, [['A', 'A']]}
"""
def right_with_next(problem, block) do
cells_after_block = problem
|> Enum.find(fn row -> Enum.any?(row, fn x -> x == block end) end)
|> Enum.reverse
|> Enum.take_while(fn x -> x != block end)
|> Enum.reverse
next_block = Enum.at(cells_after_block, 0)
cond do
Enum.count(cells_after_block) == 0 -> {nil, problem}
next_block == nil -> {next_block, Helper.right_in_row(problem, block)}
true -> {next_block, problem}
end
end
@doc """
Moves a block left and returns a tuple {blocked_block, updated_problem}
of the block in the way (nil if no block is in the way) and the updated
problem (assuming it was successful)
## Examples
iex> UnblockMeSolver.Move.Helper.left_with_next([
...> [nil, 'C', 'C'],
...> [nil, 'A', 'A'],
...> [nil, 'D', 'D'],
...>], 'A')
{nil, [
[nil, 'C', 'C'],
['A', 'A', nil],
[nil, 'D', 'D'],
]}
# iex> UnblockMeSolver.Move.Helper.left_with_next([
# ...> ['A', 'A', nil],
# ...> [nil, nil, nil],
# ...> [nil, nil, nil],
# ...>], 'A')
# {nil, [
# ['A', 'A', nil],
# [nil, nil, nil],
# [nil, nil, nil],
# ]}
"""
def left_with_next(problem, block) do
{next_block, rotated_problem} = problem
|> Helper.rotate_ccw
|> Helper.rotate_ccw
|> Helper.right_with_next(block)
{
next_block,
rotated_problem
|> Helper.rotate_cw
|> Helper.rotate_cw
}
end
@doc """
Moves a block down and returns a tuple {blocked_block, updated_problem}
of the block in the way (nil if no block is in the way) and the updated
problem (assuming it was successful)
## Examples
iex> UnblockMeSolver.Move.Helper.down_with_next([
...> ['A'],
...> ['A'],
...> [nil],
...>], 'A')
{nil, [
[nil],
['A'],
['A'],
]}
iex> UnblockMeSolver.Move.Helper.down_with_next([
...> ['A', nil],
...> ['A', nil],
...> ['B', 'B'],
...>], 'A')
{'B', [
['A', nil],
['A', nil],
['B', 'B'],
]}
"""
def down_with_next(problem, block) do
{next_block, rotated_problem} = problem
|> Helper.rotate_ccw
|> Helper.right_with_next(block)
{
next_block,
rotated_problem
|> Helper.rotate_cw
}
end
@doc """
Moves a block up and returns a tuple {blocked_block, updated_problem}
of the block in the way (nil if no block is in the way) and the updated
problem (assuming it was successful)
## Examples
iex> UnblockMeSolver.Move.Helper.up_with_next([
...> [nil],
...> ['A'],
...> ['A'],
...>], 'A')
{nil, [
['A'],
['A'],
[nil],
]}
iex> UnblockMeSolver.Move.Helper.up_with_next([
...> ['B', 'B'],
...> ['A', nil],
...> ['A', nil],
...>], 'A')
{'B', [
['B', 'B'],
['A', nil],
['A', nil],
]}
"""
def up_with_next(problem, block) do
{ next_block, rotated_problem } = problem
|> Helper.rotate_cw
|> Helper.right_with_next(block)
{
next_block,
rotated_problem
|> Helper.rotate_ccw
}
end
@doc """
Finds and moves a block right by 1 in a problem
## Examples
iex> UnblockMeSolver.Move.Helper.right_in_row([
...> ['C', 'C', nil, nil],
...> ['A', 'A', nil, nil],
...> ['D', 'D', nil, nil],
...>], 'A')
[
['C', 'C', nil, nil],
[nil, 'A', 'A', nil],
['D', 'D', nil, nil],
]
"""
def right_in_row(problem, block) do
problem
|> Enum.map(fn row ->
if Enum.any?(row, fn x -> x == block end) do
Helper.right(row, block)
else
row
end
end)
end
@doc """
Moves a block right by 1
## Examples
iex> UnblockMeSolver.Move.Helper.right(['A', 'A', nil], 'A')
[nil, 'A', 'A']
iex> UnblockMeSolver.Move.Helper.right([nil, 'A', 'A', nil], 'A')
[nil, nil, 'A', 'A']
"""
def right(row, block) do
row
|> Helper.stretch_right(block)
|> Helper.shrink_right(block)
end
@doc """
Copies the last block to the adjacent-right cell
## Examples
iex> UnblockMeSolver.Move.Helper.stretch_right(['A', 'A', nil], 'A')
['A', 'A', 'A']
"""
def stretch_right(row, block) do
blocks = Enum.filter(row, fn x -> x == block end)
all_but_the_first_part = Enum.drop_while(row, fn x -> x != block end)
last_part = Enum.drop_while(all_but_the_first_part, fn x -> x == block end)
Enum.take_while(row, fn x -> x != block end)
|> Enum.concat(blocks)
|> Enum.concat([block])
|> Enum.concat(Enum.drop(last_part, 1))
end
@doc """
Removes the first occurrence of a block
## Examples
iex> UnblockMeSolver.Move.Helper.shrink_right(['A', 'A', 'A'], 'A')
[nil, 'A', 'A']
"""
def shrink_right(row, block) do
blocks = Enum.filter(row, fn x -> x == block end)
all_but_the_first_part = Enum.drop_while(row, fn x -> x != block end)
last_part = Enum.drop_while(all_but_the_first_part, fn x -> x == block end)
row
|> Enum.take_while(fn x -> x != block end)
|> Enum.concat([nil])
|> Enum.concat(Enum.drop(blocks, 1))
|> Enum.concat(last_part)
end
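# The rotation helpers below let the vertical moves (`up_with_next/2` and
# `down_with_next/2`) reuse the horizontal logic: rotate the board, move
# right, then rotate back. `Transpose.transpose/1` is assumed to come from
# an external dependency.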
def rotate_ccw(problem) do
problem
|> Enum.map(&Enum.reverse/1)
|> Transpose.transpose()
end
def rotate_cw(problem) do
problem
|> Transpose.transpose()
|> Enum.map(&Enum.reverse/1)
end
end
|
lib/unblock_me_solver/move/helper.ex
| 0.8119 | 0.518059 |
helper.ex
|
starcoder
|
defmodule Elastic.Document.API do
@moduledoc ~S"""
The Document API provides some helpers for interacting with documents.
The Document API extracts away a lot of the repetition of querying /
indexing of a particular index. Here's an example:
```
defmodule Answer do
@es_type "answer"
@es_index "answer"
use Elastic.Document.API
defstruct id: nil, text: []
end
```
You may also specify the index at query/insertion as the last (optional) argument to
all Document functions. You will receive warnings if @es_index is undefined in
the using module, but you may either ignore these or specify `@es_index "N/A"` or other
unused value if a default index does not make sense for your collection, such as permission based
partitioning, or per-company partitioning in a SaaS application.
## Index
Then you can index a new `Answer` by doing:
```elixir
Answer.index(1, %{text: "This is an answer"})
```
or
```elixir
Answer.index(1, %{text: "This is an answer"}, "explicit_named_index")
```
if not using default index behavior. All examples below may also be
modified the same way if using an explicit index.
## Searching
The whole point of Elastic Search is to search for things, and there's a
function for that:
```elixir
Answer.search(%{
query: %{
match: %{text: "answer"}
},
})
```
The query syntax is exactly like the JSON you've come to know and love from
using Elastic Search, except it's Elixir maps.
This will return a list of `Answer` structs.
```
[
%Answer{id: 1, text: "This is an answer"},
...
]
```
If you want the raw search result, use `raw_search` instead:
```
Answer.raw_search(%{
query: %{
match: %{text: "answer"}
},
})
```
This will return the raw result, without the wrapping of the structs:
```
{:ok, 200,
  [
    %{"_id" => "1", "_index" => "answer",
      "_source" => %{"text" => "This is an answer"}, "_type" => "answer", "_version" => 1,
      "found" => true},
    ...
  ]
}
```
## Counting
Counting works the same as searching, but instead of returning all the hits,
it'll return a number.
```elixir
Answer.count(%{
query: %{
match: %{text: "answer"}
},
})
```
## Get
And you can get that answer with:
```elixir
Answer.get(1)
```
This will return an Answer struct:
```elixir
%Answer{id: 1, text: "This is an answer"}
```
## Raw Get
If you want the raw result, use `raw_get` instead:
```elixir
Answer.raw_get(1)
```
This returns the raw data from Elastic Search, without the wrapping of the struct:
```elixir
{:ok, 200,
  %{"_id" => "1", "_index" => "answer",
    "_source" => %{"text" => "This is an answer"}, "_type" => "answer", "_version" => 1,
    "found" => true}
}
```
## Updating
You can update the answer by using `update` (or `index`, since `update` is just an "alias")
```elixir
Answer.update(1, %{text: "This is an answer"})
```
## Deleting
Deleting a document from the index is as easy as:
```elixir
Answer.delete(1)
```
"""
defmacro __using__(_) do
quote do
alias Elastic.Document
alias Elastic.HTTP
alias Elastic.Index
alias Elastic.Query
def index(id, data, es_index \\ @es_index) do
Document.index(es_index, @es_type, id, data)
end
def update(id, data, es_index \\ @es_index) do
Document.update(es_index, @es_type, id, data)
end
def get(id, es_index \\ @es_index) do
case raw_get(id, es_index) do
{:ok, 200, %{"_source" => source, "_id" => id}} ->
into_struct(id, source)
{:error, 404, %{"found" => false}} -> nil
other -> other
end
end
def delete(id, es_index \\ @es_index) do
Document.delete(es_index, @es_type, id)
end
def raw_get(id, es_index \\ @es_index) do
Document.get(es_index, @es_type, id)
end
def search(query, es_index \\ @es_index) do
result = Query.build(es_index, query) |> Index.search
{:ok, 200, %{"hits" => %{"hits" => hits}}} = result
Enum.map(hits, fn (%{"_source" => source, "_id" => id}) ->
into_struct(id, source)
end)
end
def raw_search(query, es_index \\ @es_index) do
search_query(query, es_index) |> Index.search
end
def search_query(query, es_index \\ @es_index) do
Query.build(es_index, query)
end
def raw_count(query, es_index \\ @es_index) do
Query.build(es_index, query) |> Index.count
end
def count(query, es_index \\ @es_index) do
  {:ok, 200, %{"count" => count}} = raw_count(query, es_index)
  count
end
def index_exists? do
Index.exists?(@es_index)
end
defp into_struct(id, source) do
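# Note: keys are converted with `String.to_atom/1`; atoms are not garbage
# collected, so this assumes keys come from your own bounded index mapping
# rather than arbitrary input.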
item = for {key, value} <- source, into: %{},
do: {String.to_atom(key), value}
struct(__MODULE__, Map.put(item, :id, id))
end
end
end
end
|
lib/elastic/document/api.ex
| 0.90703 | 0.944125 |
api.ex
|
starcoder
|
defmodule Earmark do
@moduledoc """
# Earmark—A Pure Elixir Markdown Processor
## Dependency
{ :earmark, "> x.y.z" }
## Usage
### API
html_doc = Earmark.to_html(markdown)
html_doc = Earmark.to_html(markdown, options)
(See the documentation for `to_html` for options)
### Command line
$ mix escript.build
$ ./earmark file.md
## Supports
Standard [Gruber markdown][gruber].
[gruber]: <http://daringfireball.net/projects/markdown/syntax>
## Extensions
### Tables
Github Flavored Markdown tables are supported
State | Abbrev | Capital
----: | :----: | -------
Texas | TX | Austin
Maine | MN | Augusta
Tables may have leading and trailing vertical bars on each line
| State | Abbrev | Capital |
| ----: | :----: | ------- |
| Texas | TX | Austin |
| Maine | MN | Augusta |
Tables need not have headers, in which case all column alignments
default to left.
| Texas | TX | Austin |
| Maine | MN | Augusta |
Currently we assume there are always spaces around interior vertical
bars. It isn't clear what the expectation is.
### Adding HTML attributes
HTML attributes can be added to any block-level element. We use
the Kramdown syntax: add the line `{:` _attrs_ `}` following the block.
_attrs_ can be one or more of:
* `.className`
* `#id`
* name=value, name="value", or name='value'
For example:
# Warning
{: .red}
Do not turn off the engine
if you are at altitude.
{: .boxed #warning spellcheck="true"}
## Limitations
* Nested block-level HTML is correctly handled only if each HTML
tag appears on its own line. So
<div>
<div>
hello
</div>
</div>
will work. However. the following won't
<div><div>
hello
</div></div>
* <NAME>'s tests contain an ambiguity when it comes to
lines that might be the start of a list inside paragraphs.
One test says that
This is the text
* of a paragraph
that I wrote
is a single paragraph. The "*" is not significant. However, another
test has
* A list item
* an another
and expects this to be a nested list. But, in reality, the second could just
be the continuation of a paragraph.
I've chosen always to use the second interpretation—a line that looks like
a list item will always be a list item.
## Security
Please be aware that Markdown is not a secure format. Earmark produces HTML from Markdown
and HTML. It is your job to sanitize and/or filter the output of `Earmark.to_html` if
you cannot trust the input and are to serve the produced HTML on the Web.
## Author
Copyright © 2014 <NAME>, The Pragmatic Programmers
@/+pragdave, <EMAIL>
Licensed under the same terms as Elixir, which is Apache 2.0.
"""
# #### Use as_html! if you do not care to catch errors
# html_doc = Earmark.as_html!(markdown)
# html_doc = Earmark.as_html!(markdown, options)
# (See the documentation for `as_html` for options)
#### Or do pattern matching on the result of as_html
# case Earmark.as_html( markdown )
# {:ok, html} -> html
# {:error, reason} -> ...
alias Earmark.Options
alias Earmark.Context
@doc """
Given a markdown document (as either a list of lines or
a string containing newlines), return an HTML representation.
The options are a `%Earmark.Options{}` structure:
* `renderer`: ModuleName
The module used to render the final document. Defaults to
`Earmark.HtmlRenderer`
* `gfm`: boolean
True by default. Turns on Github Flavored Markdown extensions
* `breaks`: boolean
Only applicable if `gfm` is enabled. Makes all line breaks
significant (so every line in the input is a new line in the
output).
* `smartypants`: boolean
Turns on smartypants processing, so quotes become curly, two
or three hyphens become en and em dashes, and so on. True by
default.
So, to format the document in `original` and disable smartypants,
you'd call
alias Earmark.Options
result = Earmark.to_html(original, %Options{smartypants: false})
"""
@spec to_html(String.t | list(String.t), %Options{}) :: String.t
def to_html(lines, options \\ %Options{})
def to_html(lines, options = %Options{}) do
lines |> parse(options) |> _to_html(options)
end
defp _to_html({blocks, context = %Context{}}, %Options{renderer: renderer, mapper: mapper}=_options) do
renderer.render(blocks, context, mapper)
end
@doc """
Given a markdown document (as either a list of lines or
a string containing newlines), return a parse tree and
the context necessary to render the tree.
The options are a `%Earmark.Options{}` structure. See `to_html`
for more details.
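
A minimal sketch (the exact block structs depend on the parser internals):

    {blocks, context} = Earmark.parse("Hello *world*", %Earmark.Options{})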
"""
@spec parse(String.t | list(String.t), %Options{}) :: { Earmark.Block.ts, %Context{} }
def parse(lines, options = %Options{mapper: mapper}) when is_list(lines) do
{ blocks, links } = Earmark.Parser.parse(lines, options, false)
context = %Earmark.Context{options: options, links: links }
|> Earmark.Inline.update_context
if options.footnotes do
{ blocks, footnotes } = Earmark.Parser.handle_footnotes(blocks, options, mapper)
context = put_in(context.footnotes, footnotes)
{ blocks, context }
else
{ blocks, context }
end
end
def parse(lines, options) when is_binary(lines) do
lines |> string_to_list |> parse(options)
end
@spec string_to_list(String.t()) :: list(String.t())
defp string_to_list(document) do
document |> String.split(~r{\r\n?|\n})
end
@doc false
@spec pmap(list(a), (a -> Earmark.Line.t())) :: Earmark.Line.ts() when a: var
def pmap(collection, func) do
collection
|> Enum.map(fn item -> Task.async(fn -> func.(item) end) end)
|> Enum.map(&Task.await/1)
end
end
|
deps/earmark/lib/earmark.ex
| 0.756447 | 0.48249 |
earmark.ex
|
starcoder
|
defmodule Plug.Telemetry.ServerTiming do
@behaviour Plug
@moduledoc """
This plug provides support for the [`Server-Timing`][st] header, which allows you to
display server-side measurements in browser developer tools and to access them
programmatically via the Performance API in JavaScript.
## Usage
Just add it as a plug into your pipeline:
```
plug Plug.Telemetry.ServerTiming
```
And call `install/1` with list of `{event_name, measurement}` in your
application startup, for example for Phoenix and Ecto application:
```
Plug.Telemetry.ServerTiming.install([
{[:phoenix, :endpoint, :stop], :duration},
{[:my_app, :repo, :query], :queue_time},
{[:my_app, :repo, :query], :query_time},
{[:my_app, :repo, :query], :decode_time}
])
```
And then it will be visible in your DevTools.
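Any `:telemetry` event that carries a numeric measurement can be exposed this
way. For example (the `[:my_app, :render]` event below is hypothetical), you can
time a step yourself, as long as the measurement is taken in `:native` time
units (this plug converts them to milliseconds):

```
# at application startup
Plug.Telemetry.ServerTiming.install([
  {[:my_app, :render], :duration}
])

# later, inside the request process
start_time = System.monotonic_time()
# ... do the work ...
duration = System.monotonic_time() - start_time
:telemetry.execute([:my_app, :render], %{duration: duration}, %{})
```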
### Important
You need to place this plug **BEFORE** the `Plug.Telemetry` call, as otherwise it
will not see its events (`before_send` callbacks are called in reverse order
of declaration, so this one needs to be added before the `Plug.Telemetry` one).
## Caveats
This will not respond with events that happened in separate processes, only
events that happened in the Plug process will be recorded.
### WARNING
The current specification of `Server-Timing` does not provide a way to specify event
start times, which means that the data displayed in the DevTools isn't a trace
report (like the content of the "regular" HTTP timings) but a raw dump of the data
displayed as bars. This can be a little bit confusing, but right now there is
nothing I can do about it.
[st]: https://w3c.github.io/server-timing/#the-server-timing-header-field
"""
import Plug.Conn
@impl true
@doc false
def init(opts), do: opts
@impl true
@doc false
def call(conn, _opts) do
Process.put(__MODULE__, {true, %{}})
register_before_send(conn, &timings/1)
end
@doc """
Define which events should be available within response headers.
"""
@spec install(events) :: :ok when events: map() | [{:telemetry.event_name(), atom()}]
def install(events) do
for {name, metric} <- events do
:ok = :telemetry.attach({__MODULE__, name, metric}, name, &__MODULE__.__handle__/4, metric)
end
:ok
end
@doc false
def __handle__(metric_name, measurements, _metadata, metric) do
with %{^metric => duration} <- measurements,
{true, data} <- Process.get(__MODULE__) do
Process.put(
__MODULE__,
{true, Map.update(data, {metric_name, metric}, duration, &(&1 + duration))}
)
:ok
else
_ -> :ok
end
end
defp timings(conn) do
{_, measurements} = Process.get(__MODULE__, {false, %{}})
if measurements == %{} do
conn
else
put_resp_header(conn, "server-timing", render_measurements(measurements))
end
end
defp render_measurements(measurements) do
millis = System.convert_time_unit(1, :millisecond, :native)
measurements
|> Enum.map(fn {{metric_name, metric}, measurement} ->
name = "#{Enum.join(metric_name, ".")}.#{metric}"
duration = measurement / millis
"#{name};dur=#{duration}"
end)
|> Enum.join(",")
end
end
|
lib/plug_telemetry_server_timing.ex
| 0.914434 | 0.769254 |
plug_telemetry_server_timing.ex
|
starcoder
|
defmodule PowPersistentSession.Plug.Cookie do
@moduledoc """
This plug will handle persistent user sessions with cookies.
The cookie and token will expire after 30 days. The token in the cookie can
only be used once to create a session.
If an assigned private `:pow_session_metadata` key exists in the conn with a
keyword list containing a `:fingerprint` key, that fingerprint value will be
set along with the user clause as the persistent session value as
`{[id: user_id], session_metadata: [fingerprint: fingerprint]}`.
The token used in the client is signed using `Pow.Plug.sign_token/4` to
prevent timing attacks.
## Example
defmodule MyAppWeb.Endpoint do
# ...
plug Pow.Plug.Session, otp_app: :my_app
plug PowPersistentSession.Plug.Cookie
#...
end
## Configuration options
* `:persistent_session_store` - see `PowPersistentSession.Plug.Base`
* `:cache_store_backend` - see `PowPersistentSession.Plug.Base`
* `:persistent_session_cookie_key` - session key name. This defaults to
"persistent_session". If `:otp_app` is used it'll automatically prepend
the key with the `:otp_app` value.
* `:persistent_session_ttl` - used for both backend store and max age for
cookie. See `PowPersistentSession.Plug.Base` for more.
* `:persistent_session_cookie_opts` - keyword list of cookie options, see
`Plug.Conn.put_resp_cookie/4` for options. The default options are
`[max_age: max_age, path: "/"]` where `:max_age` is the value defined in
`:persistent_session_ttl`.
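For example, to expire persistent sessions after one day and send the cookie
over HTTPS only (a sketch; this assumes the usual app-level Pow configuration):

    config :my_app, :pow,
      persistent_session_ttl: :timer.hours(24),
      persistent_session_cookie_opts: [secure: true]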
## Custom metadata
You can assign a private `:pow_persistent_session_metadata` key in the conn
with custom metadata as a keyword list. The only current use this has is to
set `:session_metadata` that'll be passed on as `:pow_session_metadata` for
new session generation.
session_metadata =
conn.private
|> Map.get(:pow_session_metadata, [])
|> Keyword.take([:first_seen_at])
Plug.Conn.put_private(conn, :pow_persistent_session_metadata, session_metadata: session_metadata)
This ensure that you are able to keep session metadata consistent between
browser sessions.
When a persistent session token is used, the
`:pow_persistent_session_metadata` assigns key in the conn will be populated
with a `:session_metadata` keyword list so that the session metadata that was
pulled from the persistent session can be carried over to the new persistent
session. `:fingerprint` will always be ignored as to not record the old
fingerprint.
"""
use PowPersistentSession.Plug.Base
alias Plug.Conn
alias Pow.{Config, Operations, Plug, UUID}
@cookie_key "persistent_session"
@doc """
Sets a persistent session cookie with a randomly generated unique token.
The token is set as a key in the persistent session cache with the id fetched
from the struct. Any existing persistent session will be deleted first with
`delete/2`.
If an assigned private `:pow_session_metadata` key exists in the conn with a
keyword list containing a `:fingerprint` value, then that value will be set
in a `:session_metadata` keyword list in the persistent session metadata. The
value will look like:
`{[id: user_id], session_metadata: [fingerprint: fingerprint]}`
The unique token will be prepended by the `:otp_app` configuration value, if
present.
The token will be signed for public consumption with `Pow.Plug.sign_token/4`.
"""
@spec create(Conn.t(), map(), Config.t()) :: Conn.t()
def create(conn, user, config) do
conn
|> delete(config)
|> before_send_create(user, config)
end
defp before_send_create(conn, user, config) do
{store, store_config} = store(config)
token = gen_token(config)
value = persistent_session_value(conn, user, config)
register_before_send(conn, fn conn ->
store.put(store_config, token, value)
client_store_put(conn, token, config)
end)
end
defp persistent_session_value(conn, user, config) do
clauses = user_to_get_by_clauses!(user, config)
metadata =
conn.private
|> Map.get(:pow_persistent_session_metadata, [])
|> maybe_put_fingerprint_in_session_metadata(conn)
{clauses, metadata}
end
defp user_to_get_by_clauses!(user, config) do
case Operations.fetch_primary_key_values(user, config) do
{:ok, clauses} -> clauses
{:error, error} -> raise error
end
end
defp maybe_put_fingerprint_in_session_metadata(metadata, conn) do
conn.private
|> Map.get(:pow_session_metadata, [])
|> Keyword.get(:fingerprint)
|> case do
nil ->
metadata
fingerprint ->
session_metadata =
metadata
|> Keyword.get(:session_metadata, [])
|> Keyword.put_new(:fingerprint, fingerprint)
Keyword.put(metadata, :session_metadata, session_metadata)
end
end
@doc """
Expires the persistent session.
If a persistent session cookie exists, the token in the persistent session
cache will be deleted, and the cookie deleted with
`Plug.Conn.delete_resp_cookie/3`.
"""
@spec delete(Conn.t(), Config.t()) :: Conn.t()
def delete(conn, config), do: before_send_delete(conn, config)
defp before_send_delete(conn, config) do
register_before_send(conn, fn conn ->
case client_store_fetch(conn, config) do
{nil, conn} ->
conn
{token, conn} ->
expire_token_in_store(token, config)
client_store_delete(conn, config)
end
end)
end
defp expire_token_in_store(token, config) do
{store, store_config} = store(config)
store.delete(store_config, token)
end
@doc """
Authenticates a user with the persistent session cookie.
If a persistent session cookie exists, it'll fetch the credentials from the
persistent session cache.
If the credentials were fetched successfully, a global lock is set and the token
in the cache is deleted, a new session is created, and `create/2` is called
to create a new persistent session cookie. If setting the lock failed, the
fetched user will be set on the `conn` with
`Pow.Plug.assign_current_user/3`.
If a `:session_metadata` keyword list is fetched from the persistent session
metadata, all the values will be merged into the private
`:pow_session_metadata` key in the conn.
The persistent session token will be decoded and verified with
`Pow.Plug.verify_token/4`.
"""
@spec authenticate(Conn.t(), Config.t()) :: Conn.t()
def authenticate(conn, config) do
case client_store_fetch(conn, config) do
{nil, conn} -> conn
{token, conn} -> do_authenticate(conn, token, config)
end
end
defp do_authenticate(conn, token, config) do
{store, store_config} = store(config)
{token, store.get(store_config, token)}
|> fetch_user(config)
|> case do
:error ->
conn
{token, nil} ->
expire_token_in_store(token, config)
conn
{token, {user, metadata}} ->
lock_auth_user(conn, token, user, metadata, config)
end
end
defp fetch_user({_token, :not_found}, _config), do: :error
defp fetch_user({token, {clauses, metadata}}, config) do
clauses
|> filter_invalid!()
|> Operations.get_by(config)
|> case do
nil -> {token, nil}
user -> {token, {user, metadata}}
end
end
# TODO: Remove by 1.1.0
defp fetch_user({token, user_id}, config),
do: fetch_user({token, {user_id, []}}, config)
defp filter_invalid!([id: _value] = clauses), do: clauses
defp filter_invalid!(clauses), do: raise "Invalid get_by clauses stored: #{inspect clauses}"
defp lock_auth_user(conn, token, user, metadata, config) do
id = {[__MODULE__, token], self()}
nodes = Node.list() ++ [node()]
case :global.set_lock(id, nodes, 0) do
true ->
conn
|> auth_user(user, metadata, config)
|> register_before_send(fn conn ->
:global.del_lock(id, nodes)
conn
end)
false ->
Plug.assign_current_user(conn, user, config)
end
end
defp auth_user(conn, user, metadata, config) do
conn
|> update_persistent_session_metadata(metadata)
|> update_session_metadata(metadata)
|> create(user, config)
|> Plug.create(user, config)
end
defp update_persistent_session_metadata(conn, metadata) do
case Keyword.get(metadata, :session_metadata) do
nil ->
conn
session_metadata ->
current_metadata =
conn.private
|> Map.get(:pow_persistent_session_metadata, [])
|> Keyword.get(:session_metadata, [])
metadata =
session_metadata
|> Keyword.merge(current_metadata)
|> Keyword.delete(:fingerprint)
Conn.put_private(conn, :pow_persistent_session_metadata, session_metadata: metadata)
end
end
defp update_session_metadata(conn, metadata) do
case Keyword.get(metadata, :session_metadata) do
nil ->
fallback_session_fingerprint(conn, metadata)
session_metadata ->
metadata = Map.get(conn.private, :pow_session_metadata, [])
Conn.put_private(conn, :pow_session_metadata, Keyword.merge(session_metadata, metadata))
end
end
# TODO: Remove by 1.1.0
defp fallback_session_fingerprint(conn, metadata) do
case Keyword.get(metadata, :session_fingerprint) do
nil ->
conn
fingerprint ->
metadata =
conn.private
|> Map.get(:pow_session_metadata, [])
|> Keyword.put(:fingerprint, fingerprint)
Conn.put_private(conn, :pow_session_metadata, metadata)
end
end
defp gen_token(config) do
uuid = UUID.generate()
Plug.prepend_with_namespace(config, uuid)
end
defp client_store_fetch(conn, config) do
conn = Conn.fetch_cookies(conn)
with token when is_binary(token) <- conn.req_cookies[cookie_key(config)],
{:ok, token} <- Plug.verify_token(conn, signing_salt(), token, config) do
{token, conn}
else
_any -> {nil, conn}
end
end
defp signing_salt(), do: Atom.to_string(__MODULE__)
defp client_store_put(conn, token, config) do
signed_token = Plug.sign_token(conn, signing_salt(), token, config)
conn
|> Conn.fetch_cookies()
|> Conn.put_resp_cookie(cookie_key(config), signed_token, cookie_opts(config))
end
defp client_store_delete(conn, config) do
conn
|> Conn.fetch_cookies()
|> Conn.delete_resp_cookie(cookie_key(config), cookie_opts(config))
end
defp cookie_key(config) do
Config.get(config, :persistent_session_cookie_key, default_cookie_key(config))
end
defp default_cookie_key(config) do
Plug.prepend_with_namespace(config, @cookie_key)
end
defp cookie_opts(config) do
config
|> Config.get(:persistent_session_cookie_opts, [])
|> Keyword.put_new(:max_age, max_age(config))
|> Keyword.put_new(:path, "/")
end
defp max_age(config) do
# TODO: Remove by 1.1.0
case Config.get(config, :persistent_session_cookie_max_age) do
nil ->
config
|> PowPersistentSession.Plug.Base.ttl()
|> Integer.floor_div(1000)
max_age ->
IO.warn("use of `:persistent_session_cookie_max_age` config value in #{inspect unquote(__MODULE__)} is deprecated, please use `:persistent_session_ttl`")
max_age
end
end
end
|
lib/extensions/persistent_session/plug/cookie.ex
| 0.837454 | 0.45641 |
cookie.ex
|
starcoder
|
defmodule WeebPotion.Struct.Client do
defstruct token_type: :Wolke, token: nil, application_name: nil, version: nil, environment: "dev", headers: nil
@typedoc """
This type represents the client needed to authenticate requests and hold small pieces of information. The keys are as follows:
* `:token_type` - An atom that can be either `:Wolke` or `:Bearer`. All modern weeb.sh tokens are of the `:Wolke` type.
* `:token` - A binary string equal to your weeb.sh token used to authenticate requests.
* `:application_name` - A binary string equal to the name of the application.
* `:version` - A binary string equal to the version of the application.
* `:environment` - A binary string representing the environment of the application, such as `dev` or `alpha`.
* `:headers` - A key-word list containing the HTTP Headers, used to avoid re-creating constant information.
"""
@type t :: %__MODULE__{}
@doc false
def start(_opts), do: WeebPotion.Requester.start()
@doc """
Constructs a new client struct using the options passed in via the `opts` key-word list.
## Parameters
- `opts`: A key-word list containing options to construct a client with.
## Examples
```
iex> WeebPotion.Struct.Client.new(token: "<PASSWORD>", application_name: "test", version: "0.1.0")
%WeebPotion.Struct.Client{
  application_name: "test",
  headers: [
    Authorization: "Wolke <PASSWORD>",
    "User-Agent": "test/0.1.0/dev"
  ],
  environment: "dev",
  token: "<PASSWORD>",
  token_type: :Wolke,
  version: "0.1.0"
}
```
"""
@spec new(list()) :: t
def new(opts) when (is_list(opts)) do
token_type = opts[:token_type] || :Wolke
token = opts[:token]
if !is_binary(token), do: raise "token is nil or not a binary string"
environment = opts[:environment] || "dev"
version = opts[:version]
if !is_binary(version), do: raise "version is nil or not a binary string"
name = opts[:application_name]
if !is_binary(name), do: raise "application_name is nil or not a binary string"
%__MODULE__{token_type: token_type, token: token, application_name: name, version: version, environment: environment,
headers: ["Authorization": "#{token_type} #{token}", "User-Agent": "#{name}/#{version}/#{environment}"]}
end
end
|
lib/weebpotion/struct/client.ex
| 0.918031 | 0.814901 |
client.ex
|
starcoder
|
defmodule GGity.Geom.Point do
@moduledoc false
alias GGity.{Geom, Plot}
@type t() :: %__MODULE__{}
@type plot() :: %Plot{}
@type record() :: map()
@type mapping() :: map()
defstruct data: nil,
mapping: nil,
stat: :identity,
position: :identity,
key_glyph: :point,
alpha: 1,
color: "black",
shape: :circle,
size: 6,
custom_attributes: nil
@spec new(mapping(), keyword()) :: Geom.Point.t()
def new(mapping, options) do
struct(Geom.Point, [{:mapping, mapping} | options])
end
@spec draw(Geom.Point.t(), list(map()), plot()) :: iolist()
def draw(%Geom.Point{} = geom_point, data, plot), do: points(geom_point, data, plot)
defp points(%Geom.Point{} = geom_point, data, %Plot{scales: scales} = plot) do
scale_transforms =
geom_point.mapping
|> Map.keys()
|> Enum.reduce(%{}, fn aesthetic, mapped ->
Map.put(mapped, aesthetic, Map.get(scales[aesthetic], :transform))
end)
transforms =
geom_point
|> Map.take([:alpha, :color, :shape, :size])
|> Enum.reduce(%{}, fn
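# Fixed (non-mapped) aesthetics become constant functions; a fixed size is
# squared on the assumption that the value should scale the marker's area
# rather than its radius.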
{:size, fixed_value}, fixed ->
Map.put(fixed, :size, fn _value -> :math.pow(fixed_value, 2) end)
{aesthetic, fixed_value}, fixed ->
Map.put(fixed, aesthetic, fn _value -> fixed_value end)
end)
|> Map.merge(scale_transforms)
Enum.map(data, fn row -> point(row, transforms, geom_point, plot) end)
end
defp point(row, transforms, geom_point, plot) do
mapping = geom_point.mapping
custom_attributes = GGity.Layer.custom_attributes(geom_point, plot, row)
transformed_values = [
transforms.x.(row[mapping.x]),
transforms.y.(row[mapping.y]),
transforms.alpha.(row[mapping[:alpha]]),
transforms.color.(row[mapping[:color]]),
transforms.shape.(row[mapping[:shape]]),
transforms.size.(row[mapping[:size]])
]
labelled_values = Enum.zip([:x, :y, :fill_opacity, :color, :shape, :size], transformed_values)
GGity.Shapes.draw(
labelled_values[:shape],
{labelled_values[:x] + plot.area_padding,
(plot.width - labelled_values[:y]) / plot.aspect_ratio + plot.area_padding},
labelled_values[:size],
Keyword.take(labelled_values, [:color, :fill_opacity]) ++ custom_attributes
)
end
end
|
lib/ggity/geom/point.ex
| 0.891261 | 0.61594 |
point.ex
|
starcoder
|
defmodule Xpeg do
@moduledoc """
XPeg is a pure Elixir pattern matching library. It provides macros to compile
patterns and grammars (PEGs) to Elixir functions which will parse a string and
collect selected parts of the input. PEGs are not unlike regular expressions,
but offer more power and flexibility, and have fewer ambiguities. (More about
PEGs on [Wikipedia](https://en.wikipedia.org/wiki/Parsing_expression_grammar))
Some use cases where XPeg is useful are configuration or data file parsers,
robust protocol implementations, input validation, lexing of programming
languages or domain specific languages.
## Examples
p = Xpeg.peg :dict do
:dict <- :pair * star("," * :pair) * !1
:pair <- :word * "=" * :number * fn [a,b|cs] -> [{b,a}|cs] end
:word <- cap(+{'a'..'z'})
:number <- cap(+{'0'..'9'}) * fn [v|cs] -> [String.to_integer(v) | cs] end
end
Xpeg.match(p, "grass=4,horse=1,star=2")
"""
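# Walks the capture stack from oldest to newest, pairing each `{:open, ...}`
# marker with its matching `{:close, ...}` and converting the text between
# them into the requested capture type (:str, :int or :float).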
defp collect(stack, acc, caps) do
case {stack, acc} do
{[{:open, s, si} | stack], _} ->
collect(stack, [{:open, s, si} | acc], caps)
{[{:close, _sc, sic, type} | stack], [{:open, so, sio} | acc]} ->
len = sic - sio
l = Enum.take(so, len)
# Convert capture to requested type
cap = case type do
:str -> to_string(l)
:int -> :erlang.list_to_integer(l)
:float -> try do
:erlang.list_to_float(l)
rescue
_ -> elem(Float.parse(to_string(l)), 0)
end
end
collect(stack, acc, [cap | caps])
{_, acc} ->
{acc, caps}
end
end
@doc false
def collect_captures(cap_stack, captures_prev) do
{cap_stack, captures} =
cap_stack
|> Enum.reverse()
|> collect([], [])
{cap_stack, captures ++ captures_prev}
end
@doc false
def dump_inst(inst) do
case inst do
{:code, code} -> [:code, Macro.to_string(code)]
inst -> Tuple.to_list(inst)
end
|> Enum.map(&inspect/1) |> Enum.join(" ")
end
@doc false
def trace(ip, cmd, s) do
ip = to_string(ip) |> String.pad_leading(4, " ")
s = Enum.take(s, 20) |> inspect |> String.pad_trailing(22, " ")
IO.puts(" #{ip} | #{s} | #{cmd} ")
end
@doc false
def unalias(name) do
case name do
{:__aliases__, _, [name]} -> name
_ -> name
end
end
defp make(start, rules, options) do
ast = %{start: unalias(start), rules: rules}
|> Xpeg.Linker.link_grammar(options)
|> Xpeg.Codegen.emit(options)
id = String.to_atom("#{inspect start}-#{inspect(make_ref())}")
{id, ast}
end
@doc """
Define a PEG grammar which uses `start` as the initial rule
"""
defmacro peg(start, _rules = [{:do, v}]) do
{id, ast} = make(start, Xpeg.Parser.parse(v), [])
quote do
Module.create(unquote(id), unquote(ast), Macro.Env.location(__ENV__))
end
end
@doc """
Define a PEG grammar which uses `start` as the initial rule, allowing
for additional options:
- `:trace` - if `true`, a trace is dumped during parser execution
- `:dump_ir` - if `true`, the IR (intermediate representation) of the
generated parser is dumped at compile time
- `:dump_code` - if `true`, the generated Elixir code for the parser
is dumped at compile time
- `:dump_graph` - if `true`, generate a graphical 'railroad' diagram
of the grammar at compile time
- `:userdata` - if `true`, elixir functions that are embedded in the grammar
take an additional accumulator argument and should return a tuple
`{captures | acc}` - the resulting accumulator value is available as
the `userdata` field in the return value of the `match()1 function
"""
defmacro peg(start, options, [{:do, v}]) do
if options[:dump_graph] do Xpeg.Railroad.draw(v) end
{id, ast} = make(start, Xpeg.Parser.parse(v), options)
quote do
Module.create(unquote(id), unquote(ast), Macro.Env.location(__ENV__))
end
end
@doc """
Define a grammar with one anonymous rule.
"""
defmacro patt(v) do
{id, ast} = make(:anon, %{anon: Xpeg.Parser.parse(v)}, [])
quote do
Module.create(unquote(id), unquote(ast), Macro.Env.location(__ENV__))
end
end
@doc """
Execute a grammar against a subject string. The result is a map with the following fields:
- `captures`: The captures made by the grammar
- `result`: either `:ok` or `:error`, depending on a successful match of the subject
- `time`: Time taken to match the subject (seconds)
- `match_len`: The total number of UTF-8 characters matched
- `userdata`: Returned userdata
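
For example (a sketch, reusing the `:dict` grammar from the module overview):

    result = Xpeg.match(p, "star=2")
    result.result   #=> :ok
    result.captures #=> [{"star", 2}]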
"""
def match(module, s, userdata \\ nil) do
ctx = userdata
module = elem(module, 1)
s = if is_binary(s) do to_charlist(s) else s end
{t1, _} = :erlang.statistics(:runtime)
{ctx, rest, si, result, cap_stack, captures} = module.parse(0, s, 0, ctx, [], [], [], [])
{_cap_stack, captures} = collect_captures(cap_stack, captures)
{t2, _} = :erlang.statistics(:runtime)
%{
captures: captures,
result: result,
rest: rest,
time: (t2-t1) / 1000,
match_len: si,
userdata: ctx,
}
end
end
# set ft=elixir
|
lib/xpeg.ex
| 0.872904 | 0.662278 |
xpeg.ex
|
starcoder
|
defmodule Re.PriceSuggestions do
@moduledoc """
Module for suggesting prices according to stored factors
"""
NimbleCSV.define(PriceSuggestionsParser, separator: ",", escape: "\"")
alias Re.{
Listing,
PriceSuggestions.Factors,
PriceSuggestions.Request,
Repo,
User
}
import Ecto.Query
alias Ecto.Changeset
def suggest_price(listing) do
listing
|> preload_if_struct()
|> get_factor_by_address()
|> do_suggest_price(listing)
end
defp get_factor_by_address(%{address: %{state: state, city: city, street: street}}),
do: Repo.get_by(Factors, state: state, city: city, street: street)
defp do_suggest_price(nil, _), do: {:error, :street_not_covered}
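# Linear model: suggested price = intercept + sum of (coefficient * feature),
# with missing listing features treated as 0.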
defp do_suggest_price(factors, listing) do
{:ok,
factors.intercept + (listing.area || 0) * factors.area +
(listing.bathrooms || 0) * factors.bathrooms + (listing.rooms || 0) * factors.rooms +
(listing.garage_spots || 0) * factors.garage_spots}
end
defp preload_if_struct(%Listing{} = listing), do: Repo.preload(listing, :address)
defp preload_if_struct(listing), do: listing
def save_factors(file) do
file
|> PriceSuggestionsParser.parse_string()
|> Stream.map(&csv_to_map/1)
|> Enum.each(&persist/1)
end
defp csv_to_map([state, city, street, intercept, area, bathrooms, rooms, garage_spots, r2]) do
%{
state: :binary.copy(state),
city: :binary.copy(city),
street: :binary.copy(street),
intercept: intercept |> Float.parse() |> elem(0),
area: area |> Float.parse() |> elem(0),
bathrooms: bathrooms |> Float.parse() |> elem(0),
rooms: rooms |> Float.parse() |> elem(0),
garage_spots: garage_spots |> Float.parse() |> elem(0),
r2: r2 |> Float.parse() |> elem(0)
}
end
defp persist(%{state: state, city: city, street: street} = line) do
case Repo.get_by(Factors, state: state, city: city, street: street) do
nil ->
%Factors{}
|> Factors.changeset(line)
|> Repo.insert()
factor ->
factor
|> Factors.changeset(line)
|> Repo.update()
end
end
def create_request(params, %{id: address_id}, user) do
%Request{}
|> Changeset.change(address_id: address_id)
|> attach_user(user)
|> Request.changeset(params)
|> Repo.insert()
end
defp attach_user(changeset, %User{id: id}),
do: Changeset.change(changeset, user_id: id)
defp attach_user(changeset, _), do: changeset
def generate_price_comparison do
to_write =
Listing
|> where([l], l.status == "active")
|> preload(:address)
|> Repo.all()
|> Enum.map(&compare_prices/1)
|> Enum.filter(fn
{:error, _} -> false
_other -> true
end)
|> Enum.map(&encode/1)
|> Enum.join("\n")
File.write("export.txt", to_write)
end
defp compare_prices(listing) do
case suggest_price(listing) do
{:error, :street_not_covered} ->
{:error, :street_not_covered}
{:ok, suggested_price} ->
%{listing_id: listing.id, actual_price: listing.price, suggested_price: suggested_price}
end
end
defp encode(%{
listing_id: listing_id,
actual_price: actual_price,
suggested_price: suggested_price
}) do
"ID: #{listing_id}, Preço atual: #{actual_price}, Preço Sugerido: #{suggested_price}"
end
end
|
apps/re/lib/price_suggestions/price_suggestions.ex
| 0.638385 | 0.40204 |
price_suggestions.ex
|
starcoder
|
defmodule Kiq.Worker do
@moduledoc """
Defines a behavior and macro to guide the creation of worker modules.
Worker modules do the work of processing a job. At a minimum they must define
a `perform` function, which will be called with the arguments that were
enqueued with the `Kiq.Job`.
## Defining Workers
Define a worker to process jobs in the `events` queue:
defmodule MyApp.Workers.Business do
use Kiq.Worker, queue: "events", retry: 10, dead: false
@impl Kiq.Worker
def perform(args) do
IO.inspect(args)
end
end
The `perform/1` function will always receive a list of arguments. In this
example the worker will simply inspect any arguments that are provided.
## Enqueuing Jobs
All workers implement a `new/1` function that converts a list of arguments
into a `Kiq.Job` that is suitable for enqueuing:
["doing", "business"]
|> MyApp.Workers.Business.new()
|> MyApp.Kiq.enqueue()
"""
alias Kiq.Job
@type args :: list(any())
@type opts :: [
queue: binary(),
dead: boolean(),
expires_in: pos_integer(),
retry: boolean(),
unique_for: pos_integer(),
unique_until: binary()
]
@doc """
Build a job for this worker using all default options.
Any additional arguments that are provided will be merged into the job.
"""
@callback new(args :: args()) :: Job.t()
@doc """
The `perform/1` function is called with the enqueued arguments.
The return value is not important.
"""
@callback perform(args :: args()) :: any()
@allowed_opts [:queue, :dead, :expires_in, :retry, :unique_for, :unique_until]
defmacro __using__(opts) do
opts =
opts
|> Keyword.take(@allowed_opts)
|> Keyword.put_new(:queue, "default")
quote do
alias Kiq.Worker
@behaviour Worker
@impl Worker
def new(args) when is_list(args) do
Worker.new(__MODULE__, args, unquote(opts))
end
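# Default no-op implementation; worker modules are expected to override
# `perform/1` (made possible by `defoverridable` below).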
@impl Worker
def perform(args) when is_list(args) do
:ok
end
defoverridable Worker
end
end
@doc false
@spec new(module(), map() | Keyword.t(), opts()) :: Job.t()
def new(module, args, opts) do
opts
|> Keyword.put(:args, args)
|> Keyword.put(:class, module)
|> Job.new()
end
end
|
lib/kiq/worker.ex
| 0.832781 | 0.520009 |
worker.ex
|
starcoder
|
defmodule XDR.Union do
@moduledoc """
This module manages the `Discriminated Union` type based on the RFC4506 XDR Standard.
"""
@behaviour XDR.Declaration
alias XDR.UnionError
defstruct [:discriminant, :arms, :value]
@type discriminant :: XDR.Enum.t() | XDR.Int.t() | XDR.UInt.t() | struct()
@type arms :: keyword() | map()
@typedoc """
`XDR.Union` structure type specification.
"""
@type t :: %XDR.Union{discriminant: discriminant(), arms: arms(), value: any()}
@doc """
Create a new `XDR.Union` structure with the `discriminant`, `arms` and `value` passed.
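
## Examples

A sketch (the enum declarations and arms below are hypothetical):

    discriminant = XDR.Enum.new([sure: 0, maybe: 1], :sure)
    union = XDR.Union.new(discriminant, [sure: XDR.Bool, maybe: XDR.Void])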
"""
@spec new(discriminant :: discriminant(), arms :: arms(), value :: any()) :: t()
def new(discriminant, arms, value \\ nil),
do: %XDR.Union{discriminant: discriminant, arms: arms, value: value}
@doc """
Encode a `XDR.Union` structure into a XDR format.
"""
@impl true
def encode_xdr(%{discriminant: %{identifier: identifier}}) when not is_atom(identifier),
do: {:error, :not_atom}
def encode_xdr(%{
discriminant: %{__struct__: xdr_type, identifier: identifier} = discriminant,
arms: arms,
value: value
}) do
encoded_discriminant = xdr_type.encode_xdr!(discriminant)
encoded_arm = identifier |> get_arm(arms) |> encode_arm(value)
{:ok, encoded_discriminant <> encoded_arm}
end
def encode_xdr(%{
discriminant: %{__struct__: xdr_type, datum: datum} = discriminant,
arms: arms,
value: value
}) do
encoded_discriminant = xdr_type.encode_xdr!(discriminant)
encoded_arm = datum |> get_arm(arms) |> encode_arm(value)
{:ok, encoded_discriminant <> encoded_arm}
end
@doc """
Encode a `XDR.Union` structure into a XDR format.
If the `union` is not valid, an exception is raised.
"""
@impl true
def encode_xdr!(union) do
case encode_xdr(union) do
{:ok, binary} -> binary
{:error, reason} -> raise(UnionError, reason)
end
end
@doc """
Decode the Discriminated Union in XDR format to a `XDR.Union` structure.
"""
@impl true
def decode_xdr(bytes, union) do
bytes
|> decode_union_discriminant(union)
|> decode_union_arm()
end
@doc """
Decode the Discriminated Union in XDR format to a `XDR.Union` structure.
If the binaries are not valid, an exception is raised.
"""
@impl true
def decode_xdr!(bytes, union) do
case decode_xdr(bytes, union) do
{:ok, result} -> result
{:error, reason} -> raise(UnionError, reason)
end
end
@spec decode_union_discriminant(bytes :: binary(), struct :: map()) ::
{struct(), binary()} | {:error, :not_binary | :not_list}
defp decode_union_discriminant(bytes, _union) when not is_binary(bytes),
do: {:error, :not_binary}
defp decode_union_discriminant(_bytes, %{discriminant: %{declarations: declarations}})
when not is_list(declarations),
do: {:error, :not_list}
defp decode_union_discriminant(
bytes,
%{discriminant: %{__struct__: xdr_type, declarations: declarations}} = union
) do
{discriminant, rest} = xdr_type.decode_xdr!(bytes, %{declarations: declarations})
{%{union | discriminant: discriminant}, rest}
end
defp decode_union_discriminant(bytes, %{discriminant: %{__struct__: xdr_type}} = union) do
case xdr_type.decode_xdr!(bytes) do
{%{datum: datum}, rest} ->
{%{union | discriminant: datum}, rest}
{discriminant, rest} ->
{%{union | discriminant: discriminant}, rest}
end
end
@spec encode_arm(arm :: struct() | atom() | module(), value :: any()) :: binary()
defp encode_arm(xdr_type, %{__struct__: xdr_type} = value) do
xdr_type.encode_xdr!(value)
end
defp encode_arm(arm, value) when is_atom(arm) do
value |> arm.new() |> arm.encode_xdr!()
end
defp encode_arm(%{__struct__: xdr_type} = arm, nil) do
xdr_type.encode_xdr!(arm)
end
@spec decode_union_arm({:error, atom()}) :: {:error, atom()}
defp decode_union_arm({:error, reason}), do: {:error, reason}
@spec decode_union_arm({struct(), binary()}) :: {:ok, {term(), binary()}} | {:error, atom()}
defp decode_union_arm(
{%{discriminant: %{identifier: identifier} = discriminant, arms: arms}, rest}
) do
identifier
|> get_arm(arms)
|> get_arm_module()
|> decode_arm(discriminant, rest)
end
defp decode_union_arm({%{discriminant: discriminant, arms: arms}, rest}) do
discriminant
|> get_arm(arms)
|> get_arm_module()
|> decode_arm(discriminant, rest)
end
@spec decode_arm(
xdr_type :: struct() | atom() | non_neg_integer(),
discriminant :: non_neg_integer() | XDR.Enum.t(),
rest :: binary()
) :: {:ok, {term(), binary()}} | {:error, atom()}
defp decode_arm(nil, _discriminant, _rest), do: {:error, :invalid_arm}
defp decode_arm(xdr_type, discriminant, rest) do
{decoded_arm, rest} = xdr_type.decode_xdr!(rest)
{:ok, {{discriminant, decoded_arm}, rest}}
end
@spec get_arm_module(arm :: struct() | module()) :: module()
defp get_arm_module(%{__struct__: xdr_type}), do: xdr_type
defp get_arm_module(arm) when is_atom(arm), do: arm
@spec get_arm(identifier :: atom() | number(), arms :: arms()) :: struct() | module() | nil
defp get_arm(identifier, arms) do
case arms[identifier] do
nil -> arms[:default]
arm -> arm
end
end
end
|
lib/xdr/union.ex
| 0.929256 | 0.434041 |
union.ex
|
starcoder
|
defmodule OpentelemetryOban do
@moduledoc """
OpentelemetryOban uses [telemetry](https://hexdocs.pm/telemetry/) handlers to create
`OpenTelemetry` spans for Oban events. The Oban telemetry events that are used are documented [here](https://hexdocs.pm/oban/Oban.Telemetry.html).
## Usage
Add in your application start function a call to `setup/0`:
def start(_type, _args) do
# this register a tracer for your application
OpenTelemetry.register_application_tracer(:my_app)
# this configures the oban tracing
OpentelemetryOban.setup()
children = [
...
]
...
end
"""
require OpenTelemetry.Tracer
alias OpenTelemetry.Span
alias OpentelemetryOban.Reason
@tracer_id :opentelemetry_oban
@event_names Enum.flat_map([:job, :producer, :plugin], fn event_kind ->
Enum.map([:start, :stop, :exception], fn event_name ->
[:oban, event_kind, event_name]
end)
end) ++
Enum.flat_map(
[
:init,
:refresh,
:put_meta,
:fetch_jobs,
:complete_job,
:discard_job,
:error_job,
:snooze_job,
:cancel_job
],
fn event_kind ->
Enum.map([:start, :stop, :exception], fn event_name ->
[:oban, :engine, event_kind, event_name]
end)
end
) ++
Enum.map([:start, :stop, :exception], fn event_name ->
[:oban, :notifier, :notify, event_name]
end) ++
[[:oban, :circuit, :trip], [:oban, :circuit, :open]]
@type setup_opts :: [duration() | sampler()]
@type duration :: {:duration, {atom(), System.time_unit()}}
@type sampler :: {:sampler, :otel_sampler.t() | sampler_fun() | nil}
@type sampler_fun :: (telemetry_data() -> :otel_sampler.t() | nil)
@type telemetry_data :: %{event: [atom()], measurements: map(), meta: map()}
@doc """
Initializes and configures the telemetry handlers.
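
For example (a sketch), to record durations in microseconds under a custom
attribute key:

    OpentelemetryOban.setup(duration: {:"oban.duration_us", :microsecond})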
"""
@spec setup(setup_opts()) :: :ok | {:error, :already_exists}
def setup(opts \\ []) do
{:ok, otel_oban_vsn} = :application.get_key(@tracer_id, :vsn)
OpenTelemetry.register_tracer(@tracer_id, otel_oban_vsn)
config =
Enum.reduce(
opts,
%{duration: %{key: :"oban.duration_ms", timeunit: :millisecond}, sampler: nil},
fn
{:duration, {key, timeunit}}, acc when is_atom(key) ->
%{acc | duration: %{key: key, timeunit: timeunit}}
{:sampler, sampler}, acc ->
%{acc | sampler: sampler}
end
)
:telemetry.attach_many(__MODULE__, @event_names, &process_event/4, config)
end
@doc false
def process_event([:oban, event, event_kind, :start] = meta_event, measurements, meta, config) do
span_name = span_name({event, event_kind}, meta)
attributes = start_attributes({event, event_kind}, measurements, meta, config)
start_opts =
%{kind: :internal}
|> maybe_put_sampler(config.sampler, %{
event: meta_event,
measurements: measurements,
meta: meta
})
OpentelemetryTelemetry.start_telemetry_span(@tracer_id, span_name, meta, start_opts)
|> Span.set_attributes(attributes)
end
def process_event([:oban, event_kind, :start] = event, measurements, meta, config) do
span_name = span_name(event_kind, meta)
attributes = start_attributes(event_kind, measurements, meta, config)
start_opts =
%{kind: :internal}
|> maybe_put_sampler(config.sampler, %{event: event, measurements: measurements, meta: meta})
OpentelemetryTelemetry.start_telemetry_span(@tracer_id, span_name, meta, start_opts)
|> Span.set_attributes(attributes)
end
def process_event([:oban, event_kind, :stop], measurements, meta, config) do
ctx = OpentelemetryTelemetry.set_current_telemetry_span(@tracer_id, meta)
attributes = stop_attributes(event_kind, measurements, meta, config)
Span.set_attributes(ctx, attributes)
OpentelemetryTelemetry.end_telemetry_span(@tracer_id, meta)
end
def process_event([:oban, event, event_kind, :stop], measurements, meta, config) do
ctx = OpentelemetryTelemetry.set_current_telemetry_span(@tracer_id, meta)
attributes = stop_attributes({event, event_kind}, measurements, meta, config)
Span.set_attributes(ctx, attributes)
OpentelemetryTelemetry.end_telemetry_span(@tracer_id, meta)
end
def process_event([:oban, event_kind, :exception], measurements, meta, config) do
ctx = OpentelemetryTelemetry.set_current_telemetry_span(@tracer_id, meta)
attributes = exception_attributes(event_kind, measurements, meta, config)
Span.set_attributes(ctx, attributes)
register_exception_event(event_kind, ctx, meta)
OpentelemetryTelemetry.end_telemetry_span(@tracer_id, meta)
end
def process_event([:oban, event, event_kind, :exception], measurements, meta, config) do
ctx = OpentelemetryTelemetry.set_current_telemetry_span(@tracer_id, meta)
attributes = exception_attributes({event, event_kind}, measurements, meta, config)
Span.set_attributes(ctx, attributes)
register_exception_event({event, event_kind}, ctx, meta)
OpentelemetryTelemetry.end_telemetry_span(@tracer_id, meta)
end
def process_event([:oban, :circuit = event_kind, :trip], measurements, meta, config) do
ctx =
OpentelemetryTelemetry.start_telemetry_span(@tracer_id, "Oban circuit tripped", meta, %{
kind: :internal
})
attributes =
exception_attributes(event_kind, measurements, meta, config) ++
[{"oban.event", "circuit_tripped"}]
Span.set_attributes(ctx, attributes)
register_exception_event(event_kind, ctx, meta)
OpentelemetryTelemetry.end_telemetry_span(@tracer_id, meta)
end
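  # [:oban, :circuit, :open] is attached in @event_names above but had no
  # matching clause, which would raise a FunctionClauseError and detach the
  # handler. This no-op clause is a minimal fix; extend it if you want to
  # record circuit re-open events as well.
  def process_event([:oban, :circuit, :open], _measurements, _meta, _config) do
    :ok
  end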
defp span_name(:job, %{job: %{worker: worker}}) do
"Oban job #{module_to_string(worker)}"
end
defp span_name(:producer, %{queue: queue}) do
"Oban producer #{queue}"
end
defp span_name(:plugin, %{plugin: plugin}) do
"Oban plugin #{module_to_string(plugin)}"
end
defp span_name({:engine, event_name}, _meta) do
"Oban engine #{event_name}"
end
defp span_name({:notifier, event_name}, _meta) do
"Oban notifier #{event_name}"
end
defp start_attributes(
:job,
_measurements,
%{
job: %{
worker: worker,
queue: queue,
max_attempts: max_attempts,
attempt: attempt,
scheduled_at: scheduled_at,
attempted_at: attempted_at
}
},
_config
) do
[
{"oban.worker", module_to_string(worker)},
{"oban.event", "job"},
{"oban.queue", queue},
{"oban.max_attempts", max_attempts},
{"oban.attempt", attempt},
{"oban.scheduled_at", to_iso8601(scheduled_at)},
{"oban.attempted_at", to_iso8601(attempted_at)}
]
end
defp start_attributes(:producer, _measurements, %{queue: queue}, _config) do
[
{"oban.event", "producer"},
{"oban.queue", queue}
]
end
defp start_attributes(:plugin, _measurements, %{plugin: plugin}, _config) do
[
{"oban.event", "plugin"},
{"oban.plugin", module_to_string(plugin)}
]
end
defp start_attributes({:engine, event}, _measurements, %{engine: engine}, _config) do
[
{"oban.event", "engine"},
{"oban.engine", module_to_string(engine)},
{"oban.engine_operation", event}
]
end
defp start_attributes({:notifier, event}, _measurements, _meta, _config) do
[
{"oban.event", "notifier"},
{"oban.subevent", event}
]
end
defp start_attributes(_event_kind, _measurements, _meta, _config), do: []
  defp stop_attributes(
         :job,
         %{
           duration: native_duration,
           queue_time: native_queue_time,
           cancelled_at: cancelled_at,
           completed_at: completed_at,
           discarded_at: discarded_at
         },
         %{state: state},
         config
       ) do
    [
      duration_attribute(native_duration, config),
      # queue_time gets its own attribute key; passing it through
      # duration_attribute/2 would emit a second attribute under the same
      # configured duration key, and the two would collide
      queue_time_attribute(native_queue_time, config),
      state_attribute(state),
      {"oban.cancelled_at", to_iso8601(cancelled_at)},
      {"oban.completed_at", to_iso8601(completed_at)},
      {"oban.discarded_at", to_iso8601(discarded_at)}
    ]
    |> Enum.reject(fn {_key, value} -> is_nil(value) end)
  end
defp stop_attributes(:producer, _measurements, %{dispatched_count: dispatched_count}, _config) do
[{"oban.dispatched_count", dispatched_count}]
end
defp stop_attributes(_event_kind, _measurements, _meta, _config), do: []
defp exception_attributes(:job, %{duration: native_duration}, %{state: state}, config) do
[
duration_attribute(native_duration, config),
state_attribute(state)
]
end
defp exception_attributes(_, %{duration: native_duration}, _meta, config) do
[duration_attribute(native_duration, config)]
end
defp exception_attributes(_event_kind, _measurements, _meta, _config), do: []
defp register_exception_event(_event_kind, ctx, %{
kind: kind,
reason: reason,
stacktrace: stacktrace
}) do
{[reason: reason], attrs} = Reason.normalize(reason) |> Keyword.split([:reason])
exception = Exception.normalize(kind, reason, stacktrace)
message = Exception.message(exception)
Span.record_exception(ctx, exception, stacktrace, attrs)
Span.set_status(ctx, OpenTelemetry.status(:error, message))
end
defp state_attribute(state) do
{"oban.state", state}
end
defp duration_attribute(native_duration, %{duration: %{key: key, timeunit: timeunit}}) do
duration = System.convert_time_unit(native_duration, :native, timeunit)
{key, duration}
end
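  # Hedged helper added so queue time does not reuse the configured duration
  # key (which would make the two attributes collide). The derived key name
  # (e.g. "oban.queue_time_ms") is an assumption; adjust it to match your
  # attribute naming conventions.
  defp queue_time_attribute(native_queue_time, %{duration: %{key: key, timeunit: timeunit}}) do
    queue_time = System.convert_time_unit(native_queue_time, :native, timeunit)
    {key |> to_string() |> String.replace("duration", "queue_time"), queue_time}
  end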
defp to_iso8601(nil), do: nil
defp to_iso8601(%DateTime{} = datetime), do: DateTime.to_iso8601(datetime)
defp module_to_string(module_name) when is_binary(module_name), do: module_name
defp module_to_string(module) when is_atom(module) do
case to_string(module) do
"Elixir." <> name -> name
erlang_module -> ":#{erlang_module}"
end
end
defp maybe_put_sampler(opts, nil, _telemetry_data), do: opts
defp maybe_put_sampler(opts, sampler_fun, telemetry_data) when is_function(sampler_fun) do
sampler = sampler_fun.(telemetry_data)
maybe_put_sampler(opts, sampler, telemetry_data)
end
defp maybe_put_sampler(opts, sampler, _telemetry_data) do
Map.put(opts, :sampler, sampler)
end
end
|
lib/opentelemetry_oban.ex
| 0.810066 | 0.466967 |
opentelemetry_oban.ex
|
starcoder
|
defmodule Google.Protobuf.Type do
@moduledoc false
alias Pbuf.Decoder
import Bitwise, only: [bsr: 2, band: 2]
@derive Jason.Encoder
defstruct [
name: "",
fields: [],
oneofs: [],
options: [],
source_context: nil,
syntax: 0
]
@type t :: %__MODULE__{
name: String.t,
fields: [Google.Protobuf.Field.t],
oneofs: [String.t],
options: [Google.Protobuf.Option.t],
source_context: Google.Protobuf.SourceContext.t,
syntax: Google.Protobuf.Syntax.t
}
@spec new(Enum.t) :: t
def new(data) do
struct(__MODULE__, data)
end
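  # A hedged round-trip sketch (field values are illustrative only):
  #
  #     type = Google.Protobuf.Type.new(name: "my.Type")
  #     binary = Google.Protobuf.Type.encode!(type)
  #     {:ok, decoded} = Google.Protobuf.Type.decode(binary)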
@spec encode_to_iodata!(t | map) :: iodata
def encode_to_iodata!(data) do
alias Elixir.Pbuf.Encoder
[
Encoder.field(:string, data.name, <<10>>),
Encoder.repeated_field(:struct, data.fields, <<18>>),
Encoder.repeated_field(:string, data.oneofs, <<26>>),
Encoder.repeated_field(:struct, data.options, <<34>>),
Encoder.field(:struct, data.source_context, <<42>>),
Encoder.enum_field(Google.Protobuf.Syntax, data.syntax, <<48>>),
]
end
@spec encode!(t | map) :: binary
def encode!(data) do
:erlang.iolist_to_binary(encode_to_iodata!(data))
end
@spec decode!(binary) :: t
def decode!(data) do
Decoder.decode!(__MODULE__, data)
end
@spec decode(binary) :: {:ok, t} | :error
def decode(data) do
Decoder.decode(__MODULE__, data)
end
def decode(acc, <<10, data::binary>>) do
Decoder.field(:string, :name, acc, data)
end
def decode(acc, <<18, data::binary>>) do
Decoder.struct_field(Google.Protobuf.Field, :fields, acc, data)
end
def decode(acc, <<26, data::binary>>) do
Decoder.field(:string, :oneofs, acc, data)
end
def decode(acc, <<34, data::binary>>) do
Decoder.struct_field(Google.Protobuf.Option, :options, acc, data)
end
def decode(acc, <<42, data::binary>>) do
Decoder.struct_field(Google.Protobuf.SourceContext, :source_context, acc, data)
end
def decode(acc, <<48, data::binary>>) do
Decoder.enum_field(Google.Protobuf.Syntax, :syntax, acc, data)
end
# failed to decode, either this is an unknown tag (which we can skip), or
# it is a wrong type (which is an error)
def decode(acc, data) do
{prefix, data} = Decoder.varint(data)
tag = bsr(prefix, 3)
type = band(prefix, 7)
case tag in [1,2,3,4,5,6] do
false -> {acc, Decoder.skip(type, data)}
true ->
err = %Decoder.Error{
tag: tag,
module: __MODULE__,
message: "#{__MODULE__} tag #{tag} has an incorrect type of #{type}"
}
{:error, err}
end
end
def __finalize_decode__(args) do
struct = Elixir.Enum.reduce(args, %__MODULE__{}, fn
{:options, v}, acc -> Map.update(acc, :options, [v], fn e -> [v | e] end)
{:oneofs, v}, acc -> Map.update(acc, :oneofs, [v], fn e -> [v | e] end)
{:fields, v}, acc -> Map.update(acc, :fields, [v], fn e -> [v | e] end)
{k, v}, acc -> Map.put(acc, k, v)
end)
struct = Map.put(struct, :options, Elixir.Enum.reverse(struct.options))
struct = Map.put(struct, :oneofs, Elixir.Enum.reverse(struct.oneofs))
struct = Map.put(struct, :fields, Elixir.Enum.reverse(struct.fields))
struct
end
end
defmodule Google.Protobuf.Field do
@moduledoc false
alias Pbuf.Decoder
import Bitwise, only: [bsr: 2, band: 2]
@derive Jason.Encoder
defstruct [
kind: 0,
cardinality: 0,
number: 0,
name: "",
type_url: "",
oneof_index: 0,
packed: false,
options: [],
json_name: "",
default_value: ""
]
@type t :: %__MODULE__{
kind: Google.Protobuf.Field.Kind.t,
cardinality: Google.Protobuf.Field.Cardinality.t,
number: integer,
name: String.t,
type_url: String.t,
oneof_index: integer,
packed: boolean,
options: [Google.Protobuf.Option.t],
json_name: String.t,
default_value: String.t
}
defmodule Cardinality do
@moduledoc false
@type t :: :CARDINALITY_UNKNOWN | 0 | :CARDINALITY_OPTIONAL | 1 | :CARDINALITY_REQUIRED | 2 | :CARDINALITY_REPEATED | 3
@spec to_int(t | non_neg_integer) :: integer
def to_int(:CARDINALITY_OPTIONAL), do: 1
def to_int(1), do: 1
def to_int(:CARDINALITY_REPEATED), do: 3
def to_int(3), do: 3
def to_int(:CARDINALITY_REQUIRED), do: 2
def to_int(2), do: 2
def to_int(:CARDINALITY_UNKNOWN), do: 0
def to_int(0), do: 0
def to_int(invalid) do
raise Pbuf.Encoder.Error,
type: __MODULE__,
value: invalid,
tag: nil,
message: "#{inspect(invalid)} is not a valid enum value for #{__MODULE__}"
end
@spec from_int(integer) :: t
def from_int(1), do: :CARDINALITY_OPTIONAL
def from_int(3), do: :CARDINALITY_REPEATED
def from_int(2), do: :CARDINALITY_REQUIRED
def from_int(0), do: :CARDINALITY_UNKNOWN
def from_int(_unknown), do: :invalid
end
defmodule Kind do
@moduledoc false
@type t :: :TYPE_UNKNOWN | 0 | :TYPE_DOUBLE | 1 | :TYPE_FLOAT | 2 | :TYPE_INT64 | 3 | :TYPE_UINT64 | 4 | :TYPE_INT32 | 5 | :TYPE_FIXED64 | 6 | :TYPE_FIXED32 | 7 | :TYPE_BOOL | 8 | :TYPE_STRING | 9 | :TYPE_GROUP | 10 | :TYPE_MESSAGE | 11 | :TYPE_BYTES | 12 | :TYPE_UINT32 | 13 | :TYPE_ENUM | 14 | :TYPE_SFIXED32 | 15 | :TYPE_SFIXED64 | 16 | :TYPE_SINT32 | 17 | :TYPE_SINT64 | 18
@spec to_int(t | non_neg_integer) :: integer
def to_int(:TYPE_BOOL), do: 8
def to_int(8), do: 8
def to_int(:TYPE_BYTES), do: 12
def to_int(12), do: 12
def to_int(:TYPE_DOUBLE), do: 1
def to_int(1), do: 1
def to_int(:TYPE_ENUM), do: 14
def to_int(14), do: 14
def to_int(:TYPE_FIXED32), do: 7
def to_int(7), do: 7
def to_int(:TYPE_FIXED64), do: 6
def to_int(6), do: 6
def to_int(:TYPE_FLOAT), do: 2
def to_int(2), do: 2
def to_int(:TYPE_GROUP), do: 10
def to_int(10), do: 10
def to_int(:TYPE_INT32), do: 5
def to_int(5), do: 5
def to_int(:TYPE_INT64), do: 3
def to_int(3), do: 3
def to_int(:TYPE_MESSAGE), do: 11
def to_int(11), do: 11
def to_int(:TYPE_SFIXED32), do: 15
def to_int(15), do: 15
def to_int(:TYPE_SFIXED64), do: 16
def to_int(16), do: 16
def to_int(:TYPE_SINT32), do: 17
def to_int(17), do: 17
def to_int(:TYPE_SINT64), do: 18
def to_int(18), do: 18
def to_int(:TYPE_STRING), do: 9
def to_int(9), do: 9
def to_int(:TYPE_UINT32), do: 13
def to_int(13), do: 13
def to_int(:TYPE_UINT64), do: 4
def to_int(4), do: 4
def to_int(:TYPE_UNKNOWN), do: 0
def to_int(0), do: 0
def to_int(invalid) do
raise Pbuf.Encoder.Error,
type: __MODULE__,
value: invalid,
tag: nil,
message: "#{inspect(invalid)} is not a valid enum value for #{__MODULE__}"
end
@spec from_int(integer) :: t
def from_int(8), do: :TYPE_BOOL
def from_int(12), do: :TYPE_BYTES
def from_int(1), do: :TYPE_DOUBLE
def from_int(14), do: :TYPE_ENUM
def from_int(7), do: :TYPE_FIXED32
def from_int(6), do: :TYPE_FIXED64
def from_int(2), do: :TYPE_FLOAT
def from_int(10), do: :TYPE_GROUP
def from_int(5), do: :TYPE_INT32
def from_int(3), do: :TYPE_INT64
def from_int(11), do: :TYPE_MESSAGE
def from_int(15), do: :TYPE_SFIXED32
def from_int(16), do: :TYPE_SFIXED64
def from_int(17), do: :TYPE_SINT32
def from_int(18), do: :TYPE_SINT64
def from_int(9), do: :TYPE_STRING
def from_int(13), do: :TYPE_UINT32
def from_int(4), do: :TYPE_UINT64
def from_int(0), do: :TYPE_UNKNOWN
def from_int(_unknown), do: :invalid
end
@spec new(Enum.t) :: t
def new(data) do
struct(__MODULE__, data)
end
@spec encode_to_iodata!(t | map) :: iodata
def encode_to_iodata!(data) do
alias Elixir.Pbuf.Encoder
[
Encoder.enum_field(Google.Protobuf.Field.Kind, data.kind, <<8>>),
Encoder.enum_field(Google.Protobuf.Field.Cardinality, data.cardinality, <<16>>),
Encoder.field(:int32, data.number, <<24>>),
Encoder.field(:string, data.name, <<34>>),
Encoder.field(:string, data.type_url, <<50>>),
Encoder.field(:int32, data.oneof_index, <<56>>),
Encoder.field(:bool, data.packed, <<64>>),
Encoder.repeated_field(:struct, data.options, <<74>>),
Encoder.field(:string, data.json_name, <<82>>),
Encoder.field(:string, data.default_value, <<90>>),
]
end
@spec encode!(t | map) :: binary
def encode!(data) do
:erlang.iolist_to_binary(encode_to_iodata!(data))
end
@spec decode!(binary) :: t
def decode!(data) do
Decoder.decode!(__MODULE__, data)
end
@spec decode(binary) :: {:ok, t} | :error
def decode(data) do
Decoder.decode(__MODULE__, data)
end
def decode(acc, <<8, data::binary>>) do
Decoder.enum_field(Google.Protobuf.Field.Kind, :kind, acc, data)
end
def decode(acc, <<16, data::binary>>) do
Decoder.enum_field(Google.Protobuf.Field.Cardinality, :cardinality, acc, data)
end
def decode(acc, <<24, data::binary>>) do
Decoder.field(:int32, :number, acc, data)
end
def decode(acc, <<34, data::binary>>) do
Decoder.field(:string, :name, acc, data)
end
def decode(acc, <<50, data::binary>>) do
Decoder.field(:string, :type_url, acc, data)
end
def decode(acc, <<56, data::binary>>) do
Decoder.field(:int32, :oneof_index, acc, data)
end
def decode(acc, <<64, data::binary>>) do
Decoder.field(:bool, :packed, acc, data)
end
def decode(acc, <<74, data::binary>>) do
Decoder.struct_field(Google.Protobuf.Option, :options, acc, data)
end
def decode(acc, <<82, data::binary>>) do
Decoder.field(:string, :json_name, acc, data)
end
def decode(acc, <<90, data::binary>>) do
Decoder.field(:string, :default_value, acc, data)
end
# failed to decode, either this is an unknown tag (which we can skip), or
# it is a wrong type (which is an error)
def decode(acc, data) do
{prefix, data} = Decoder.varint(data)
tag = bsr(prefix, 3)
type = band(prefix, 7)
case tag in [1,2,3,4,6,7,8,9,10,11] do
false -> {acc, Decoder.skip(type, data)}
true ->
err = %Decoder.Error{
tag: tag,
module: __MODULE__,
message: "#{__MODULE__} tag #{tag} has an incorrect type of #{type}"
}
{:error, err}
end
end
def __finalize_decode__(args) do
struct = Elixir.Enum.reduce(args, %__MODULE__{}, fn
{:options, v}, acc -> Map.update(acc, :options, [v], fn e -> [v | e] end)
{k, v}, acc -> Map.put(acc, k, v)
end)
struct = Map.put(struct, :options, Elixir.Enum.reverse(struct.options))
struct
end
end
defmodule Google.Protobuf.Enum do
@moduledoc false
alias Pbuf.Decoder
import Bitwise, only: [bsr: 2, band: 2]
@derive Jason.Encoder
defstruct [
name: "",
enumvalue: [],
options: [],
source_context: nil,
syntax: 0
]
@type t :: %__MODULE__{
name: String.t,
enumvalue: [Google.Protobuf.EnumValue.t],
options: [Google.Protobuf.Option.t],
source_context: Google.Protobuf.SourceContext.t,
syntax: Google.Protobuf.Syntax.t
}
@spec new(Enum.t) :: t
def new(data) do
struct(__MODULE__, data)
end
@spec encode_to_iodata!(t | map) :: iodata
def encode_to_iodata!(data) do
alias Elixir.Pbuf.Encoder
[
Encoder.field(:string, data.name, <<10>>),
Encoder.repeated_field(:struct, data.enumvalue, <<18>>),
Encoder.repeated_field(:struct, data.options, <<26>>),
Encoder.field(:struct, data.source_context, <<34>>),
Encoder.enum_field(Google.Protobuf.Syntax, data.syntax, <<40>>),
]
end
@spec encode!(t | map) :: binary
def encode!(data) do
:erlang.iolist_to_binary(encode_to_iodata!(data))
end
@spec decode!(binary) :: t
def decode!(data) do
Decoder.decode!(__MODULE__, data)
end
@spec decode(binary) :: {:ok, t} | :error
def decode(data) do
Decoder.decode(__MODULE__, data)
end
def decode(acc, <<10, data::binary>>) do
Decoder.field(:string, :name, acc, data)
end
def decode(acc, <<18, data::binary>>) do
Decoder.struct_field(Google.Protobuf.EnumValue, :enumvalue, acc, data)
end
def decode(acc, <<26, data::binary>>) do
Decoder.struct_field(Google.Protobuf.Option, :options, acc, data)
end
def decode(acc, <<34, data::binary>>) do
Decoder.struct_field(Google.Protobuf.SourceContext, :source_context, acc, data)
end
def decode(acc, <<40, data::binary>>) do
Decoder.enum_field(Google.Protobuf.Syntax, :syntax, acc, data)
end
# failed to decode, either this is an unknown tag (which we can skip), or
# it is a wrong type (which is an error)
def decode(acc, data) do
{prefix, data} = Decoder.varint(data)
tag = bsr(prefix, 3)
type = band(prefix, 7)
case tag in [1,2,3,4,5] do
false -> {acc, Decoder.skip(type, data)}
true ->
err = %Decoder.Error{
tag: tag,
module: __MODULE__,
message: "#{__MODULE__} tag #{tag} has an incorrect type of #{type}"
}
{:error, err}
end
end
def __finalize_decode__(args) do
struct = Elixir.Enum.reduce(args, %__MODULE__{}, fn
{:options, v}, acc -> Map.update(acc, :options, [v], fn e -> [v | e] end)
{:enumvalue, v}, acc -> Map.update(acc, :enumvalue, [v], fn e -> [v | e] end)
{k, v}, acc -> Map.put(acc, k, v)
end)
struct = Map.put(struct, :options, Elixir.Enum.reverse(struct.options))
struct = Map.put(struct, :enumvalue, Elixir.Enum.reverse(struct.enumvalue))
struct
end
end
defmodule Google.Protobuf.EnumValue do
@moduledoc false
alias Pbuf.Decoder
import Bitwise, only: [bsr: 2, band: 2]
@derive Jason.Encoder
defstruct [
name: "",
number: 0,
options: []
]
@type t :: %__MODULE__{
name: String.t,
number: integer,
options: [Google.Protobuf.Option.t]
}
@spec new(Enum.t) :: t
def new(data) do
struct(__MODULE__, data)
end
@spec encode_to_iodata!(t | map) :: iodata
def encode_to_iodata!(data) do
alias Elixir.Pbuf.Encoder
[
Encoder.field(:string, data.name, <<10>>),
Encoder.field(:int32, data.number, <<16>>),
Encoder.repeated_field(:struct, data.options, <<26>>),
]
end
@spec encode!(t | map) :: binary
def encode!(data) do
:erlang.iolist_to_binary(encode_to_iodata!(data))
end
@spec decode!(binary) :: t
def decode!(data) do
Decoder.decode!(__MODULE__, data)
end
@spec decode(binary) :: {:ok, t} | :error
def decode(data) do
Decoder.decode(__MODULE__, data)
end
def decode(acc, <<10, data::binary>>) do
Decoder.field(:string, :name, acc, data)
end
def decode(acc, <<16, data::binary>>) do
Decoder.field(:int32, :number, acc, data)
end
def decode(acc, <<26, data::binary>>) do
Decoder.struct_field(Google.Protobuf.Option, :options, acc, data)
end
# failed to decode, either this is an unknown tag (which we can skip), or
# it is a wrong type (which is an error)
def decode(acc, data) do
{prefix, data} = Decoder.varint(data)
tag = bsr(prefix, 3)
type = band(prefix, 7)
case tag in [1,2,3] do
false -> {acc, Decoder.skip(type, data)}
true ->
err = %Decoder.Error{
tag: tag,
module: __MODULE__,
message: "#{__MODULE__} tag #{tag} has an incorrect type of #{type}"
}
{:error, err}
end
end
def __finalize_decode__(args) do
struct = Elixir.Enum.reduce(args, %__MODULE__{}, fn
{:options, v}, acc -> Map.update(acc, :options, [v], fn e -> [v | e] end)
{k, v}, acc -> Map.put(acc, k, v)
end)
struct = Map.put(struct, :options, Elixir.Enum.reverse(struct.options))
struct
end
end
defmodule Google.Protobuf.Option do
@moduledoc false
alias Pbuf.Decoder
import Bitwise, only: [bsr: 2, band: 2]
@derive Jason.Encoder
defstruct [
name: "",
value: nil
]
@type t :: %__MODULE__{
name: String.t,
value: Google.Protobuf.Any.t
}
@spec new(Enum.t) :: t
def new(data) do
struct(__MODULE__, data)
end
@spec encode_to_iodata!(t | map) :: iodata
def encode_to_iodata!(data) do
alias Elixir.Pbuf.Encoder
[
Encoder.field(:string, data.name, <<10>>),
Encoder.field(:struct, data.value, <<18>>),
]
end
@spec encode!(t | map) :: binary
def encode!(data) do
:erlang.iolist_to_binary(encode_to_iodata!(data))
end
@spec decode!(binary) :: t
def decode!(data) do
Decoder.decode!(__MODULE__, data)
end
@spec decode(binary) :: {:ok, t} | :error
def decode(data) do
Decoder.decode(__MODULE__, data)
end
def decode(acc, <<10, data::binary>>) do
Decoder.field(:string, :name, acc, data)
end
def decode(acc, <<18, data::binary>>) do
Decoder.struct_field(Google.Protobuf.Any, :value, acc, data)
end
# failed to decode, either this is an unknown tag (which we can skip), or
# it is a wrong type (which is an error)
def decode(acc, data) do
{prefix, data} = Decoder.varint(data)
tag = bsr(prefix, 3)
type = band(prefix, 7)
case tag in [1,2] do
false -> {acc, Decoder.skip(type, data)}
true ->
err = %Decoder.Error{
tag: tag,
module: __MODULE__,
message: "#{__MODULE__} tag #{tag} has an incorrect type of #{type}"
}
{:error, err}
end
end
def __finalize_decode__(args) do
struct = Elixir.Enum.reduce(args, %__MODULE__{}, fn
{k, v}, acc -> Map.put(acc, k, v)
end)
struct
end
end
defmodule Google.Protobuf.Syntax do
@moduledoc false
@type t :: :SYNTAX_PROTO2 | 0 | :SYNTAX_PROTO3 | 1
@spec to_int(t | non_neg_integer) :: integer
def to_int(:SYNTAX_PROTO2), do: 0
def to_int(0), do: 0
def to_int(:SYNTAX_PROTO3), do: 1
def to_int(1), do: 1
def to_int(invalid) do
raise Pbuf.Encoder.Error,
type: __MODULE__,
value: invalid,
tag: nil,
message: "#{inspect(invalid)} is not a valid enum value for #{__MODULE__}"
end
@spec from_int(integer) :: t
def from_int(0), do: :SYNTAX_PROTO2
def from_int(1), do: :SYNTAX_PROTO3
def from_int(_unknown), do: :invalid
end
|
lib/protoc/google/protobuf/type.pb.ex
| 0.729038 | 0.596991 |
type.pb.ex
|
starcoder
|
defmodule BlueBird.Controller do
@moduledoc """
Defines macros used to add documentation to api routes.
## Usage
Use `api/3` in your controllers. Optionally add the `apigroup/1` or
`apigroup/2` macro to your controllers.
defmodule MyApp.Web.UserController do
use BlueBird.Controller
alias MyApp.Accounts
apigroup "Customers", "These are the routes that we'll talk about."
api :GET, "users" do
title "List users"
description "Lists all active users"
end
def index(conn, _params) do
users = Accounts.list_users()
render(conn, "index.html", users: users)
end
end
Instead of adding `use BlueBird.Controller` to every controller module, you
can also add it to the `web.ex` controller function to make it available
in every controller.
def controller do
quote do
...
use BlueBird.Controller
...
end
end
"""
alias BlueBird.{Parameter, Route}
defmacro __using__(_) do
quote do
import BlueBird.Controller, only: [api: 3, apigroup: 1, apigroup: 2, parameters: 1, notes: 1,
warnings: 1]
end
end
@doc """
Describes a route.
```
api <method> <url> do ... end
```
- `method`: HTTP method (GET, POST, PUT etc.)
- `url`: Route as defined in the Phoenix router
- `title` (optional): Title for the action
- `description` (optional): Description of the route
- `note` (optional): Note
- `warning` (optional): Warning
- `parameter` (optional): Used for path and query parameters. It takes the
name as defined in the route and the type. The third parameter is an
optional keyword list with additional options. See `BlueBird.Parameter`
for descriptions of the available options.
## Example
api :GET, "user/:id/posts/:slug" do
title "Show post"
description "Show post by user ID and post slug"
note "You should really know this."
warning "Please don't ever do this."
parameter :id, :integer
parameter :slug, :string, [
description: "This is the post slug.",
example: "whatever"
        ]
      end
"""
defmacro api(method, path, do: block) do
method_str = method_to_string(method)
    metadata =
      block
      |> extract_metadata()
      |> extract_shared_params()
title = extract_option(metadata, :title)
description = extract_option(metadata, :description)
note = extract_option(metadata, :note)
warning = extract_option(metadata, :warning)
parameters = extract_parameters(metadata)
quote do
@spec api_doc(String.t, String.t) :: Route.t
def api_doc(unquote(method_str), unquote(path)) do
%Route{
title: unquote(title),
description: unquote(description),
note: unquote(note),
method: unquote(method_str),
warning: unquote(warning),
path: unquote(path),
parameters: unquote(Macro.escape(parameters))
}
end
end
end
defp extract_shared_params(metadata) do
metadata
|> Enum.map(&eval_shared_param/1)
end
defp eval_shared_param({:shared_item, value}) do
value
|> Code.eval_quoted
|> elem(0)
|> List.first()
end
defp eval_shared_param(value), do: value
@doc """
Defines the name and an optional description for a resource group.
BlueBird defines groups by the controller. By default, the group name
is taken from the controller name. If you want to specify a different name,
you can use this macro. You can also add a group description as a second
parameter.
## Example
apigroup "resource group name"
or
apigroup "resource group name", "description"
"""
defmacro apigroup(name, description \\ "") do
name = to_string(name)
description = to_string(description)
quote do
@spec api_group :: %{name: String.t, description: String.t}
def api_group do
%{
name: unquote(name),
description: unquote(description)
}
end
end
end
@doc """
Defines a list of parameters that can be used via the `shared_item` macro.
Each item in the list is the exact same as what would normally be passed directly to the
`parameter` key inside of an `api` macro.
## Example
parameters [
[:some_param, :string, [description: "Neato param"]]
]
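
  A defined parameter can then be referenced from inside an `api/3` block. A
  hedged sketch, inferred from this module's `shared_item` handling (the exact
  call shape may differ in your codebase):

      api :GET, "things" do
        title "List things"
        shared_item parameter(:some_param)
      end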
"""
defmacro parameters(params) do
for param <- params do
[name | spread] = param
quote do
def parameter(unquote(name), arg_name), do: {:parameter, [arg_name | unquote(spread)]}
def parameter(unquote(name)), do: {:parameter, unquote(param)}
end
end
end
@doc """
Defines a list of notes that can be used via the `shared_item` macro.
## Example
notes [
[:authenticated, "Authentication required"]
]
"""
defmacro notes(notes) do
for note <- notes do
[name | spread] = note
quote do
def note(unquote(name)), do: {:note, unquote(spread)}
end
end
end
@doc """
Defines a list of warnings that can be used via the `shared_item` macro.
## Example
      warnings [
[:destructive, "This will permanently destroy everything"]
]
"""
defmacro warnings(warnings) do
for warning <- warnings do
[name | spread] = warning
quote do
def warning(unquote(name)), do: {:warning, unquote(spread)}
end
end
end
@spec method_to_string(String.t | atom) :: String.t
defp method_to_string(method) when is_binary(method) or is_atom(method) do
method
|> to_string
|> String.upcase
end
@spec extract_metadata(
{:__block__, any, {String.t, any, [any]}} | {String.t, any, [any]} | nil) ::
[{atom, any}]
defp extract_metadata({:__block__, _, data}) do
Enum.map(data, fn({name, _line, params}) ->
{name, params}
end)
end
defp extract_metadata({key, _, data}), do: [{key, data}]
defp extract_metadata(nil), do: []
@spec extract_option([{atom, any}], atom) :: nil | any
defp extract_option(metadata, key) do
values = metadata |> Keyword.get(key)
cond do
is_nil(values) -> nil
length(values) == 1 -> List.first(values)
true -> raise ArgumentError,
"Expected single value for #{key}, got #{length(values)}"
end
end
@spec extract_parameters([{atom, any}]) :: [Parameter.t]
defp extract_parameters(metadata) do
metadata
|> Keyword.get_values(:parameter)
|> Enum.reduce([], fn(param, list) -> [param_to_map(param) | list] end)
|> Enum.reverse
end
@spec param_to_map([any]) :: Parameter.t
defp param_to_map([name, type, options]) when is_list(options) do
Map.merge(
%Parameter{
name: name |> to_string |> wrap_in_backticks,
type: to_string(type)
},
options |> wrap_param_options |> Enum.into(%{})
)
end
defp param_to_map([name, type]) do
%Parameter{
name: name |> to_string |> wrap_in_backticks,
type: to_string(type)
}
end
defp param_to_map([_, _, _]) do
raise ArgumentError, "The parameter macro expects a keyword list as " <>
"third argument."
end
defp param_to_map(_) do
raise ArgumentError,
"""
Wrong number of arguments for parameter option.
Expected either two or three arguments: The name, the type
and an optional keyword list. Correct usage:
parameter :name, :type
or
parameter :name, :type, [description: "description",
optional: true]
"""
end
defp wrap_param_options(options) do
options
|> Enum.map(&_wrap_param_options/1)
end
defp _wrap_param_options({:members, values}) when is_list(values), do: {:members, values |> Enum.map(&wrap_in_backticks/1)}
defp _wrap_param_options({key, value}) when key in [:default, :example], do: {key, wrap_in_backticks(value)}
defp _wrap_param_options(v), do: v
defp wrap_in_backticks(v) when is_list(v), do: v |> Enum.map(&wrap_in_backticks/1)
defp wrap_in_backticks(v), do: "`#{v}`"
end
|
lib/blue_bird/controller.ex
| 0.861086 | 0.5919 |
controller.ex
|
starcoder
|
defmodule AWS.Organizations do
@moduledoc """
AWS Organizations API Reference
AWS Organizations is a web service that enables you to consolidate your
multiple AWS accounts into an *organization* and centrally manage your
accounts and their resources.
This guide provides descriptions of the Organizations API. For more
information about using this service, see the [AWS Organizations User
Guide](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_introduction.html).
**API Version**
This version of the Organizations API Reference documents the Organizations
API version 2016-11-28.
<note> As an alternative to using the API directly, you can use one of the
AWS SDKs, which consist of libraries and sample code for various
programming languages and platforms (Java, Ruby, .NET, iOS, Android, and
more). The SDKs provide a convenient way to create programmatic access to
AWS Organizations. For example, the SDKs take care of cryptographically
signing requests, managing errors, and retrying requests automatically. For
more information about the AWS SDKs, including how to download and install
them, see [Tools for Amazon Web Services](http://aws.amazon.com/tools/).
</note> We recommend that you use the AWS SDKs to make programmatic API
calls to Organizations. However, you also can use the Organizations Query
API to make direct calls to the Organizations web service. To learn more
about the Organizations Query API, see [Making Query
Requests](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_query-requests.html)
in the *AWS Organizations User Guide*. Organizations supports GET and POST
requests for all actions. That is, the API does not require you to use GET
for some actions and POST for others. However, GET requests are subject to
the limitation size of a URL. Therefore, for operations that require larger
sizes, use a POST request.
**Signing Requests**
When you send HTTP requests to AWS, you must sign the requests so that AWS
can identify who sent them. You sign requests with your AWS access key,
which consists of an access key ID and a secret access key. We strongly
recommend that you do not create an access key for your root account.
Anyone who has the access key for your root account has unrestricted access
to all the resources in your account. Instead, create an access key for an
IAM user account that has administrative privileges. As another option, use
AWS Security Token Service to generate temporary security credentials, and
use those credentials to sign requests.
To sign requests, we recommend that you use [Signature Version
4](http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
If you have an existing application that uses Signature Version 2, you do
not have to update it to use Signature Version 4. However, some operations
now require Signature Version 4. The documentation for operations that
require version 4 indicate this requirement.
When you use the AWS Command Line Interface (AWS CLI) or one of the AWS
SDKs to make requests to AWS, these tools automatically sign the requests
for you with the access key that you specify when you configure the tools.
In this release, each organization can have only one root. In a future
release, a single organization will support multiple roots.
**Support and Feedback for AWS Organizations**
We welcome your feedback. Send your comments to
[<EMAIL>](mailto:<EMAIL>)
or post your feedback and questions in the [AWS Organizations support
forum](http://forums.aws.amazon.com/forum.jspa?forumID=219). For more
information about the AWS support forums, see [Forums
Help](http://forums.aws.amazon.com/help.jspa).
**Endpoint to Call When Using the CLI or the AWS API**
For the current release of Organizations, you must specify the `us-east-1`
region for all AWS API and CLI calls. You can do this in the CLI by using
these parameters and commands:
<ul> <li> Use the following parameter with each command to specify both the
endpoint and its region:
`--endpoint-url https://organizations.us-east-1.amazonaws.com`
</li> <li> Use the default endpoint, but configure your default region with
this command:
`aws configure set default.region us-east-1`
</li> <li> Use the following parameter with each command to specify the
endpoint:
`--region us-east-1`
</li> </ul> For the various SDKs used to call the APIs, see the
documentation for the SDK of interest to learn how to direct the requests
to a specific endpoint. For more information, see [Regions and
Endpoints](http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
in the *AWS General Reference*.
**How examples are presented**
The JSON returned by the AWS Organizations service as response to your
requests is returned as a single long string without line breaks or
formatting whitespace. Both line breaks and whitespace are included in the
examples in this guide to improve readability. When example input
parameters also would result in long strings that would extend beyond the
screen, we insert line breaks to enhance readability. You should always
submit the input as a single JSON text string.
**Recording API Requests**
AWS Organizations supports AWS CloudTrail, a service that records AWS API
calls for your AWS account and delivers log files to an Amazon S3 bucket.
By using information collected by AWS CloudTrail, you can determine which
requests were successfully made to Organizations, who made the request,
when it was made, and so on. For more about AWS Organizations and its
support for AWS CloudTrail, see [Logging AWS Organizations Events with AWS
CloudTrail](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_cloudtrail-integration.html)
in the *AWS Organizations User Guide*. To learn more about CloudTrail,
including how to turn it on and find your log files, see the [AWS
CloudTrail User
Guide](http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
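  **Example Usage**

  A hedged sketch of calling one of the operations in this module. It assumes
  a `client` built according to this library's conventions (for example,
  holding credentials and the `us-east-1` region); the exact return shape may
  vary by library version:

      {:ok, result, _http_response} =
        AWS.Organizations.describe_organization(client, %{})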
"""
@doc """
Sends a response to the originator of a handshake agreeing to the action
proposed by the handshake request.
This operation can be called only by the following principals when they
also have the relevant IAM permissions:
<ul> <li> **Invitation to join** or **Approve all features request**
handshakes: only a principal from the member account.
The user who calls the API for an invitation to join must have the
`organizations:AcceptHandshake` permission. If you enabled all features in
the organization, then the user must also have the
`iam:CreateServiceLinkedRole` permission so that Organizations can create
the required service-linked role named *OrgsServiceLinkedRoleName*. For
more information, see [AWS Organizations and Service-Linked
Roles](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integration_services.html#orgs_integration_service-linked-roles)
in the *AWS Organizations User Guide*.
</li> <li> **Enable all features final confirmation** handshake: only a
principal from the master account.
For more information about invitations, see [Inviting an AWS Account to
Join Your
Organization](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_invites.html)
in the *AWS Organizations User Guide*. For more information about requests
to enable all features in the organization, see [Enabling All Features in
Your
Organization](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html)
in the *AWS Organizations User Guide*.
</li> </ul> After you accept a handshake, it continues to appear in the
results of relevant APIs for only 30 days. After that it is deleted.
"""
def accept_handshake(client, input, options \\ []) do
request(client, "AcceptHandshake", input, options)
end
@doc """
Attaches a policy to a root, an organizational unit, or an individual
account. How the policy affects accounts depends on the type of policy:
<ul> <li> **Service control policy (SCP)** - An SCP specifies what
permissions can be delegated to users in affected member accounts. The
scope of influence for a policy depends on what you attach the policy to:
<ul> <li> If you attach an SCP to a root, it affects all accounts in the
organization.
</li> <li> If you attach an SCP to an OU, it affects all accounts in that
OU and in any child OUs.
</li> <li> If you attach the policy directly to an account, then it affects
only that account.
</li> </ul> SCPs essentially are permission "filters". When you attach one
SCP to a higher level root or OU, and you also attach a different SCP to a
child OU or to an account, the child policy can further restrict only the
permissions that pass through the parent filter and are available to the
child. An SCP that is attached to a child cannot grant a permission that is
not already granted by the parent. For example, imagine that the parent SCP
allows permissions A, B, C, D, and E. The child SCP allows C, D, E, F, and
G. The result is that the accounts affected by the child SCP are allowed to
use only C, D, and E. They cannot use A or B because they were filtered out
by the child OU. They also cannot use F and G because they were filtered
out by the parent OU. They cannot be granted back by the child SCP; child
SCPs can only filter the permissions they receive from the parent SCP.
  AWS Organizations attaches a default SCP named `FullAWSAccess` to every
root, OU, and account. This default SCP allows all services and actions,
enabling any new child OU or account to inherit the permissions of the
parent root or OU. If you detach the default policy, you must replace it
with a policy that specifies the permissions that you want to allow in that
OU or account.
For more information about how Organizations policies permissions work, see
[Using Service Control
Policies](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scp.html)
in the *AWS Organizations User Guide*.
</li> </ul> This operation can be called only from the organization's
master account.
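
  A hedged call sketch (the parameter names follow the public AttachPolicy
  API; the IDs are placeholders):

      AWS.Organizations.attach_policy(client, %{
        "PolicyId" => "p-examplepolicyid111",
        "TargetId" => "ou-examplerootid111-exampleouid111"
      })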
"""
def attach_policy(client, input, options \\ []) do
request(client, "AttachPolicy", input, options)
end
@doc """
Cancels a handshake. Canceling a handshake sets the handshake state to
`CANCELED`.
This operation can be called only from the account that originated the
handshake. The recipient of the handshake can't cancel it, but can use
`DeclineHandshake` instead. After a handshake is canceled, the recipient
can no longer respond to that handshake.
After you cancel a handshake, it continues to appear in the results of
relevant APIs for only 30 days. After that it is deleted.
"""
def cancel_handshake(client, input, options \\ []) do
request(client, "CancelHandshake", input, options)
end
@doc """
Creates an AWS account that is automatically a member of the organization
whose credentials made the request. This is an asynchronous request that
AWS performs in the background. If you want to check the status of the
request later, you need the `OperationId` response element from this
operation to provide as a parameter to the `DescribeCreateAccountStatus`
operation.
The user who calls the API for an invitation to join must have the
`organizations:CreateAccount` permission. If you enabled all features in
the organization, then the user must also have the
`iam:CreateServiceLinkedRole` permission so that Organizations can create
the required service-linked role named *OrgsServiceLinkedRoleName*. For
more information, see [AWS Organizations and Service-Linked
Roles](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integration_services.html#orgs_integration_service-linked-roles)
in the *AWS Organizations User Guide*.
The user in the master account who calls this API must also have the
`iam:CreateRole` permission because AWS Organizations preconfigures the new
member account with a role (named `OrganizationAccountAccessRole` by
default) that grants users in the master account administrator permissions
in the new member account. Principals in the master account can assume the
role. AWS Organizations clones the company name and address information for
the new account from the organization's master account.
This operation can be called only from the organization's master account.
For more information about creating accounts, see [Creating an AWS Account
in Your
Organization](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html)
in the *AWS Organizations User Guide*.
<important> When you create an account in an organization using the AWS
Organizations console, API, or CLI commands, the information required for
the account to operate as a standalone account, such as a payment method
  and signing the End User License Agreement (EULA), is *not* automatically
collected. If you must remove an account from your organization later, you
can do so only after you provide the missing information. Follow the steps
at [ To leave an organization when all required account information has not
yet been
provided](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info)
in the *AWS Organizations User Guide*.
</important> <note> When you create a member account with this operation,
you can choose whether to create the account with the **IAM User and Role
Access to Billing Information** switch enabled. If you enable it, IAM users
and roles that have appropriate permissions can view billing information
for the account. If you disable this, then only the account root user can
access billing information. For information about how to disable this for
an account, see [Granting Access to Your Billing Information and
Tools](http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html).
</note> This operation can be called only from the organization's master
account.
<important> If you get an exception that indicates that you exceeded your
  account limits for the organization or that you can't add an account
because your organization is still initializing, please contact [ AWS
Customer Support](https://console.aws.amazon.com/support/home#/).
</important>
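
  A hedged input sketch (the parameter names follow the public CreateAccount
  API; verify against the current API reference):

      AWS.Organizations.create_account(client, %{
        "Email" => "admin@example.com",
        "AccountName" => "Dev Account"
      })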
"""
def create_account(client, input, options \\ []) do
request(client, "CreateAccount", input, options)
end
@doc """
Creates an AWS organization. The account whose user is calling the
CreateOrganization operation automatically becomes the [master
account](http://docs.aws.amazon.com/IAM/latest/UserGuide/orgs_getting-started_concepts.html#account)
of the new organization.
This operation must be called using credentials from the account that is to
become the new organization's master account. The principal must also have
the relevant IAM permissions.
By default (or if you set the `FeatureSet` parameter to `ALL`), the new
organization is created with all features enabled and service control
policies automatically enabled in the root. If you instead choose to create
the organization supporting only the consolidated billing features by
  setting the `FeatureSet` parameter to `CONSOLIDATED_BILLING`, then no
policy types are enabled by default and you cannot use organization
policies.
"""
def create_organization(client, input, options \\ []) do
request(client, "CreateOrganization", input, options)
end
@doc """
Creates an organizational unit (OU) within a root or parent OU. An OU is a
container for accounts that enables you to organize your accounts to apply
policies according to your business requirements. The number of levels deep
that you can nest OUs is dependent upon the policy types enabled for that
root. For service control policies, the limit is five.
For more information about OUs, see [Managing Organizational
Units](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_ous.html)
in the *AWS Organizations User Guide*.
This operation can be called only from the organization's master account.
"""
def create_organizational_unit(client, input, options \\ []) do
request(client, "CreateOrganizationalUnit", input, options)
end
@doc """
Creates a policy of a specified type that you can attach to a root, an
organizational unit (OU), or an individual AWS account.
For more information about policies and their use, see [Managing
Organization
Policies](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies.html).
This operation can be called only from the organization's master account.
"""
def create_policy(client, input, options \\ []) do
request(client, "CreatePolicy", input, options)
end
@doc """
Declines a handshake request. This sets the handshake state to `DECLINED`
and effectively deactivates the request.
This operation can be called only from the account that received the
handshake. The originator of the handshake can use `CancelHandshake`
instead. The originator can't reactivate a declined request, but can
re-initiate the process with a new handshake request.
After you decline a handshake, it continues to appear in the results of
relevant APIs for only 30 days. After that it is deleted.
"""
def decline_handshake(client, input, options \\ []) do
request(client, "DeclineHandshake", input, options)
end
@doc """
Deletes the organization. You can delete an organization only by using
credentials from the master account. The organization must be empty of
member accounts, OUs, and policies.
"""
def delete_organization(client, input, options \\ []) do
request(client, "DeleteOrganization", input, options)
end
@doc """
Deletes an organizational unit from a root or another OU. You must first
remove all accounts and child OUs from the OU that you want to delete.
This operation can be called only from the organization's master account.
"""
def delete_organizational_unit(client, input, options \\ []) do
request(client, "DeleteOrganizationalUnit", input, options)
end
@doc """
Deletes the specified policy from your organization. Before you perform
this operation, you must first detach the policy from all OUs, roots, and
accounts.
This operation can be called only from the organization's master account.
"""
def delete_policy(client, input, options \\ []) do
request(client, "DeletePolicy", input, options)
end
@doc """
Retrieves Organizations-related information about the specified account.
This operation can be called only from the organization's master account.
"""
def describe_account(client, input, options \\ []) do
request(client, "DescribeAccount", input, options)
end
@doc """
Retrieves the current status of an asynchronous request to create an
account.
This operation can be called only from the organization's master account.
"""
def describe_create_account_status(client, input, options \\ []) do
request(client, "DescribeCreateAccountStatus", input, options)
end
@doc """
Retrieves information about a previously requested handshake. The handshake
ID comes from the response to the original `InviteAccountToOrganization`
operation that generated the handshake.
You can access handshakes that are ACCEPTED, DECLINED, or CANCELED for only
30 days after they change to that state. They are then deleted and no
longer accessible.
This operation can be called from any account in the organization.
"""
def describe_handshake(client, input, options \\ []) do
request(client, "DescribeHandshake", input, options)
end
@doc """
Retrieves information about the organization that the user's account
belongs to.
This operation can be called from any account in the organization.
<note> Even if a policy type is shown as available in the organization, it
can be disabled separately at the root level with `DisablePolicyType`. Use
`ListRoots` to see the status of policy types for a specified root.
</note>
"""
def describe_organization(client, input, options \\ []) do
request(client, "DescribeOrganization", input, options)
end
@doc """
Retrieves information about an organizational unit (OU).
This operation can be called only from the organization's master account.
"""
def describe_organizational_unit(client, input, options \\ []) do
request(client, "DescribeOrganizationalUnit", input, options)
end
@doc """
Retrieves information about a policy.
This operation can be called only from the organization's master account.
"""
def describe_policy(client, input, options \\ []) do
request(client, "DescribePolicy", input, options)
end
@doc """
Detaches a policy from a target root, organizational unit, or account. If
the policy being detached is a service control policy (SCP), the changes to
permissions for IAM users and roles in affected accounts are immediate.
**Note:** Every root, OU, and account must have at least one SCP attached.
If you want to replace the default `FullAWSAccess` policy with one that
limits the permissions that can be delegated, then you must attach the
replacement policy before you can remove the default one. This is the
authorization strategy of
[whitelisting](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_about-scps.html#orgs_policies_whitelist).
If you instead attach a second SCP and leave the `FullAWSAccess` SCP still
attached, and specify `"Effect": "Deny"` in the second SCP to override the
`"Effect": "Allow"` in the `FullAWSAccess` policy (or any other attached
SCP), then you are using the authorization strategy of
[blacklisting](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_about-scps.html#orgs_policies_blacklist).
This operation can be called only from the organization's master account.
"""
def detach_policy(client, input, options \\ []) do
request(client, "DetachPolicy", input, options)
end
@doc """
Disables the integration of an AWS service (the service that is specified
by `ServicePrincipal`) with AWS Organizations. When you disable
integration, the specified service no longer can create a [service-linked
role](http://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html)
in *new* accounts in your organization. This means the service can't
perform operations on your behalf on any new accounts in your organization.
The service can still perform operations in older accounts until the
service completes its clean-up from AWS Organizations.
<p/> <important> We recommend that you disable integration between AWS
Organizations and the specified AWS service by using the console or
commands that are provided by the specified service. Doing so ensures that
the other service is aware that it can clean up any resources that are
required only for the integration. How the service cleans up its resources
in the organization's accounts depends on that service. For more
information, see the documentation for the other AWS service.
</important> After you perform the `DisableAWSServiceAccess` operation, the
specified service can no longer perform operations in your organization's
accounts unless the operations are explicitly permitted by the IAM policies
that are attached to your roles.
For more information about integrating other services with AWS
Organizations, including the list of services that work with Organizations,
see [Integrating AWS Organizations with Other AWS
Services](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html)
in the *AWS Organizations User Guide*.
This operation can be called only from the organization's master account.
"""
def disable_a_w_s_service_access(client, input, options \\ []) do
request(client, "DisableAWSServiceAccess", input, options)
end
@doc """
Disables an organizational control policy type in a root. A policy of a
certain type can be attached to entities in a root only if that type is
enabled in the root. After you perform this operation, you no longer can
attach policies of the specified type to that root or to any OU or account
in that root. You can undo this by using the `EnablePolicyType` operation.
This operation can be called only from the organization's master account.
<note> If you disable a policy type for a root, it still shows as enabled
for the organization if all features are enabled in that organization. Use
`ListRoots` to see the status of policy types for a specified root. Use
`DescribeOrganization` to see the status of policy types in the
organization.
</note>
"""
def disable_policy_type(client, input, options \\ []) do
request(client, "DisablePolicyType", input, options)
end
@doc """
Enables the integration of an AWS service (the service that is specified by
`ServicePrincipal`) with AWS Organizations. When you enable integration,
you allow the specified service to create a [service-linked
role](http://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html)
in all the accounts in your organization. This allows the service to
perform operations on your behalf in your organization and its accounts.
<important> We recommend that you enable integration between AWS
Organizations and the specified AWS service by using the console or
commands that are provided by the specified service. Doing so ensures that
the service is aware that it can create the resources that are required for
the integration. How the service creates those resources in the
organization's accounts depends on that service. For more information, see
the documentation for the other AWS service.
</important> For more information about enabling services to integrate with
AWS Organizations, see [Integrating AWS Organizations with Other AWS
Services](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html)
in the *AWS Organizations User Guide*.
This operation can be called only from the organization's master account
and only if the organization has [enabled all
features](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html).
"""
def enable_a_w_s_service_access(client, input, options \\ []) do
request(client, "EnableAWSServiceAccess", input, options)
end
@doc """
Enables all features in an organization. This enables the use of
organization policies that can restrict the services and actions that can
be called in each account. Until you enable all features, you have access
only to consolidated billing, and you can't use any of the advanced account
administration features that AWS Organizations supports. For more
information, see [Enabling All Features in Your
Organization](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html)
in the *AWS Organizations User Guide*.
<important> This operation is required only for organizations that were
created explicitly with only the consolidated billing features enabled, or
that were migrated from a Consolidated Billing account family to
Organizations. Calling this operation sends a handshake to every invited
account in the organization. The feature set change can be finalized and
the additional features enabled only after all administrators in the
invited accounts approve the change by accepting the handshake.
</important> After you enable all features, you can separately enable or
disable individual policy types in a root using `EnablePolicyType` and
`DisablePolicyType`. To see the status of policy types in a root, use
`ListRoots`.
After all invited member accounts accept the handshake, you finalize the
feature set change by accepting the handshake that contains `"Action":
"ENABLE_ALL_FEATURES"`. This completes the change.
After you enable all features in your organization, the master account in
the organization can apply policies on all member accounts. These policies
can restrict what users and even administrators in those accounts can do.
The master account can apply policies that prevent accounts from leaving
the organization. Ensure that your account administrators are aware of
this.
This operation can be called only from the organization's master account.
"""
def enable_all_features(client, input, options \\ []) do
request(client, "EnableAllFeatures", input, options)
end
@doc """
Enables a policy type in a root. After you enable a policy type in a root,
you can attach policies of that type to the root, any OU, or account in
that root. You can undo this by using the `DisablePolicyType` operation.
This operation can be called only from the organization's master account.
You can enable a policy type in a root only if that policy type is
available in the organization. Use `DescribeOrganization` to view the
status of available policy types in the organization.
To view the status of a policy type in a root, use `ListRoots`.
"""
def enable_policy_type(client, input, options \\ []) do
request(client, "EnablePolicyType", input, options)
end
@doc """
Sends an invitation to another account to join your organization as a
member account. Organizations sends email on your behalf to the email
address that is associated with the other account's owner. The invitation
is implemented as a `Handshake` whose details are in the response.
<important> You can invite AWS accounts only from the same seller as the
master account. For example, if your organization's master account was
created by Amazon Internet Services Pvt. Ltd (AISPL), an AWS seller in
India, then you can only invite other AISPL accounts to your organization.
You can't combine accounts from AISPL and AWS, or any other AWS seller. For
more information, see [Consolidated Billing in
India](http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/useconsolidatedbilliing-India.html).
</important> This operation can be called only from the organization's
master account.
<important> If you get an exception that indicates that you exceeded your
account limits for the organization or that you can't add an account
because your organization is still initializing, please contact [ AWS
Customer Support](https://console.aws.amazon.com/support/home#/).
</important>
"""
def invite_account_to_organization(client, input, options \\ []) do
request(client, "InviteAccountToOrganization", input, options)
end
@doc """
Removes a member account from its parent organization. This version of the
operation is performed by the account that wants to leave. To remove a
member account as a user in the master account, use
`RemoveAccountFromOrganization` instead.
This operation can be called only from a member account in the
organization.
<important> <ul> <li> The master account in an organization with all
features enabled can set service control policies (SCPs) that can restrict
what administrators of member accounts can do, including preventing them
from successfully calling `LeaveOrganization` and leaving the organization.
</li> <li> You can leave an organization as a member account only if the
account is configured with the information required to operate as a
standalone account. When you create an account in an organization using the
AWS Organizations console, API, or CLI commands, the information required
of standalone accounts is *not* automatically collected. For each account
that you want to make standalone, you must accept the End User License
Agreement (EULA), choose a support plan, provide and verify the required
contact information, and provide a current payment method. AWS uses the
payment method to charge for any billable (not free tier) AWS activity that
occurs while the account is not attached to an organization. Follow the
steps at [ To leave an organization when all required account information
has not yet been
provided](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info)
in the *AWS Organizations User Guide*.
</li> <li> You can leave an organization only after you enable IAM user
access to billing in your account. For more information, see [Activating
Access to the Billing and Cost Management
Console](http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html#ControllingAccessWebsite-Activate)
in the *AWS Billing and Cost Management User Guide*.
</li> </ul> </important>
"""
def leave_organization(client, input, options \\ []) do
request(client, "LeaveOrganization", input, options)
end
@doc """
Returns a list of the AWS services that you enabled to integrate with your
organization. After a service on this list creates the resources that it
requires for the integration, it can perform operations on your
organization and its accounts.
For more information about integrating other services with AWS
Organizations, including the list of services that currently work with
Organizations, see [Integrating AWS Organizations with Other AWS
Services](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html)
in the *AWS Organizations User Guide*.
This operation can be called only from the organization's master account.
"""
def list_a_w_s_service_access_for_organization(client, input, options \\ []) do
request(client, "ListAWSServiceAccessForOrganization", input, options)
end
@doc """
Lists all the accounts in the organization. To request only the accounts in
a specified root or OU, use the `ListAccountsForParent` operation instead.
<note> Always check the `NextToken` response parameter for a `null` value
when calling a `List*` operation. These operations can occasionally return
an empty set of results even when there are more results available. The
`NextToken` response parameter value is `null` *only* when there are no
more results to display.
</note> This operation can be called only from the organization's master
account.
"""
def list_accounts(client, input, options \\ []) do
request(client, "ListAccounts", input, options)
end
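# A minimal pagination sketch following the `NextToken` note above (client
# as in the earlier sketch; the "Accounts"/"NextToken" response keys follow
# the AWS Organizations API):
#
#     defp list_all_accounts(client, token \\ nil, acc \\ []) do
#       input = if token, do: %{"NextToken" => token}, else: %{}
#       {:ok, body, _response} = AWS.Organizations.list_accounts(client, input)
#       accounts = acc ++ (body["Accounts"] || [])
#       case body["NextToken"] do
#         nil -> accounts
#         next -> list_all_accounts(client, next, accounts)
#       end
#     end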
@doc """
Lists the accounts in an organization that are contained by the specified
target root or organizational unit (OU). If you specify the root, you get a
list of all the accounts that are not in any OU. If you specify an OU, you
get a list of all the accounts in only that OU, and not in any child OUs.
To get a list of all accounts in the organization, use the `ListAccounts`
operation.
<note> Always check the `NextToken` response parameter for a `null` value
when calling a `List*` operation. These operations can occasionally return
an empty set of results even when there are more results available. The
`NextToken` response parameter value is `null` *only* when there are no
more results to display.
</note> This operation can be called only from the organization's master
account.
"""
def list_accounts_for_parent(client, input, options \\ []) do
request(client, "ListAccountsForParent", input, options)
end
@doc """
Lists all of the OUs or accounts that are contained in the specified parent
OU or root. This operation, along with `ListParents`, enables you to
traverse the tree structure that makes up this root.
<note> Always check the `NextToken` response parameter for a `null` value
when calling a `List*` operation. These operations can occasionally return
an empty set of results even when there are more results available. The
`NextToken` response parameter value is `null` *only* when there are no
more results to display.
</note> This operation can be called only from the organization's master
account.
"""
def list_children(client, input, options \\ []) do
request(client, "ListChildren", input, options)
end
@doc """
Lists the account creation requests that match the specified status that is
currently being tracked for the organization.
<note> Always check the `NextToken` response parameter for a `null` value
when calling a `List*` operation. These operations can occasionally return
an empty set of results even when there are more results available. The
`NextToken` response parameter value is `null` *only* when there are no
more results to display.
</note> This operation can be called only from the organization's master
account.
"""
def list_create_account_status(client, input, options \\ []) do
request(client, "ListCreateAccountStatus", input, options)
end
@doc """
Lists the current handshakes that are associated with the account of the
requesting user.
Handshakes that are ACCEPTED, DECLINED, or CANCELED appear in the results
of this API for only 30 days after changing to that state. After that they
are deleted and no longer accessible.
<note> Always check the `NextToken` response parameter for a `null` value
when calling a `List*` operation. These operations can occasionally return
an empty set of results even when there are more results available. The
`NextToken` response parameter value is `null` *only* when there are no
more results to display.
</note> This operation can be called from any account in the organization.
"""
def list_handshakes_for_account(client, input, options \\ []) do
request(client, "ListHandshakesForAccount", input, options)
end
@doc """
Lists the handshakes that are associated with the organization that the
requesting user is part of. The `ListHandshakesForOrganization` operation
returns a list of handshake structures. Each structure contains details and
status about a handshake.
Handshakes that are ACCEPTED, DECLINED, or CANCELED appear in the results
of this API for only 30 days after changing to that state. After that they
are deleted and no longer accessible.
<note> Always check the `NextToken` response parameter for a `null` value
when calling a `List*` operation. These operations can occasionally return
an empty set of results even when there are more results available. The
`NextToken` response parameter value is `null` *only* when there are no
more results to display.
</note> This operation can be called only from the organization's master
account.
"""
def list_handshakes_for_organization(client, input, options \\ []) do
request(client, "ListHandshakesForOrganization", input, options)
end
@doc """
Lists the organizational units (OUs) in a parent organizational unit or
root.
<note> Always check the `NextToken` response parameter for a `null` value
when calling a `List*` operation. These operations can occasionally return
an empty set of results even when there are more results available. The
`NextToken` response parameter value is `null` *only* when there are no
more results to display.
</note> This operation can be called only from the organization's master
account.
"""
def list_organizational_units_for_parent(client, input, options \\ []) do
request(client, "ListOrganizationalUnitsForParent", input, options)
end
@doc """
Lists the root or organizational units (OUs) that serve as the immediate
parent of the specified child OU or account. This operation, along with
`ListChildren`, enables you to traverse the tree structure that makes up
this root.
<note> Always check the `NextToken` response parameter for a `null` value
when calling a `List*` operation. These operations can occasionally return
an empty set of results even when there are more results available. The
`NextToken` response parameter value is `null` *only* when there are no
more results to display.
</note> This operation can be called only from the organization's master
account.
<note> In the current release, a child can have only a single parent.
</note>
"""
def list_parents(client, input, options \\ []) do
request(client, "ListParents", input, options)
end
@doc """
Retrieves the list of all policies of a specified type in an organization.
<note> Always check the `NextToken` response parameter for a `null` value
when calling a `List*` operation. These operations can occasionally return
an empty set of results even when there are more results available. The
`NextToken` response parameter value is `null` *only* when there are no
more results to display.
</note> This operation can be called only from the organization's master
account.
"""
def list_policies(client, input, options \\ []) do
request(client, "ListPolicies", input, options)
end
@doc """
Lists the policies that are directly attached to the specified target root,
organizational unit (OU), or account. You must specify the policy type that
you want included in the returned list.
<note> Always check the `NextToken` response parameter for a `null` value
when calling a `List*` operation. These operations can occasionally return
an empty set of results even when there are more results available. The
`NextToken` response parameter value is `null` *only* when there are no
more results to display.
</note> This operation can be called only from the organization's master
account.
"""
def list_policies_for_target(client, input, options \\ []) do
request(client, "ListPoliciesForTarget", input, options)
end
@doc """
Lists the roots that are defined in the current organization.
<note> Always check the `NextToken` response parameter for a `null` value
when calling a `List*` operation. These operations can occasionally return
an empty set of results even when there are more results available. The
`NextToken` response parameter value is `null` *only* when there are no
more results to display.
</note> This operation can be called only from the organization's master
account.
<note> Policy types can be enabled and disabled in roots. This is distinct
from whether they are available in the organization. When you enable all
features, you make policy types available for use in that organization.
Individual policy types can then be enabled and disabled in a root. To see
the availability of a policy type in an organization, use
`DescribeOrganization`.
</note>
"""
def list_roots(client, input, options \\ []) do
request(client, "ListRoots", input, options)
end
@doc """
Lists all the roots, OUs, and accounts to which the specified policy is
attached.
<note> Always check the `NextToken` response parameter for a `null` value
when calling a `List*` operation. These operations can occasionally return
an empty set of results even when there are more results available. The
`NextToken` response parameter value is `null` *only* when there are no
more results to display.
</note> This operation can be called only from the organization's master
account.
"""
def list_targets_for_policy(client, input, options \\ []) do
request(client, "ListTargetsForPolicy", input, options)
end
@doc """
Moves an account from its current source parent root or OU to the specified
destination parent root or OU.
This operation can be called only from the organization's master account.
"""
def move_account(client, input, options \\ []) do
request(client, "MoveAccount", input, options)
end
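# Hypothetical MoveAccount input sketch (parameter names per the AWS
# Organizations API; the IDs are placeholders):
#
#     input = %{"AccountId" => "111111111111",
#               "SourceParentId" => "r-examplerootid111",
#               "DestinationParentId" => "ou-examplerootid111-exampleouid111"}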
@doc """
Removes the specified account from the organization.
The removed account becomes a stand-alone account that is not a member of
any organization. It is no longer subject to any policies and is
responsible for its own bill payments. The organization's master account is
no longer charged for any expenses accrued by the member account after it
is removed from the organization.
This operation can be called only from the organization's master account.
Member accounts can remove themselves with `LeaveOrganization` instead.
<important> <ul> <li> You can remove an account from your organization only
if the account is configured with the information required to operate as a
standalone account. When you create an account in an organization using the
AWS Organizations console, API, or CLI commands, the information required
of standalone accounts is *not* automatically collected. For an account
that you want to make standalone, you must accept the End User License
Agreement (EULA), choose a support plan, provide and verify the required
contact information, and provide a current payment method. AWS uses the
payment method to charge for any billable (not free tier) AWS activity that
occurs while the account is not attached to an organization. To remove an
account that does not yet have this information, you must sign in as the
member account and follow the steps at [ To leave an organization when all
required account information has not yet been
provided](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info)
in the *AWS Organizations User Guide*.
</li> <li> You can remove a member account only after you enable IAM user
access to billing in the member account. For more information, see
[Activating Access to the Billing and Cost Management
Console](http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html#ControllingAccessWebsite-Activate)
in the *AWS Billing and Cost Management User Guide*.
</li> </ul> </important>
"""
def remove_account_from_organization(client, input, options \\ []) do
request(client, "RemoveAccountFromOrganization", input, options)
end
@doc """
Renames the specified organizational unit (OU). The ID and ARN do not
change. The child OUs and accounts remain in place, and any attached
policies of the OU remain attached.
This operation can be called only from the organization's master account.
"""
def update_organizational_unit(client, input, options \\ []) do
request(client, "UpdateOrganizationalUnit", input, options)
end
@doc """
Updates an existing policy with a new name, description, or content. If any
parameter is not supplied, that value remains unchanged. Note that you
cannot change a policy's type.
This operation can be called only from the organization's master account.
"""
def update_policy(client, input, options \\ []) do
request(client, "UpdatePolicy", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "organizations"}
host = get_host("organizations", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSOrganizationsV20161128.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/organizations.ex
| 0.798658 | 0.572842 |
organizations.ex
|
starcoder
|
defmodule Csvto.Type do
@base ~w(integer float boolean string binary decimal naive_datetime datetime date time)a
@composite ~w(array)a
def base_types(), do: @base
def primitive?({composite, _}) when composite in @composite, do: true
def primitive?(base) when base in @base, do: true
def primitive?(composite) when composite in @composite, do: true
def primitive?(_), do: false
def array?(:array), do: true
def array?({:array, _}), do: true
def array?(_), do: false
def default(:integer), do: 0
def default(:float), do: 0.0
def default(:boolean), do: false
def default(:string), do: ""
def default(:binary), do: ""
def default(:decimal), do: Decimal.new(0)
def default(:array), do: []
def default({:array, _}), do: []
def default(_), do: nil
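# Quick sanity examples for the defaults above:
#
#     Csvto.Type.default(:integer)          #=> 0
#     Csvto.Type.default({:array, :string}) #=> []
#     Csvto.Type.default(:date)             #=> nil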
def cast(:integer, value, _opts) do
case Integer.parse(value) do
{int, ""} -> {:ok, int}
_ -> :error
end
end
def cast(:float, value, _opts) do
case Float.parse(value) do
{float, ""} -> {:ok, float}
_ -> :error
end
end
@truthy_values ~w(true 1 yes on)
@falsy_values ~w(false 0 no off)
def cast(:boolean, value, _opts) do
case String.downcase(value) do
truthy when truthy in @truthy_values ->
{:ok, true}
falsy when falsy in @falsy_values ->
{:ok, false}
_ -> :error
end
end
def cast(:string, value, _opts), do: {:ok, value}
def cast(:binary, value, _opts), do: {:ok, value}
def cast(:decimal, value, _opts), do: Decimal.parse(value)
def cast(:naive_datetime, value, opts) do
cast_naive_datetime(value, opts)
end
def cast(:datetime, value, opts) do
case cast_naive_datetime(value, opts) do
{:ok, %NaiveDateTime{year: year, month: month, day: day,
hour: hour, minute: minute, second: second, microsecond: microsecond}} ->
{:ok, %DateTime{year: year, month: month, day: day,
hour: hour, minute: minute, second: second, microsecond: microsecond,
std_offset: 0, utc_offset: 0, zone_abbr: "UTC", time_zone: "Etc/UTC"}}
{:ok, _} = ok ->
ok
:error ->
:error
end
end
def cast(:date, value, opts) do
case Map.fetch(opts, :format) do
{:ok, format} ->
case do_parse_datetime(value, format) do
{:ok, naive_datetime} ->
{:ok, naive_datetime |> NaiveDateTime.to_date}
{:error, _} ->
:error
end
:error ->
case Date.from_iso8601(value) do
{:ok, _} = ok -> ok
{:error, _} -> :error
end
end
end
def cast(:time, value, opts) do
case Map.fetch(opts, :format) do
{:ok, format} ->
case do_parse_datetime(value, format) do
{:ok, naive_datetime} ->
{:ok, naive_datetime |> NaiveDateTime.to_time}
{:error, _} ->
:error
end
:error ->
case Time.from_iso8601(value) do
{:ok, _} = ok -> ok
{:error, _} -> :error
end
end
end
def cast({:array, subtype}, value, opts) do
with {:ok, elems} <- cast(:array, value, opts),
{:ok, array} <- cast_children(subtype, elems, opts),
do: {:ok, array |> Enum.reverse}
end
def cast(:array, value, opts) do
separator = Map.get(opts, :separator, "|")
{:ok, String.split(value, separator)}
end
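# Example casts (the :format option uses a Timex format string, as handled
# by do_parse_datetime/2 below; values are illustrative):
#
#     Csvto.Type.cast({:array, :integer}, "1|2|3", %{})
#     #=> {:ok, [1, 2, 3]}
#     Csvto.Type.cast(:array, "a,b", %{separator: ","})
#     #=> {:ok, ["a", "b"]}
#     Csvto.Type.cast(:date, "2017/01/02", %{format: "{YYYY}/{0M}/{0D}"})
#     #=> {:ok, ~D[2017-01-02]}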
def cast(_type, _value, _opts), do: :error
defp cast_children(type, children, opts) do
Enum.reduce_while(children, {:ok, []}, fn
elem, {:ok, arr} ->
case cast(type, elem, opts) do
{:ok, value} -> {:cont, {:ok, [value | arr]}}
:error -> {:halt, :error}
end
end)
end
defp cast_naive_datetime(binary, opts) when is_binary(binary) do
case Map.fetch(opts, :format) do
{:ok, format} ->
case do_parse_datetime(binary, format) do
{:ok, _} = ok ->
ok
{:error, _} ->
:error
end
:error ->
case NaiveDateTime.from_iso8601(binary) do
{:ok, _} = ok -> ok
{:error, _} -> :error
end
end
end
defp do_parse_datetime(binary, {parser, format}) do
Timex.parse(binary, format, parser)
end
defp do_parse_datetime(binary, format) when is_binary(format) do
Timex.parse(binary, format)
end
end
|
lib/csvto/type.ex
| 0.675978 | 0.468669 |
type.ex
|
starcoder
|
defmodule Zookeeper.DataWatch do
use GenServer
@moduledoc """
Watches a node for data updates and sends an event to the specified
watcher each time the data changes.
The event is also sent the first time the watch is registered, so the
watcher receives the initial data.
If the node does not exist, the event is sent with `nil` for all values.
"""
## Client
@doc """
Create a data watcher for a path.
"""
def start(client, path) do
GenServer.start(__MODULE__, {client, path, self()})
end
@doc """
Create a data watcher for a path.
"""
def start_link(client, path) do
GenServer.start_link(__MODULE__, {client, path, self()})
end
@doc """
Get current data.
"""
def data(pid) do
GenServer.call(pid, :data)
end
@doc """
Stop a data watcher.
"""
def stop(pid) do
GenServer.call(pid, :stop)
end
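# A minimal usage sketch (assumes `zk` is a running Zookeeper.Client pid);
# the message shape matches maybe_send_data/2 below:
#
#     {:ok, _watch} = Zookeeper.DataWatch.start_link(zk, "/some/node")
#     receive do
#       {Zookeeper.DataWatch, ^zk, "/some/node", :data, {data, stat}} ->
#         IO.inspect({data, stat})
#     end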
## Server
def init({client, path, watcher}) do
state = %{
client: client,
path: path,
watcher: watcher,
prior_data: nil
}
Process.monitor(client)
case maybe_get_data(state) do
{:noreply, state} -> {:ok, state}
{:stop, reason, _state} -> {:stop, reason}
end
end
def handle_call(:data, _from, state) do
{:reply, state.prior_data, state}
end
def handle_call(:stop, _from, state) do
{:stop, :normal, :ok, state}
end
def handle_info({Zookeeper.Client, _path, :exists}, state) do
maybe_get_data(state)
end
def handle_info({Zookeeper.Client, _path, :data}, state) do
maybe_get_data(state)
end
def handle_info({:DOWN, _ref, :process, pid, reason}, %{client: pid} = state) do
{:stop, reason, state}
end
def handle_info(_message, state) do
{:noreply, state}
end
## Private
defp maybe_get_data(state) do
if Process.alive?(state.watcher) do
state |> get_data
else
{:stop, :normal, state}
end
end
defp get_data(state) do
case Zookeeper.Client.get(state.client, state.path, self()) do
{:ok, {data, stat}} ->
{:noreply, maybe_send_data({data, stat}, state)}
{:error, :no_node} ->
case Zookeeper.Client.exists(state.client, state.path, self()) do
{:ok, _stat} -> get_data(state)
{:error, :no_node} -> {:noreply, maybe_send_data({nil, nil}, state)}
{:error, reason} -> {:stop, reason, state}
end
{:error, reason} ->
{:stop, reason, state}
end
end
defp maybe_send_data(data, state) do
unless state.prior_data == data do
send(state.watcher, {__MODULE__, state.client, state.path, :data, data})
end
%{state | prior_data: data}
end
end
|
lib/zookeeper/recipe/data_watch.ex
| 0.706089 | 0.458773 |
data_watch.ex
|
starcoder
|
defmodule MangoPay.Dispute do
@moduledoc """
Functions for MangoPay [dispute](https://docs.mangopay.com/endpoints/v2.01/disputes#e176_the-dispute-object) object.
"""
use MangoPay.Query.Base
set_path "disputes"
@doc """
Get a dispute.
## Examples
{:ok, dispute} = MangoPay.Dispute.get(id)
"""
def get id do
_get id
end
@doc """
Get a dispute.
## Examples
dispute = MangoPay.Dispute.get!(id)
"""
def get! id do
_get! id
end
@doc """
Update a dispute.
## Examples
params = %{
"Tag": "custom meta"
}
{:ok, dispute} = MangoPay.Dispute.update(id, params)
"""
def update id, params do
_update params, id
end
@doc """
Update a dispute.
## Examples
params = %{
"Tag": "custom meta"
}
dispute = MangoPay.Dispute.update!(id, params)
"""
def update! id, params do
_update! params, id
end
@doc """
Close a dispute.
## Examples
{:ok, dispute} = MangoPay.Dispute.close("dispute_id")
"""
def close id do
_update %{}, id
end
@doc """
Close a dispute.
## Examples
dispute = MangoPay.Dispute.close!("dispute_id")
"""
def close! id do
_update! %{}, id
end
@doc """
Submit a dispute.
## Examples
params = %{
"ContestedFunds": %{
"Currency": "EUR",
"Amount": 12
}
}
{:ok, dispute} = MangoPay.Dispute.submit("dispute_id", params)
"""
def submit id, params do
_update params, resource_submit(id)
end
@doc """
Submit a dispute.
## Examples
params = %{
"ContestedFunds": %{
"Currency": "EUR",
"Amount": 12
}
}
dispute = MangoPay.Dispute.submit!("dispute_id", params)
"""
def submit! id, params do
_update! params, resource_submit(id)
end
@doc """
Resubmit a dispute.
## Examples
{:ok, dispute} = MangoPay.Dispute.resubmit("dispute_id")
"""
def resubmit id do
_update %{}, resource_submit(id)
end
@doc """
Resubmit a dispute.
## Examples
dispute = MangoPay.Dispute.resubmit!("dispute_id")
"""
def resubmit! id do
_update! %{}, resource_submit(id)
end
@doc """
List all disputes.
## Examples
query = %{
"Page": 1,
"Per_Page": 25,
"Sort": "CreationDate:DESC",
"BeforeDate": 1463440221,
"AfterDate": 1431817821,
"DisputeType": "CONTESTABLE,RETRIEVAL",
"Status": "CREATED, SUCCEEDED"
}
{:ok, disputes} = MangoPay.Dispute.all(query)
"""
def all(query \\ %{}) do
_all(nil, query)
end
@doc """
List all disputes.
## Examples
query = %{
"Page": 1,
"Per_Page": 25,
"Sort": "CreationDate:DESC",
"BeforeDate": 1463440221,
"AfterDate": 1431817821,
"DisputeType": "CONTESTABLE,RETRIEVAL",
"Status": "CREATED, SUCCEEDED"
}
disputes = MangoPay.Dispute.all!(query)
"""
def all!(query \\ %{}) do
_all!(nil, query)
end
@doc """
All disputes for user.
## Examples
query = %{
"Page": 1,
"Per_Page": 25,
"Sort": "CreationDate:DESC",
"BeforeDate": 1463440221,
"AfterDate": 1431817821,
"DisputeType": "CONTESTABLE,RETRIEVAL",
"Status": "CREATED, SUCCEEDED"
}
{:ok, dispute} = MangoPay.Dispute.all_by_user("user_id", query)
"""
def all_by_user id, query \\ %{} do
_all [MangoPay.User.path(id), resource()], query
end
@doc """
All disputes for user.
## Examples
query = %{
"Page": 1,
"Per_Page": 25,
"Sort": "CreationDate:DESC",
"BeforeDate": 1463440221,
"AfterDate": 1431817821,
"DisputeType": "CONTESTABLE,RETRIEVAL",
"Status": "CREATED, SUCCEEDED"
}
dispute = MangoPay.Dispute.all_by_user!("user_id", query)
"""
def all_by_user! id, query \\ %{} do
_all! [MangoPay.User.path(id), resource()], query
end
@doc """
All disputes for a wallet.
## Examples
query = %{
"Page": 1,
"Per_Page": 25,
"Sort": "CreationDate:DESC",
"BeforeDate": 1463440221,
"AfterDate": 1431817821,
"DisputeType": "CONTESTABLE,RETRIEVAL",
"Status": "CREATED, SUCCEEDED"
}
{:ok, dispute} = MangoPay.Dispute.all_by_wallet(wallet_id, query)
"""
def all_by_wallet id, query \\ %{} do
_all [MangoPay.Wallet.path(id), resource()], query
end
@doc """
All disputes for a wallet.
## Examples
query = %{
"Page": 1,
"Per_Page": 25,
"Sort": "CreationDate:DESC",
"BeforeDate": 1463440221,
"AfterDate": 1431817821,
"DisputeType": "CONTESTABLE,RETRIEVAL",
"Status": "CREATED, SUCCEEDED"
}
dispute = MangoPay.Dispute.all_by_wallet!(wallet_id, query)
"""
def all_by_wallet! id, query \\ %{} do
_all! [MangoPay.Wallet.path(id), resource()], query
end
@doc """
All disputes that need settling.
## Examples
query = %{
"Page": 1,
"Per_Page": 25,
"Sort": "CreationDate:DESC",
"BeforeDate": 1463440221,
"AfterDate": 1431817821,
"DisputeType": "CONTESTABLE,RETRIEVAL",
"Status": "CREATED, SUCCEEDED"
}
{:ok, dispute} = MangoPay.Dispute.all_by_pending_settlement(query)
"""
def all_by_pending_settlement(query \\ %{}) do
_all [resource(), "pendingsettlement"], query
end
@doc """
All disputes that need settling.
## Examples
query = %{
"Page": 1,
"Per_Page": 25,
"Sort": "CreationDate:DESC",
"BeforeDate": 1463440221,
"AfterDate": 1431817821,
"DisputeType": "CONTESTABLE,RETRIEVAL",
"Status": "CREATED, SUCCEEDED"
}
dispute = MangoPay.Dispute.all_by_pending_settlement!(query)
"""
def all_by_pending_settlement!(query \\ %{}) do
_all! [resource(), "pendingsettlement"], query
end
defp resource_submit(id) do
[resource(id), "submit"]
end
end
|
lib/mango_pay/dispute.ex
| 0.664976 | 0.46721 |
dispute.ex
|
starcoder
|
defmodule Plug.Upload do
@moduledoc """
A server that manages uploaded files.
Uploaded files are stored in a temporary directory
and removed from the directory after the process that
requested the file dies.
During the request, those files are represented with
the Plug.Upload struct that contains three fields:
* `:path` - the path to the uploaded file on the filesystem
* `:content_type` - the content type of the uploaded file
* `:filename` - the filename of the uploaded file given in the request
"""
defstruct [:path, :content_type, :filename]
@doc """
Requests a random file to be created in the upload directory
with the given prefix.
"""
@spec random_file(binary) ::
{:ok, binary} |
{:too_many_attempts, binary, pos_integer} |
{:no_tmp, [binary]}
def random_file(prefix) do
:gen_server.call(plug_server, {:random, prefix})
end
@doc """
Requests a random file to be created in the upload directory
with the given prefix. Raises on failure.
"""
@spec random_file!(binary) :: binary | no_return
def random_file!(prefix) do
case random_file(prefix) do
{:ok, path} ->
path
{:too_many_attempts, tmp, attempts} ->
raise "tried to #{attempts} times to create an uploaded file at #{tmp} but failed. What gives?"
{:no_tmp, _tmps} ->
raise "could not create a tmp directory to store uploads. Set PLUG_TMPDIR to a directory with write permission"
end
end
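# Typical usage sketch: reserve a temp path, then write the upload body to
# it (the "multipart" prefix and the body/filename values are arbitrary):
#
#     path = Plug.Upload.random_file!("multipart")
#     File.write!(path, body)
#     %Plug.Upload{path: path, content_type: "text/plain", filename: "a.txt"}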
defp plug_server do
Process.whereis(__MODULE__) ||
raise "could not find process Plug.Upload. Have you started the :plug application?"
end
use GenServer
@doc """
Starts the upload handling server.
"""
def start_link() do
:gen_server.start_link({:local, __MODULE__}, __MODULE__, :ok, [])
end
## Callbacks
@temp_env_vars ~w(PLUG_TMPDIR TMPDIR TMP TEMP)s
@max_attempts 10
@doc false
def init(:ok) do
tmp = Enum.find_value @temp_env_vars, "/tmp", &System.get_env/1
cwd = Path.join(File.cwd!, "tmp")
ets = :ets.new(:plug_uploads, [:private])
{:ok, {[tmp, cwd], ets}}
end
@doc false
def handle_call({:random, prefix}, {pid, _ref}, {tmps, ets} = state) do
case find_tmp_dir(pid, tmps, ets) do
{:ok, tmp, paths} ->
{:reply, open_random_file(prefix, tmp, 0, pid, ets, paths), state}
{:no_tmp, _} = error ->
{:reply, error, state}
end
end
def handle_call(msg, from, state) do
super(msg, from, state)
end
@doc false
def handle_info({:DOWN, _ref, :process, pid, _reason}, {_, ets} = state) do
case :ets.lookup(ets, pid) do
[{pid, _tmp, paths}] ->
:ets.delete(ets, pid)
Enum.each paths, &:file.delete/1
[] ->
:ok
end
{:noreply, state}
end
def handle_info(msg, state) do
super(msg, state)
end
## Helpers
defp find_tmp_dir(pid, tmps, ets) do
case :ets.lookup(ets, pid) do
[{^pid, tmp, paths}] ->
{:ok, tmp, paths}
[] ->
if tmp = ensure_tmp_dir(tmps) do
:erlang.monitor(:process, pid)
:ets.insert(ets, {pid, tmp, []})
{:ok, tmp, []}
else
{:no_tmp, tmps}
end
end
end
defp ensure_tmp_dir(tmps) do
{mega, _, _} = :erlang.now
subdir = "/plug-" <> i(mega)
Enum.find_value(tmps, &write_tmp_dir(&1 <> subdir))
end
defp write_tmp_dir(path) do
case File.mkdir_p(path) do
:ok -> path
{:error, _} -> nil
end
end
defp open_random_file(prefix, tmp, attempts, pid, ets, paths) when attempts < @max_attempts do
path = path(prefix, tmp)
case :file.write_file(path, "", [:write, :exclusive, :binary]) do
:ok ->
:ets.update_element(ets, pid, {3, [path|paths]})
{:ok, path}
{:error, reason} when reason in [:eexist, :eacces] ->
open_random_file(prefix, tmp, attempts + 1, pid, ets, paths)
end
end
defp open_random_file(_prefix, tmp, attempts, _pid, _ets, _paths) do
{:too_many_attempts, tmp, attempts}
end
defp i(integer), do: Integer.to_string(integer)
defp path(prefix, tmp) do
{_mega, sec, mili} = :erlang.now
tmp <> "/" <> prefix <> "-" <> i(sec) <> "-" <> i(mili)
end
end
|
lib/plug/upload.ex
| 0.68437 | 0.429788 |
upload.ex
|
starcoder
|
defmodule Oli.Activities.Realizer.Logic.Expression do
@moduledoc """
Represents a logical expression of the general form:
<<fact>> <<operator>> <<value>>
The supported facts are attached objectives, attached tags, activity type
and full text.
Four supported operators exist in two pairs: "equals", "does_not_equal" and
"contains", "does_not_contain" (the snake_case forms accepted by `parse/1`).
These operators work slightly differently depending on which fact they are applied to:
For tags and objectives, the value must be a list for all four operators.
Operator "contains" checks to see if the collection represented by the fact "contains"
the list represented by the "value", even as a subset. For instance: this expression
"tags contains [1, 2]" would evaluate to true if "tags" was equal to [1, 2] or [1, 2, 3], but
not if "tags" equals [1]. To represent the logic of "find activities that have either
tag 1 or tag 2", one would use a disjunctive clause of two separate expressions.
The "equals" operator seeks an exact match of both the value and the fact collections.
For activity type, the "contains" operator acts like the "IN" operator from SQL, as
it evaluates to true if the scalar value of the activity type exists within the value
collection. The "equals" operator takes a scalar value and seeks exact equality with the
activity type.
The "text" fact only supports the "contains" operator which takes a scalar value and performs
a full text search over the model of the activity.
"""
@derive Jason.Encoder
@enforce_keys [:fact, :operator, :value]
defstruct [:fact, :operator, :value]
@type operators ::
:contains | :does_not_contain | :equals | :does_not_equal | :in
@type facts :: :objectives | :tags | :type | :text
@type t() :: %__MODULE__{
fact: facts(),
operator: operators(),
value: [integer()] | String.t()
}
alias Oli.Activities.Realizer.Logic.Expression
def parse(%{"fact" => fact, "operator" => operator, "value" => value}) when is_list(value) do
case {fact, operator} do
{"objectives", "contains"} ->
{:ok, %Expression{fact: :objectives, operator: :contains, value: value}}
{"objectives", "equals"} ->
{:ok, %Expression{fact: :objectives, operator: :equals, value: value}}
{"objectives", "does_not_equal"} ->
{:ok, %Expression{fact: :objectives, operator: :does_not_equal, value: value}}
{"objectives", "does_not_contain"} ->
{:ok, %Expression{fact: :objectives, operator: :does_not_contain, value: value}}
{"tags", "contains"} ->
{:ok, %Expression{fact: :tags, operator: :contains, value: value}}
{"tags", "equals"} ->
{:ok, %Expression{fact: :tags, operator: :equals, value: value}}
{"tags", "does_not_contain"} ->
{:ok, %Expression{fact: :tags, operator: :does_not_contain, value: value}}
{"tags", "does_not_equal"} ->
{:ok, %Expression{fact: :tags, operator: :does_not_equal, value: value}}
{"type", "contains"} ->
{:ok, %Expression{fact: :type, operator: :contains, value: value}}
{"type", "does_not_contain"} ->
{:ok, %Expression{fact: :type, operator: :does_not_contain, value: value}}
_ ->
{:error, "invalid expression"}
end
end
def parse(%{"fact" => fact, "operator" => operator, "value" => value}) do
case {fact, operator} do
{"text", "contains"} when is_binary(value) ->
{:ok, %Expression{fact: :text, operator: :contains, value: value}}
_ ->
{:error, "invalid expression"}
end
end
def parse(expressions) when is_list(expressions) do
Enum.map(expressions, &parse/1)
|> Oli.Activities.ParseUtils.items_or_errors()
end
def parse(_) do
{:error, "invalid criteria expression"}
end
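# Parse examples matching the clauses above (IDs and text are placeholders;
# `Expression` is the alias declared in this module):
#
#     Expression.parse(%{"fact" => "objectives", "operator" => "contains", "value" => [1, 2]})
#     #=> {:ok, %Expression{fact: :objectives, operator: :contains, value: [1, 2]}}
#     Expression.parse(%{"fact" => "text", "operator" => "contains", "value" => "photosynthesis"})
#     #=> {:ok, %Expression{fact: :text, operator: :contains, value: "photosynthesis"}}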
end
|
lib/oli/activities/realizer/logic/expression.ex
| 0.869963 | 0.824214 |
expression.ex
|
starcoder
|
defmodule Swiss.Map do
@moduledoc """
A few extra functions to deal with Maps.
"""
@doc """
Applies defaults to a map.
## Examples
iex> Swiss.Map.defaults(%{a: 42}, %{b: 12})
%{a: 42, b: 12}
iex> Swiss.Map.defaults(%{a: 42}, %{a: 44, b: 12})
%{a: 42, b: 12}
iex> Swiss.Map.defaults(%{a: 42, c: nil}, [a: nil, b: 12, c: 13])
%{a: 42, b: 12, c: nil}
"""
@spec defaults(Map.t(), Map.t() | keyword()) :: Map.t()
def defaults(map, defaults) when is_list(defaults),
do: defaults(map, Enum.into(defaults, %{}))
def defaults(map, defaults) when is_map(defaults),
do: Map.merge(defaults, map)
@doc """
Wrapper around `Map.from_struct/1` that tolerates `nil`.
## Examples
iex> Swiss.Map.from_struct(nil)
nil
iex> Swiss.Map.from_struct(%{__struct__: SomeStruct, life: 42})
%{life: 42}
"""
@spec from_struct(struct | nil) :: Map.t() | nil
def from_struct(nil), do: nil
def from_struct(struct), do: Map.from_struct(struct)
@doc """
Converts an atom-keyed map into a string-keyed map.
## Examples
iex> Swiss.Map.to_string_keys(%{life: 42})
%{"life" => 42}
iex> Swiss.Map.to_string_keys(%{"life" => 42, death: 27})
%{"life" => 42, "death" => 27}
"""
@spec to_string_keys(Map.t()) :: Map.t()
def to_string_keys(map) do
map
|> Map.to_list()
|> Stream.map(fn
{key, value} when is_atom(key) -> {Atom.to_string(key), value}
entry -> entry
end)
|> Enum.into(%{})
end
@doc """
Fetches a value from a map with indifferent access, i.e. given an atom,
returns the value that is keyed by that atom, or by its string equivalent.
If both atom and String keys exist in the map, the atom's value is returned.
## Examples
iex> Swiss.Map.indif_fetch!(%{life: 42}, :life)
42
iex> Swiss.Map.indif_fetch!(%{"life" => 42}, :life)
42
iex> Swiss.Map.indif_fetch!(%{:life => 42, "life" => 64}, :life)
42
iex> Swiss.Map.indif_fetch!(%{}, :life)
** (KeyError) key :life not found in: %{}
"""
@spec indif_fetch!(Map.t(), atom()) :: any()
def indif_fetch!(map, key) when is_atom(key) do
Map.get_lazy(map, key, fn ->
string_key = Atom.to_string(key)
if Map.has_key?(map, string_key) do
map[string_key]
else
raise KeyError, "key #{inspect(key)} not found in: #{inspect(map)}"
end
end)
end
@doc """
Runs `Map.put/3` only if `pred` returns truthy when called on the value.
The default behavior is to put unless the value is `nil`.
## Examples
iex> Swiss.Map.put_if(%{life: 42}, :life, 22)
%{life: 22}
iex> Swiss.Map.put_if(%{life: 42}, :life, nil)
%{life: 42}
iex> Swiss.Map.put_if(%{life: 42}, :life, nil, &is_nil/1)
%{life: nil}
iex> Swiss.Map.put_if(%{life: 42}, :life, 22, &(&1 < 55))
%{life: 22}
"""
@spec put_if(map(), any(), any(), (any() -> boolean())) :: map()
def put_if(map, key, value, pred \\ fn v -> !is_nil(v) end) do
if pred.(value) do
Map.put(map, key, value)
else
map
end
end
@doc """
Runs `Map.put/3` only if `cond` is truthy. Unlike `Swiss.Map.put_if/4`, takes
a function that is called when the condition passes, that should return the
value to insert in the map.
## Examples
iex> Swiss.Map.put_if_lazy(%{life: 42}, :life, fn -> 12 end, true)
%{life: 12}
iex> Swiss.Map.put_if_lazy(%{life: 42}, :life, fn -> 12 end, false)
%{life: 42}
"""
@spec put_if_lazy(map(), any(), (() -> any()), any()) :: map()
def put_if_lazy(map, key, value_fn, condition) do
if condition do
Map.put(map, key, value_fn.())
else
map
end
end
@doc """
Deep merges two maps. Only maps are merged, all other types are overridden.
## Examples
iex> Swiss.Map.deep_merge(%{user: %{id: 42}}, %{user: %{name: "João"}})
%{user: %{id: 42, name: "João"}}
iex> Swiss.Map.deep_merge(
...> %{user: %{id: 42, message: %{id: 22}}},
...> %{user: %{message: %{text: "hi"}}},
...> 1
...> )
%{user: %{id: 42, message: %{text: "hi"}}}
iex> Swiss.Map.deep_merge(
...> %{user: %{id: 42}, messages: [%{id: 1}]},
...> %{user: %{id: 30, age: 40}, messages: [%{id: 2}]}
...> )
%{user: %{id: 30, age: 40}, messages: [%{id: 2}]}
"""
@spec deep_merge(map(), map(), non_neg_integer() | :infinity) :: map()
def deep_merge(map_dest, map_src, max_depth \\ :infinity) do
deep_merge(map_dest, map_src, max_depth, 0)
end
defp deep_merge(map_dest, map_src, max_depth, depth)
when is_number(max_depth) and max_depth <= depth do
Map.merge(map_dest, map_src)
end
defp deep_merge(map_dest, map_src, max_depth, depth) do
Map.merge(map_dest, map_src, fn
_key, value_dest, value_src when is_map(value_dest) and is_map(value_src) ->
deep_merge(value_dest, value_src, max_depth, depth + 1)
_key, _value_dest, value_src ->
value_src
end)
end
@doc """
Applies an updater function to all entries in the given map.
The updater function receives a `{key, value}` tuple and may return a new
value, or a new `{key, value}` tuple.
## Examples
iex> Swiss.Map.update_all(%{a: 1, b: 2}, &(elem(&1, 1) * 2))
%{a: 2, b: 4}
iex> Swiss.Map.update_all(%{a: 1, b: 2}, &{Atom.to_string(elem(&1, 0)), elem(&1, 1) * 3})
%{"a" => 3, "b" => 6}
"""
@spec update_all(map(), ({any(), any()} -> {any(), any()} | any())) :: map()
def update_all(map, updater) do
Enum.reduce(map, %{}, fn {key, value}, acc ->
case updater.({key, value}) do
{new_key, new_value} -> Map.put(acc, new_key, new_value)
new_value -> Map.put(acc, key, new_value)
end
end)
end
end
|
lib/swiss/map.ex
| 0.902982 | 0.686567 |
map.ex
|
starcoder
|
defmodule Alembic.Translator.Minecraft do
@moduledoc """
Translates between packets (as represented internally by Alembic) and the
actual, raw data that gets sent over a socket.
"""
@behaviour Alembic.Translator
alias Alembic.Translator.Minecraft.Packets
@doc """
Reads the next byte to come over the socket; assumes that byte is a packet
ID; uses the corresponding `Alembic.Minecraft.Packets.read_payload!/2`
clause to read the packet's payload; and translates the ID/payload pair
(essentially, the entire packet) into a request that can be handled by
the server's plugins.
Returns `{:ok, request}` in the event that a request is successfully read,
`{:error, reason}` in the event of a failure.
"""
def read_request(socket) do
try do
packet_id = read!(:byte, socket)
{:ok, payload} = Packets.read_payload!(packet_id, socket)
{:ok, Packets.parse_request(packet_id, payload)}
rescue
e in [RuntimeError] -> {:error, e.message}
end
end
@doc """
Encodes the event as a packet of the appropriate type and writes the packet
over the socket as a series of bytes. Returns `:ok` if the entire packet is
successfully written, `{:error, reason}` if something goes wrong.
"""
def write_event(_socket, _event) do
try do
# TODO
:ok
rescue
e in [RuntimeError] -> {:error, e.message}
end
end
@doc """
Reads a value of the specified type from the specified socket. Returns the
value that was read, raising an exception if something went wrong.
Strings, unlike most other data types that we need to read, are of
indeterminate length. Therefore, they are handled by a specialized clause
of the `read!/2` function.
"""
defp read!(:string, socket) do
case read!(:short, socket) do
0 ->
""
length ->
{:ok, bitstring} = socket.recv!(length * 2)
bitstring # TODO: convert from UTF-16 to an Elixir (UTF-8) string
end
end
@doc """
Reads a value of the specified type from the specified socket. Returns the
value that was read, raising an exception if something went wrong.
Byte arrays, unlike most other data types that we need to read, are of
indeterminate length. Therefore, they are handled by a specialized clause
of the `read!/2` function.
"""
defp read!(:byte_array, socket) do
{:ok, bitstring} = read!(:short, socket) |> socket.recv!
bitstring # TODO: convert to byte array
end
@doc """
Reads a value of the specified type from the specified socket. Returns the
value that was read, raising an exception if something went wrong.
"""
defp read!(type, socket) do
{:ok, bitstring} = byte_length(type) |> socket.recv!
format!(type, bitstring)
end
@doc """
Returns the length, in bytes, of a value of the specified type. Used by
`read!/2` to determine how many bytes should be read from a socket before
attempting to translate those bytes into a value.
"""
defp byte_length(type) do
case type do
:bool -> 1
:byte -> 1
:double -> 8
:float -> 4
:int -> 4
:long -> 8
:short -> 2
end
end
@doc """
Matches the bitstring against a pattern associated with the specified type,
extracting and returning a value of that type. If the match fails, an
exception will be raised.
"""
defp format!(type, bitstring) do
s = byte_length(type) * 8
# Seems like there's probably a better way to do this.
# Maybe use macros or something?
case type do
:bool ->
<<byte :: size(s)>> = bitstring
byte === 1
:byte ->
<<byte :: size(s)>> = bitstring
byte
:double ->
<<double :: [size(s), float]>> = bitstring
double
:float ->
<<float :: [size(s), float]>> = bitstring
float
:int ->
<<int :: [size(s), signed]>> = bitstring
int
:long ->
<<long :: [size(s), signed]>> = bitstring
long
:short ->
<<short :: [size(s), signed]>> = bitstring
short
end
end
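# Worked examples of the matches above (values are big-endian, as sent by
# the Minecraft protocol):
#
#     format!(:short, <<1, 2>>)             #=> 258
#     format!(:bool, <<1>>)                 #=> true
#     format!(:int, <<255, 255, 255, 255>>) #=> -1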
end
|
lib/minecraft/translator.ex
| 0.651355 | 0.484929 |
translator.ex
|
starcoder
|
defmodule Playground.Scenario.Counters.Many do
use Playground.Scenario
def scenario_type do
{:iterations, Stream.map(10..20, &round(:math.pow(2, &1)))}
end
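# With the exponents above, the iteration stream yields counter counts of
# 1_024, 2_048, 4_096, ..., 1_048_576 (2^10 through 2^20).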
def scenario_banner do
"""
Scenario: Comparison of Counters in ETS vs Atomics
Tasks:
- Sequentially update <count> ETS counters
- Sequentially update a <count>-arity atomics
- Concurrently update <count> ETS counters
- Concurrently update a <count>-arity atomics
- Concurrently get <count> ETS counters
- Concurrently get <count> ETS counters again
- Concurrently get <count>-arity atomics
- Concurrently get <count>-arity atomics again
"""
end
def scenario_arguments do
[]
end
def scenario_iteration(count) do
IO.write("#{String.pad_leading(Integer.to_string(count), 11)}: ")
table_options = [:set, :public, {:read_concurrency, true}, {:write_concurrency, true}]
table_ref = :ets.new(__MODULE__, table_options)
:ets.insert(table_ref, for(x <- 1..count, do: {x, 0}))
run_tasks("Sequential ets:update_counter/3", 1, fn _ ->
for x <- 1..count do
:ets.update_counter(table_ref, x, {2, 1})
end
end)
atomics_ref = :atomics.new(count, signed: false)
run_tasks("Sequential atomics:add/3", 1, fn _ ->
for x <- 1..count do
:atomics.add(atomics_ref, x, 1)
end
end)
run_tasks("Concurrent ets:update_counter/3", count, fn x ->
:ets.update_counter(table_ref, x, {2, 1})
end)
run_tasks("Concurrent atomics:add/3", 1, fn x ->
:atomics.add(atomics_ref, x, 1)
end)
run_tasks("Concurrent ets:lookup_element/3", count, fn x ->
:ets.lookup_element(table_ref, x, 2)
end)
run_tasks("Concurrent ets:lookup_element/3", count, fn x ->
:ets.lookup_element(table_ref, x, 2)
end)
run_tasks("Concurrent atomics:get/2", count, fn x ->
:atomics.get(atomics_ref, x)
end)
run_tasks("Concurrent atomics:get/2", count, fn x ->
:atomics.get(atomics_ref, x)
end)
end
end
|
lib/playground/scenario/counters/many.ex
| 0.731442 | 0.617787 |
many.ex
|
starcoder
|
defmodule Resty do
@moduledoc """
This module makes it easy for Resty's modules to get default configuration
values.
All of these values can be changed in your config.exs file in order to
globally change the way Resty works.
"""
@doc """
Return the *default headers* that are going to be sent for every resource.
The defaults are:
```
[
"Content-Type": "application/json",
Accept: "application/json; Charset=utf-8"
]
```
This value can be configured in your config.exs file like this:
```
config :resty, headers: [
"Content-Type": "application/json",
Accept: "application/json; Charset=utf-8"
]
```
You can also set it on a per resource basis thanks to the
`Resty.Resource.Base.set_headers/1` macro.
"""
def default_headers do
Application.get_env(:resty, :headers,
"Content-Type": "application/json",
Accept: "application/json; Charset=utf-8"
)
end
@doc """
Return the global `Resty.Connection` that will be used to query every
resource.
This value can be configured in your config.exs file like this:
```
config :resty, connection: Resty.Connection.HTTPoison
```
You can also set it on a per resource basis thanks to the
`Resty.Resource.Base.set_connection/1` macro.
"""
def default_connection do
Application.get_env(:resty, :connection, Resty.Connection.HTTPoison)
end
@doc """
Return the `Resty.Auth` implementation that should be used to authenticate
outgoing requests.
The default is `Resty.Auth.Null`
This value can be configured in your config.exs file like this:
```
config :resty, auth: Resty.Auth.Null
```
You can also set it on a per resource basis thanks to the
`Resty.Resource.Base.with_auth/2` macro.
"""
def default_auth do
Application.get_env(:resty, :auth, Resty.Auth.Null)
end
@doc """
Return the `Resty.Serializer` implementation that should be used to serialize
and deserialize resources.
The default is `Resty.Serializer.Json`
This value can be configured in your config.exs file like this:
```
config :resty, serializer: Resty.Serializer.Json
```
You can also set it on a per resource basis thanks to the
`Resty.Resource.Base.set_serializer/2` macro.
"""
def default_serializer do
Application.get_env(:resty, :serializer, Resty.Serializer.Json)
end
@doc """
Return the default site that is going to be queried for every resource.
The default is `nil`.
This value can be configured in your config.exs file like this:
```
config :resty, site: "https://my-webservice.com/api/v2"
```
You can also set it on a per resource basis thanks to the
`Resty.Resource.Base.set_site/1` macro.
"""
def default_site do
Application.get_env(:resty, :site, nil)
end
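# A per-resource override sketch using the macros referenced in the docs
# above (the resource module and path are hypothetical; see
# Resty.Resource.Base for the real API):
#
#     defmodule MyApp.Post do
#       use Resty.Resource.Base
#       set_site "https://my-webservice.com/api/v2"
#       set_resource_path "posts"
#     end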
end
|
lib/resty.ex
| 0.843927 | 0.652311 |
resty.ex
|
starcoder
|
defmodule Opencensus.Trace do
@moduledoc """
Macros to help Elixir programmers use OpenCensus tracing.
"""
@doc """
Wrap the given block in a child span with the given label/name and optional attributes.
Sets `Logger.metadata/0` with `Opencensus.Logger.set_logger_metadata/0` after changing the span
context tracked in the process dictionary.
No attributes:
```elixir
with_child_span "child_span" do
:do_something
end
with_child_span "child_span", %{} do
:do_something
end
```
Custom attributes:
```elixir
with_child_span "child_span", [:module, :function, %{"custom_id" => "xxx"}] do
:do_something
end
```
Automatic insertion of the `module`, `file`, `line`, or `function`:
```elixir
with_child_span "child_span", [:module, :function, %{}] do
:do_something
end
```
Collapsing multiple attribute maps (last wins):
```elixir
with_child_span "child_span", [:function, %{"a" => "b", "c" => "d"}, %{"c" => "e"}] do
:do_something
end
```
"""
defmacro with_child_span(label, attributes \\ quote(do: %{}), do: block) do
line = __CALLER__.line
module = __CALLER__.module
file = __CALLER__.file
function = format_function(__CALLER__.function)
computed_attributes =
compute_attributes(attributes, %{
line: line,
module: module,
file: file,
function: function
})
quote do
parent_span_ctx = :ocp.current_span_ctx()
new_span_ctx =
:oc_trace.start_span(unquote(label), parent_span_ctx, %{
:attributes => unquote(computed_attributes)
})
_ = :ocp.with_span_ctx(new_span_ctx)
Opencensus.Logger.set_logger_metadata()
try do
unquote(block)
after
_ = :oc_trace.finish_span(new_span_ctx)
_ = :ocp.with_span_ctx(parent_span_ctx)
Opencensus.Logger.set_logger_metadata()
end
end
end
defp compute_attributes(attributes, default_attributes) when is_list(attributes) do
{atoms, custom_attributes} = Enum.split_with(attributes, &is_atom/1)
default_attributes = compute_default_attributes(atoms, default_attributes)
case Enum.split_with(custom_attributes, fn
## map ast
{:%{}, _, _} -> true
_ -> false
end) do
{[ca_map | ca_maps], []} ->
## custom attributes are literal maps, merge 'em
{:%{}, meta, custom_attributes} =
List.foldl(ca_maps, ca_map, fn {:%{}, _, new_pairs}, {:%{}, meta, old_pairs} ->
{:%{}, meta,
:maps.to_list(:maps.merge(:maps.from_list(old_pairs), :maps.from_list(new_pairs)))}
end)
{:%{}, meta,
:maps.to_list(:maps.merge(:maps.from_list(custom_attributes), default_attributes))}
{_ca_maps, _other_calls} ->
[f_ca | r_ca] = custom_attributes
quote do
unquote(
List.foldl(r_ca ++ [Macro.escape(default_attributes)], f_ca, fn ca, acc ->
quote do
Map.merge(unquote(acc), unquote(ca))
end
end)
)
end
end
end
defp compute_attributes(attributes, _default_attributes) do
attributes
end
defp compute_default_attributes(atoms, default_attributes) do
List.foldl(atoms, %{}, fn
:default, _acc ->
default_attributes
atom, acc ->
Map.put(acc, atom, Map.fetch!(default_attributes, atom))
end)
end
defp format_function(nil), do: nil
defp format_function({name, arity}), do: "#{name}/#{arity}"
@doc """
Drop-in replacement for `Task.async/1` that propagates the process' span context.
Does NOT start a new span for what's inside. Consider `with_child_span/3`.
"""
@spec async((() -> any())) :: Task.t()
def async(fun) when is_function(fun, 0) do
async(:erlang, :apply, [fun, []])
end
@doc """
Drop-in replacement for `Task.async/3` that propagates the process' span context.
Does NOT start a new span for what's inside. Consider `with_child_span/3`.
"""
@spec async(module(), atom(), [term()]) :: Task.t()
def async(module, function_name, args)
when is_atom(module) and is_atom(function_name) and is_list(args) do
original_span_ctx = :ocp.current_span_ctx()
wrapper = fn ->
:ocp.with_span_ctx(original_span_ctx)
apply(module, function_name, args)
end
Task.async(wrapper)
end
@doc """
Drop-in replacement for `Task.await/2`.
"""
@spec await(Task.t(), :infinity | pos_integer()) :: term()
defdelegate await(task, timeout \\ 5000), to: Task
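# Combined usage sketch: open a child span, fan work out to a task that
# inherits the span context, and await the result:
#
#     import Opencensus.Trace
#
#     with_child_span "fan_out", [:module, %{"kind" => "demo"}] do
#       task = Opencensus.Trace.async(fn -> :work end)
#       Opencensus.Trace.await(task)
#     end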
end
|
lib/opencensus/trace.ex
| 0.84317 | 0.837686 |
trace.ex
|
starcoder
|
defmodule MMDB2Decoder do
@moduledoc """
MMDB2 file format decoder.
## Usage
To prepare lookups in a given database you need to parse it
and hold the result available for later usage:
iex(1)> database = File.read!("/path/to/database.mmdb")
iex(2)> {:ok, meta, tree, data} = MMDB2Decoder.parse_database(database)
Using the returned database contents you can start looking up
individual entries:
iex(3)> {:ok, ip} = :inet.parse_address(String.to_charlist("127.0.0.1"))
iex(4)> MMDB2Decoder.lookup(ip, meta, tree, data)
{:ok, %{...}}
For more details on the lookup methods (and a function suitable for
direct piping) please see the individual function documentations.
## Lookup Options
The behaviour of the decoder can be adjusted by passing an option map as the
last argument to the lookup functions:
iex> MMDB2Decoder.lookup(ip, meta, tree, data, %{map_keys: :atoms!})
The following options are available:
- `:map_keys` defines the type of the keys in a decoded map:
- `:strings` is the default value
- `:atoms` uses `String.to_atom/1`
- `:atoms!` uses `String.to_existing_atom/1`
- `:double_precision` defines the precision of decoded Double values
- `nil` is the default for "unlimited" precision
- any value from `t:Float.precision_range/0` to round the precision to
- `:float_precision` defines the precision of decoded Float values
- `nil` is the default for "unlimited" precision
- any value from `t:Float.precision_range/0` to round the precision to
"""
alias MMDB2Decoder.Data
alias MMDB2Decoder.Database
alias MMDB2Decoder.LookupTree
alias MMDB2Decoder.Metadata
@type decode_options :: %{
optional(:double_precision) => nil | Float.precision_range(),
optional(:float_precision) => nil | Float.precision_range(),
optional(:map_keys) => nil | :atoms | :atoms! | :strings
}
@type decoded_value :: :cache_container | :end_marker | binary | boolean | list | map | number
@type lookup_value :: decoded_value | nil
@type lookup_result :: {:ok, lookup_value} | {:error, term}
@type parse_result :: {:ok, Metadata.t(), binary, binary} | {:error, term}
@type tree_result :: {:ok, non_neg_integer} | {:error, term}
@default_decode_options %{
double_precision: nil,
float_precision: nil,
map_keys: :strings
}
@doc """
Fetches the pointer of an IP in the data if available.
The pointer will be calculated to be relative to the start of the binary data.
## Usage
iex> MMDB2Decoder.find_pointer({127, 0, 0, 1}, meta, tree)
{:ok, 123456}
"""
@spec find_pointer(:inet.ip_address(), Metadata.t(), binary) :: tree_result
def find_pointer(ip, meta, tree) do
case LookupTree.locate(ip, meta, tree) do
{:error, _} = error -> error
{:ok, pointer} -> {:ok, pointer - meta.node_count - 16}
end
end
@doc """
Calls `find_pointer/3` and raises if an error occurs.
"""
@spec find_pointer!(:inet.ip_address(), Metadata.t(), binary) :: non_neg_integer | no_return
def find_pointer!(ip, meta, tree) do
case find_pointer(ip, meta, tree) do
{:ok, pointer} -> pointer
{:error, error} -> raise Kernel.to_string(error)
end
end
@doc """
Looks up the data associated with an IP tuple.
This is probably the main function you will use. The `ip` address is expected
to be a 4- or 8-element tuple describing an IPv4 or IPv6 address. To obtain
this tuple from a string you can use `:inet.parse_address/1`.
## Usage
iex> MMDB2Decoder.lookup({127, 0, 0, 1}, meta, tree, data)
{
:ok,
%{
"continent" => %{...},
"country" => %{...},
"registered_country" => %{...}
}
}
The values for `meta`, `tree` and `data` can be obtained by
parsing the file contents of a database using `parse_database/1`.
"""
@spec lookup(:inet.ip_address(), Metadata.t(), binary, binary, decode_options) :: lookup_result
def lookup(ip, meta, tree, data, options \\ @default_decode_options) do
case find_pointer(ip, meta, tree) do
{:error, _} = error -> error
{:ok, pointer} -> lookup_pointer(pointer, data, options)
end
end
@doc """
Calls `lookup/4` and raises if an error occurs.
"""
@spec lookup!(:inet.ip_address(), Metadata.t(), binary, binary, decode_options) ::
lookup_value | no_return
def lookup!(ip, meta, tree, data, options \\ @default_decode_options) do
case lookup(ip, meta, tree, data, options) do
{:ok, result} -> result
{:error, error} -> raise Kernel.to_string(error)
end
end
@doc """
Fetches the data at a given pointer position.
The pointer is expected to be relative to the start of the binary data.
## Usage
iex> MMDB2Decoder.lookup_pointer(123456, data)
{
:ok,
%{
"continent" => %{...},
"country" => %{...},
"registered_country" => %{...}
}
}
"""
@spec lookup_pointer(non_neg_integer, binary, decode_options) :: {:ok, lookup_value}
def lookup_pointer(pointer, data, options \\ @default_decode_options) do
{:ok, Data.value(data, pointer, options)}
end
@doc """
Calls `lookup_pointer/3` and unrolls the return tuple.
"""
@spec lookup_pointer!(non_neg_integer, binary, decode_options) :: lookup_value
def lookup_pointer!(pointer, data, options \\ @default_decode_options) do
{:ok, value} = lookup_pointer(pointer, data, options)
value
end
@doc """
Parses a database binary and splits it into metadata, lookup tree and data.
It is expected that you pass the real contents of the file, not the name
of the database or the path to it.
## Usage
iex> MMDB2Decoder.parse_database(File.read!("/path/to/database.mmdb"))
{
:ok,
%MMDB2Decoder.Metadata{...},
<<...>>,
<<...>>
}
If parsing the database fails you will receive an appropriate error tuple:
iex> MMDB2Decoder.parse_database("invalid-database-contents")
{:error, :no_metadata}
"""
@spec parse_database(binary) :: parse_result
def parse_database(contents) do
case Database.split_contents(contents) do
[_] -> {:error, :no_metadata}
[data, meta] -> Database.split_data(meta, data)
end
end
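  # The parsed result is meant to be computed once and reused for many
  # lookups. One possible way to hold it (illustrative, not part of this
  # library's API) is `:persistent_term`:
  #
  #     {:ok, meta, tree, data} = MMDB2Decoder.parse_database(File.read!(path))
  #     :persistent_term.put(:mmdb2, {meta, tree, data})
  #     {meta, tree, data} = :persistent_term.get(:mmdb2)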
@doc """
Utility method to pipe `parse_database/1` directly to `lookup/4`.
## Usage
Depending on how you handle the parsed database contents you may
want to pass the results directly to the lookup.
iex> "/path/to/database.mmdb"
...> |> File.read!()
...> |> MMDB2Decoder.parse_database()
...> |> MMDB2Decoder.pipe_lookup({127, 0, 0, 1})
{:ok, %{...}}
"""
@spec pipe_lookup(parse_result, :inet.ip_address(), decode_options) :: lookup_result
def pipe_lookup(parse_result, ip, options \\ @default_decode_options)
def pipe_lookup({:error, _} = error, _, _), do: error
def pipe_lookup({:ok, meta, tree, data}, ip, options),
do: lookup(ip, meta, tree, data, options)
@doc """
Calls `pipe_lookup/2` and raises if an error from `parse_database/1` is given
or occurs during `lookup/4`.
"""
@spec pipe_lookup!(parse_result, :inet.ip_address(), decode_options) ::
lookup_value | no_return
def pipe_lookup!(parse_result, ip, options \\ @default_decode_options)
def pipe_lookup!({:error, error}, _, _), do: raise(Kernel.to_string(error))
def pipe_lookup!({:ok, meta, tree, data}, ip, options),
do: lookup!(ip, meta, tree, data, options)
end
|
lib/mmdb2_decoder.ex
| 0.871884 | 0.688505 |
mmdb2_decoder.ex
|
starcoder
|
defmodule ExthCrypto.ECIES.Parameters do
@moduledoc """
Returns one set of the Standard ECIES parameters:
* ECIES using AES128 and HMAC-SHA-256-16
* ECIES using AES256 and HMAC-SHA-256-32
* ECIES using AES256 and HMAC-SHA-384-48
* ECIES using AES256 and HMAC-SHA-512-64
"""
defstruct mac: nil,
hasher: nil,
cipher: nil,
key_len: nil
@type t :: %__MODULE__{
mac: ExthCrypto.Hash.hash_algorithm(),
hasher: ExthCrypto.Hash.hash_type(),
cipher: ExthCrypto.Cipher.cipher(),
key_len: integer()
}
alias ExthCrypto.Hash.SHA
alias ExthCrypto.AES
@doc """
Returns curve parameters for ECIES with AES-128 symmetric
encryption and SHA-256 hash.
"""
@spec ecies_aes128_sha256() :: t
def ecies_aes128_sha256 do
%__MODULE__{
mac: :sha256,
hasher: {&SHA.sha256/1, nil, 32},
cipher: {AES, AES.block_size(), :ctr},
key_len: 16
}
end
@doc """
Returns curve parameters for ECIES with AES-256 symmetric
encryption and SHA-256 hash.
"""
@spec ecies_aes256_sha256() :: t
def ecies_aes256_sha256 do
%__MODULE__{
mac: :sha256,
hasher: {&SHA.sha256/1, nil, 32},
cipher: {AES, AES.block_size(), :ctr},
key_len: 32
}
end
@doc """
Returns curve parameters for ECIES with AES-256 symmetric
encryption and SHA-384 hash.
"""
@spec ecies_aes256_sha384() :: t
def ecies_aes256_sha384 do
%__MODULE__{
mac: :sha256,
hasher: {&SHA.sha384/1, nil, 48},
cipher: {AES, AES.block_size(), :ctr},
key_len: 32
}
end
@doc """
Returns curve parameters for ECIES with AES-256 symmetric
encryption and SHA-512 hash.
"""
@spec ecies_aes256_sha512() :: t
def ecies_aes256_sha512 do
%__MODULE__{
mac: :sha256,
hasher: {&SHA.sha512/1, nil, 64},
cipher: {AES, AES.block_size(), :ctr},
key_len: 32
}
end
@doc """
Returns the block size of a given set of ECIES params.
## Examples
iex> ExthCrypto.ECIES.Parameters.block_size(ExthCrypto.ECIES.Parameters.ecies_aes256_sha512)
32
"""
@spec block_size(t) :: integer()
def block_size(params) do
{_, block_size, _args} = params.cipher
block_size
end
@doc """
Returns the hash len of a given set of ECIES params.
## Examples
iex> ExthCrypto.ECIES.Parameters.hash_len(ExthCrypto.ECIES.Parameters.ecies_aes256_sha256)
32
iex> ExthCrypto.ECIES.Parameters.hash_len(ExthCrypto.ECIES.Parameters.ecies_aes256_sha512)
64
"""
@spec hash_len(t) :: integer()
def hash_len(params) do
# Get size of hash cipher
{_, _, hash_len} = params.hasher
hash_len
end
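  # Illustrative only: selecting a parameter set and reading derived sizes
  # (values follow from the struct fields and the doctests above):
  #
  #     params = ExthCrypto.ECIES.Parameters.ecies_aes128_sha256()
  #     params.key_len                                  #=> 16
  #     ExthCrypto.ECIES.Parameters.hash_len(params)    #=> 32
  #     ExthCrypto.ECIES.Parameters.block_size(params)  #=> 32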
end
|
apps/exth_crypto/lib/ecies/parameters.ex
| 0.899646 | 0.49292 |
parameters.ex
|
starcoder
|
defmodule Descisionex.PaymentMatrix do
@moduledoc """
Decision making under uncertainty with a payment (decision) matrix and
the Wald, Laplace, Savage, Hurwitz, and generalized criteria.
See https://en.wikipedia.org/wiki/Decision-matrix_method
"""
alias Descisionex.{PaymentMatrix, Helper}
defstruct matrix: [],
variants: [],
variants_num: 0,
possible_steps: [],
possible_steps_num: 0,
hurwitz_additional_value: 0.5,
generalized_additional_value: 0.5,
wald_criterion: %{},
laplace_criterion: %{},
savage_criterion: %{},
hurwitz_criterion: %{},
generalized_criterion: %{}
@doc """
Sets the variants for the payment matrix.
## Examples
iex> %Descisionex.PaymentMatrix{} |> Descisionex.PaymentMatrix.set_variants(["some", "variants"])
%Descisionex.PaymentMatrix{
generalized_additional_value: 0.5,
generalized_criterion: %{},
hurwitz_additional_value: 0.5,
hurwitz_criterion: %{},
laplace_criterion: %{},
matrix: [],
possible_steps: [],
possible_steps_num: 0,
savage_criterion: %{},
variants: ["some", "variants"],
variants_num: 2,
wald_criterion: %{}
}
"""
def set_variants(%PaymentMatrix{} = data, variants) do
data
|> Map.put(:variants, variants)
|> Map.put(:variants_num, Enum.count(variants))
end
@doc """
Sets the possible steps for the payment matrix.
## Examples
iex> %Descisionex.PaymentMatrix{} |> Descisionex.PaymentMatrix.set_steps(["some", "steps"])
%Descisionex.PaymentMatrix{
generalized_additional_value: 0.5,
generalized_criterion: %{},
hurwitz_additional_value: 0.5,
hurwitz_criterion: %{},
laplace_criterion: %{},
matrix: [],
possible_steps: ["some", "steps"],
possible_steps_num: 2,
savage_criterion: %{},
variants: [],
variants_num: 0,
wald_criterion: %{}
}
"""
def set_steps(%PaymentMatrix{} = data, steps) do
data
|> Map.put(:possible_steps, steps)
|> Map.put(:possible_steps_num, Enum.count(steps))
end
@doc """
Sets the Hurwitz additional value for the payment matrix (range from 0.1 to 0.9); defaults to 0.5.
## Examples
iex> %Descisionex.PaymentMatrix{} |> Descisionex.PaymentMatrix.set_hurwitz_additional_value(0.3)
%Descisionex.PaymentMatrix{
generalized_additional_value: 0.5,
generalized_criterion: %{},
hurwitz_additional_value: 0.3,
hurwitz_criterion: %{},
laplace_criterion: %{},
matrix: [],
possible_steps: [],
possible_steps_num: 0,
savage_criterion: %{},
variants: [],
variants_num: 0,
wald_criterion: %{}
}
iex> %Descisionex.PaymentMatrix{} |> Descisionex.PaymentMatrix.set_hurwitz_additional_value(0)
** (ArgumentError) Hurwitz additional value incorrect (number range must be from 0.1 to 0.9)
"""
def set_hurwitz_additional_value(%PaymentMatrix{} = data, value) do
if 0.1 <= value && value <= 0.9 do
Map.put(data, :hurwitz_additional_value, value)
else
raise ArgumentError,
message: "Hurwitz additional value incorrect (number range must be from 0.1 to 0.9)"
end
end
@doc """
Sets the generalized additional value for the payment matrix (range from 0.1 to 0.9); defaults to 0.5.
## Examples
iex> %Descisionex.PaymentMatrix{} |> Descisionex.PaymentMatrix.set_generalized_additional_value(0.3)
%Descisionex.PaymentMatrix{
generalized_additional_value: 0.3,
generalized_criterion: %{},
hurwitz_additional_value: 0.5,
hurwitz_criterion: %{},
laplace_criterion: %{},
matrix: [],
possible_steps: [],
possible_steps_num: 0,
savage_criterion: %{},
variants: [],
variants_num: 0,
wald_criterion: %{}
}
iex> %Descisionex.PaymentMatrix{} |> Descisionex.PaymentMatrix.set_generalized_additional_value(0)
** (ArgumentError) Generalized additional value incorrect (number range must be from 0.1 to 0.9)
"""
def set_generalized_additional_value(%PaymentMatrix{} = data, value) do
if 0.1 <= value && value <= 0.9 do
Map.put(data, :generalized_additional_value, value)
else
raise ArgumentError,
message: "Generalized additional value incorrect (number range must be from 0.1 to 0.9)"
end
end
@doc """
Calculates the Wald criterion for the payment matrix.
## Examples
iex> %Descisionex.PaymentMatrix{matrix: [[1, 2], [3, 4]]} |> Descisionex.PaymentMatrix.calculate_wald_criterion()
%Descisionex.PaymentMatrix{
generalized_additional_value: 0.5,
generalized_criterion: %{},
hurwitz_additional_value: 0.5,
hurwitz_criterion: %{},
laplace_criterion: %{},
matrix: [[1, 2], [3, 4]],
possible_steps: [],
possible_steps_num: 0,
savage_criterion: %{},
variants: [],
variants_num: 0,
wald_criterion: %{criterion: 4, strategy_index: 1}
}
"""
def calculate_wald_criterion(%PaymentMatrix{} = data) do
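    # As implemented: take each strategy's (row's) best payoff, then pick
    # the strategy with the overall maximum.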
all_criteria = Enum.map(data.matrix, fn row -> Enum.max(row) end)
{wald_criterion, strategy_index} = Helper.find_max_criteria(all_criteria)
Map.put(data, :wald_criterion, %{criterion: wald_criterion, strategy_index: strategy_index})
end
@doc """
Calculates the Laplace criterion for the payment matrix (variants must be set).
## Examples
iex> %Descisionex.PaymentMatrix{matrix: [[1, 2], [3, 4]]} |> Descisionex.PaymentMatrix.calculate_laplace_criterion()
** (ArgumentError) For Laplace criterion variants must be set!
iex> %Descisionex.PaymentMatrix{matrix: [[1, 2], [3, 4]]} |> Descisionex.PaymentMatrix.set_variants(["some", "variants"]) |> Descisionex.PaymentMatrix.calculate_laplace_criterion()
%Descisionex.PaymentMatrix{
generalized_additional_value: 0.5,
generalized_criterion: %{},
hurwitz_additional_value: 0.5,
hurwitz_criterion: %{},
laplace_criterion: %{criterion: 3.5, strategy_index: 1},
matrix: [[1, 2], [3, 4]],
possible_steps: [],
possible_steps_num: 0,
savage_criterion: %{},
variants: ["some", "variants"],
variants_num: 2,
wald_criterion: %{}
}
"""
def calculate_laplace_criterion(%PaymentMatrix{} = data) do
variant_rows = data.variants_num
if variant_rows == 0,
do: raise(ArgumentError, message: "For Laplace criterion variants must be set!")
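    # Laplace: treat all variants as equally likely, so score each
    # strategy (row) by its mean payoff, then pick the best row.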
all_criteria =
data.matrix
|> Enum.map(fn row ->
Enum.map(row, fn element ->
Float.round(element / variant_rows, 3)
end)
end)
|> Enum.map(fn row -> Enum.sum(row) end)
{laplace_criterion, strategy_index} = Helper.find_max_criteria(all_criteria)
Map.put(data, :laplace_criterion, %{
criterion: laplace_criterion,
strategy_index: strategy_index
})
end
@doc """
Calculates the Hurwitz criterion for the payment matrix.
## Examples
iex> %Descisionex.PaymentMatrix{matrix: [[1, 2], [3, 4]]} |> Descisionex.PaymentMatrix.calculate_hurwitz_criterion()
%Descisionex.PaymentMatrix{
generalized_additional_value: 0.5,
generalized_criterion: %{},
hurwitz_additional_value: 0.5,
hurwitz_criterion: %{criterion: 3.5, strategy_index: 1},
laplace_criterion: %{},
matrix: [[1, 2], [3, 4]],
possible_steps: [],
possible_steps_num: 0,
savage_criterion: %{},
variants: [],
variants_num: 0,
wald_criterion: %{}
}
"""
def calculate_hurwitz_criterion(%PaymentMatrix{} = data) do
additional_value = data.hurwitz_additional_value
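    # Hurwitz: blend optimism and pessimism per row as
    # `a * max(row) + (1 - a) * min(row)`, then pick the best row.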
max =
data.matrix
|> Enum.map(fn row -> Enum.max(row) end)
|> Enum.map(fn element ->
num = element * additional_value
if is_float(num), do: Float.round(num, 3), else: num
end)
|> Enum.with_index()
min =
data.matrix
|> Enum.map(fn row -> Enum.min(row) end)
|> Enum.map(fn element ->
num = element * (1 - additional_value)
if is_float(num), do: Float.round(num, 3), else: num
end)
{hurwitz_criterion, strategy_index} =
max
|> Enum.map(fn {element, index} ->
element + Enum.at(min, index)
end)
|> Helper.find_max_criteria()
Map.put(data, :hurwitz_criterion, %{
criterion: hurwitz_criterion,
strategy_index: strategy_index
})
end
@doc """
Calculates the Savage criterion for the payment matrix.
## Examples
iex> %Descisionex.PaymentMatrix{matrix: [[1, 2], [3, 4]]} |> Descisionex.PaymentMatrix.calculate_savage_criterion()
%Descisionex.PaymentMatrix{
generalized_additional_value: 0.5,
generalized_criterion: %{},
hurwitz_additional_value: 0.5,
hurwitz_criterion: %{},
laplace_criterion: %{},
matrix: [[1, 2], [3, 4]],
possible_steps: [],
possible_steps_num: 0,
savage_criterion: %{criterion: 0, strategy_index: 1},
variants: [],
variants_num: 0,
wald_criterion: %{}
}
"""
def calculate_savage_criterion(%PaymentMatrix{} = data) do
matrix = data.matrix
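    # Savage (minimax regret): a cell's regret is its column's best payoff
    # minus the cell; minimize each row's worst regret.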
max =
matrix
|> Matrix.transpose()
|> Enum.map(fn row -> Enum.max(row) end)
all_criteria =
matrix
|> Enum.map(fn row ->
Enum.zip(max, row)
|> Enum.map(fn {risk, elem} ->
num = risk - elem
if is_float(num), do: Float.round(num, 3), else: num
end)
end)
|> Enum.map(fn row -> Enum.max(row) end)
{savage_criterion, strategy_index} = Helper.find_min_criteria(all_criteria)
Map.put(data, :savage_criterion, %{
criterion: savage_criterion,
strategy_index: strategy_index
})
end
@doc """
Calculates the generalized criterion for the payment matrix.
## Examples
iex> %Descisionex.PaymentMatrix{matrix: [[1, 2], [3,4]]} |> Descisionex.PaymentMatrix.calculate_generalized_criterion()
%Descisionex.PaymentMatrix{
generalized_additional_value: 0.5,
generalized_criterion: %{criterion: 1.5, strategy_index: 0},
hurwitz_additional_value: 0.5,
hurwitz_criterion: %{},
laplace_criterion: %{},
matrix: [[1, 2], [3, 4]],
possible_steps: [],
possible_steps_num: 0,
savage_criterion: %{},
variants: [],
variants_num: 0,
wald_criterion: %{}
}
"""
def calculate_generalized_criterion(%PaymentMatrix{} = data) do
additional_value = data.generalized_additional_value
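    # As implemented: weight each row's max and min by the same
    # coefficient `a`, sum them, and pick the row with the smallest score.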
max =
data.matrix
|> Enum.map(fn row -> Enum.max(row) end)
|> Enum.map(fn element ->
num = element * additional_value
if is_float(num), do: Float.round(num, 3), else: num
end)
|> Enum.with_index()
min =
data.matrix
|> Enum.map(fn row -> Enum.min(row) end)
|> Enum.map(fn element ->
num = element * additional_value
if is_float(num), do: Float.round(num, 3), else: num
end)
{generalized_criterion, strategy_index} =
max
|> Enum.map(fn {element, index} ->
element + Enum.at(min, index)
end)
|> Helper.find_min_criteria()
Map.put(data, :generalized_criterion, %{
criterion: generalized_criterion,
strategy_index: strategy_index
})
end
@doc """
Calculates all criteria for the payment matrix.
## Examples
iex> %Descisionex.PaymentMatrix{matrix: [[1, 2], [3,4]]} |> Descisionex.PaymentMatrix.set_variants(["some", "variants"]) |> Descisionex.PaymentMatrix.calculate_criteria()
%Descisionex.PaymentMatrix{
generalized_additional_value: 0.5,
generalized_criterion: %{criterion: 1.5, strategy_index: 0},
hurwitz_additional_value: 0.5,
hurwitz_criterion: %{criterion: 3.5, strategy_index: 1},
laplace_criterion: %{criterion: 3.5, strategy_index: 1},
matrix: [[1, 2], [3, 4]],
possible_steps: [],
possible_steps_num: 0,
savage_criterion: %{criterion: 0, strategy_index: 1},
variants: ["some", "variants"],
variants_num: 2,
wald_criterion: %{criterion: 4, strategy_index: 1}
}
"""
def calculate_criteria(%PaymentMatrix{} = data) do
data
|> calculate_wald_criterion()
|> calculate_savage_criterion()
|> calculate_laplace_criterion()
|> calculate_hurwitz_criterion()
|> calculate_generalized_criterion()
end
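  # Typical flow (values taken from the doctests above); `strategy_index`
  # is a zero-based row index into the matrix:
  #
  #     result =
  #       %Descisionex.PaymentMatrix{matrix: [[1, 2], [3, 4]]}
  #       |> Descisionex.PaymentMatrix.set_variants(["some", "variants"])
  #       |> Descisionex.PaymentMatrix.calculate_criteria()
  #
  #     result.wald_criterion  #=> %{criterion: 4, strategy_index: 1}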
end
|
lib/algorithms/payment_matrix.ex
| 0.89468 | 0.564399 |
payment_matrix.ex
|
starcoder
|
defmodule Holidays.Define do
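  @moduledoc """
  GenServer that stores holiday definitions (fixed dates, nth-weekday
  rules, and `{module, function, args}` date calculations) and answers
  `on/2` queries filtered by region.
  """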
use GenServer
alias Holidays.DateCalculator.DateMath
def start_link() do
GenServer.start_link(__MODULE__, [], name: __MODULE__)
end
def holiday(name, %{month: month, day: day, regions: regions}) do
GenServer.cast(__MODULE__, {:add_entry, :static, {name, month, day, regions}})
end
def holiday(name, %{month: month, week: week, weekday: weekday, regions: regions}) do
GenServer.cast(__MODULE__, {:add_entry, :nth, {name, month, week, weekday, regions}})
end
def holiday(name, %{function: function, regions: regions}) do
GenServer.cast(__MODULE__, {:add_entry, :fun, {name, function, regions}})
end
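  # Illustrative definitions (region atoms and rule values are
  # hypothetical; weekday numbering follows whatever convention
  # `DateMath.get_week_and_weekday/1` uses):
  #
  #     Holidays.Define.holiday(:new_years_day,
  #       %{month: 1, day: 1, regions: [:us]})
  #     Holidays.Define.on({2020, 1, 1}, [:us])
  #     #=> [%{name: :new_years_day}]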
@spec on(:calendar.date, [Holidays.region]) :: list
def on(date, regions) do
GenServer.call(__MODULE__, {:on, date, regions})
end
defp on_all(%{static: statics, nth: nths, fun: funs}, date) do
on_static(statics, date) ++
on_nth(nths, date) ++
on_fun(funs, date)
end
defp on_static(holidays, {_, month, day}) do
holidays
|> Enum.filter(fn
{_, ^month, ^day, _} -> true
_ -> false
end)
|> Enum.map(fn {name, _, _, regions} -> %{name: name, regions: regions} end)
end
defp on_nth(holidays, date) do
DateMath.get_week_and_weekday(date)
|> Enum.flat_map(&on_nth(&1, holidays, date))
end
defp on_nth({week, weekday}, holidays, {_, month, _}) do
holidays
|> Enum.filter(&match?({_, ^month, ^week, ^weekday, _}, &1))
|> Enum.map(fn {name, _, _, _, regions} -> %{name: name, regions: regions} end)
end
defp on_fun(holidays, date) do
holidays
|> Enum.filter(fn {_, fun, _} -> apply_fun(fun, date) == date end)
|> Enum.map(fn {name, _, regions} -> %{name: name, regions: regions} end)
end
defp apply_fun({mod, fun, args, days}, date) do
apply_fun({mod, fun, args}, date)
|> DateMath.add_days(days)
end
defp apply_fun({mod, fun, [:year]}, {year, _, _}) do
apply(mod, fun, [year])
end
defp region_match?(%{regions: holiday_regions}, regions_set) do
!(MapSet.new(holiday_regions)
|> MapSet.disjoint?(regions_set))
end
def init([]) do
{:ok, %{static: [], nth: [], fun: []}}
end
def handle_cast({:add_entry, type, definition}, state) do
{:noreply, Map.update!(state, type, &([definition | &1]))}
end
def handle_call({:on, date, regions}, _from, state) do
regions_set = MapSet.new(regions)
result = state
|> on_all(date)
|> Enum.filter(®ion_match?(&1, regions_set))
|> Enum.map(fn %{name: name} -> %{name: name} end)
{:reply, result, state}
end
end
|
lib/holidays/define.ex
| 0.694821 | 0.40698 |
define.ex
|
starcoder
|
defmodule Surface.API do
@moduledoc false
@types [:any, :css_class, :list, :event, :children, :boolean, :string, :date,
:datetime, :number, :integer, :decimal, :map, :fun, :atom, :module,
:changeset, :form]
@private_opts [:action, :to]
defmacro __using__([include: include]) do
arities = %{
property: [2, 3],
data: [2, 3],
context: [1]
}
functions = for func <- include, arity <- arities[func], into: [], do: {func, arity}
quote do
import unquote(__MODULE__), only: unquote(functions)
@before_compile unquote(__MODULE__)
@after_compile unquote(__MODULE__)
Module.register_attribute(__MODULE__, :assigns, accumulate: false)
for func <- unquote(include) do
Module.register_attribute(__MODULE__, func, accumulate: true)
end
end
end
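  # A hypothetical component module using this API (names are
  # illustrative):
  #
  #     defmodule MyButton do
  #       use Surface.API, include: [:property, :data]
  #
  #       property label, :string, required: true
  #       data clicks, :integer, default: 0
  #     end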
defmacro __before_compile__(env) do
generate_docs(env)
[
quoted_property_funcs(env),
quoted_data_funcs(env),
quoted_context_funcs(env)
]
end
def __after_compile__(env, _) do
validate_has_init_context(env)
end
@doc "Defines a property for the component"
defmacro property(name_ast, type, opts \\ []) do
build_assign_ast(:property, name_ast, type, opts, __CALLER__)
end
@doc "Defines a data assign for the component"
defmacro data(name_ast, type, opts \\ []) do
build_assign_ast(:data, name_ast, type, opts, __CALLER__)
end
@doc """
Sets or retrieves a context assign.
### Usage
```
context set name, type, opts \\ []
context get name, opts
```
### Examples
```
context set form, :form
...
context get form, from: Form
```
"""
# context get
defmacro context({:get, _, [name_ast, opts]}) when is_list(opts) do
opts = [{:action, :get} | opts]
build_assign_ast(:context, name_ast, :any, opts, __CALLER__)
end
defmacro context({:get, _, [_name_ast, type]}) when type in @types do
message = "cannot redefine the type of the assign when using action :get. " <>
"The type is already defined by a parent component using action :set"
raise %CompileError{line: __CALLER__.line, file: __CALLER__.file, description: message}
end
defmacro context({:get, _, [name_ast, invalid_opts]}) do
build_assign_ast(:context, name_ast, :any, invalid_opts, __CALLER__)
end
defmacro context({:get, _, [name_ast]}) do
opts = [action: :get]
build_assign_ast(:context, name_ast, :any, opts, __CALLER__)
end
defmacro context({:get, _, nil}) do
message = "no name defined for context get"
raise %CompileError{line: __CALLER__.line, file: __CALLER__.file, description: message}
end
# context set
defmacro context({:set, _, [name_ast, type, opts]}) when is_list(opts) do
opts = Keyword.merge(opts, action: :set, to: __CALLER__.module)
build_assign_ast(:context, name_ast, type, opts, __CALLER__)
end
defmacro context({:set, _, [_name_ast, opts]}) when is_list(opts) do
message = "no type defined for context set. Type is required after the name."
raise %CompileError{line: __CALLER__.line, file: __CALLER__.file, description: message}
end
defmacro context({:set, _, [name_ast, type]}) do
opts = [action: :set, to: __CALLER__.module]
build_assign_ast(:context, name_ast, type, opts, __CALLER__)
end
defmacro context({:set, _, [_name_ast]}) do
message = "no type defined for context set. Type is required after the name."
raise %CompileError{line: __CALLER__.line, file: __CALLER__.file, description: message}
end
# invalid usage
defmacro context({_action, _, args}) when length(args) > 2 do
message = "invalid use of context. Usage: `context get name, opts`" <>
" or `context set name, type, opts \\ []`"
raise %CompileError{line: __CALLER__.line, file: __CALLER__.file, description: message}
end
defmacro context({action, _, _}) do
message = "invalid context action. Expected :get or :set, got: #{Macro.to_string(action)}"
raise %CompileError{line: __CALLER__.line, file: __CALLER__.file, description: message}
end
@doc false
def maybe_put_assign!(caller, assign) do
assign = %{assign | doc: pop_doc(caller.module)}
assigns = Module.get_attribute(caller.module, :assigns) || %{}
name = Keyword.get(assign.opts, :as, assign.name)
existing_assign = assigns[name]
if Keyword.get(assign.opts, :scope) != :only_children do
if existing_assign do
message = "cannot use name \"#{assign.name}\". There's already " <>
"a #{existing_assign.func} assign with the same name " <>
"at line #{existing_assign.line}." <> suggestion_for_duplicated_assign(assign)
raise %CompileError{line: assign.line, file: caller.file, description: message}
else
assigns = Map.put(assigns, name, assign)
Module.put_attribute(caller.module, :assigns, assigns)
end
end
Module.put_attribute(caller.module, assign.func, assign)
assign
end
defp suggestion_for_duplicated_assign(%{func: :context, opts: opts}) do
"\nHint: " <>
case Keyword.get(opts, :action) do
:set ->
"""
if you only need this context assign in the child components, \
you can set option :scope as :only_children to solve the issue.\
"""
:get ->
"you can use the :as option to set another name for the context assign."
end
end
defp suggestion_for_duplicated_assign(_assign) do
""
end
defp quoted_data_funcs(env) do
data = Module.get_attribute(env.module, :data) || []
quote do
@doc false
def __data__() do
unquote(Macro.escape(data))
end
end
end
defp quoted_property_funcs(env) do
props = Module.get_attribute(env.module, :property) || []
props_names = Enum.map(props, fn prop -> prop.name end)
props_by_name = for p <- props, into: %{}, do: {p.name, p}
quote do
@doc false
def __props__() do
unquote(Macro.escape(props))
end
@doc false
def __validate_prop__(prop) do
prop in unquote(props_names)
end
@doc false
def __get_prop__(name) do
Map.get(unquote(Macro.escape(props_by_name)), name)
end
end
end
defp quoted_context_funcs(env) do
context = Module.get_attribute(env.module, :context) || []
{gets, sets} = Enum.split_with(context, fn c -> c.opts[:action] == :get end)
sets_in_scope = Enum.filter(sets, fn var -> var.opts[:scope] != :only_children end)
assigns = gets ++ sets_in_scope
quote do
@doc false
def __context_gets__() do
unquote(Macro.escape(gets))
end
@doc false
def __context_sets__() do
unquote(Macro.escape(sets))
end
@doc false
def __context_sets_in_scope__() do
unquote(Macro.escape(sets_in_scope))
end
@doc false
def __context_assigns__() do
unquote(Macro.escape(assigns))
end
end
end
defp build_assign_ast(func, name_ast, type, opts, caller) do
validate!(func, name_ast, type, opts, caller)
{name, _, _} = name_ast
quote do
unquote(__MODULE__).maybe_put_assign!(__ENV__, %{
func: unquote(func),
name: unquote(name),
type: unquote(type),
doc: nil,
opts: unquote(opts),
opts_ast: unquote(Macro.escape(opts)),
line: unquote(caller.line)
})
end
end
defp validate!(func, name_ast, type, opts, caller) do
{evaluated_opts, _} = Code.eval_quoted(opts, [], caller)
with {:ok, name} <- validate_name(func, name_ast),
:ok <- validate_type(func, name, type),
:ok <- validate_opts(func, name, type, evaluated_opts),
:ok <- validate_required_opts(func, type, evaluated_opts) do
:ok
else
{:error, message} ->
file = Path.relative_to_cwd(caller.file)
raise %CompileError{line: caller.line, file: file, description: message}
end
end
defp validate_name(_func, {name, meta, context})
when is_atom(name) and is_list(meta) and is_atom(context) do
{:ok, name}
end
defp validate_name(func, name_ast) do
{:error, "invalid #{func} name. Expected a variable name, got: #{Macro.to_string(name_ast)}"}
end
# defp validate_type(_func, _name, nil) do
# {:error, "action :set requires the type of the assign as third argument"}
# end
defp validate_type(_func, _name, type) when type in @types do
:ok
end
defp validate_type(func, name, type) do
message =
"""
invalid type #{Macro.to_string(type)} for #{func} #{name}.
Expected one of #{inspect(@types)}.
Hint: Use :any if the type is not listed.\
"""
{:error, message}
end
defp validate_required_opts(func, type, opts) do
case get_required_opts(func, type, opts) -- Keyword.keys(opts) do
[] ->
:ok
missing_opts ->
{:error, "the following options are required: #{inspect(missing_opts)}"}
end
end
defp validate_opts(func, name, type, opts) do
with true <- Keyword.keyword?(opts),
keys <- Keyword.keys(opts),
valid_opts <- get_valid_opts(func, type, opts),
[] <- keys -- valid_opts ++ @private_opts do
Enum.reduce_while(keys, :ok, fn key, _ ->
case validate_opt(func, type, key, opts[key]) do
:ok ->
{:cont, :ok}
error ->
{:halt, error}
end
end)
else
false ->
{:error,
"invalid options for #{func} #{name}. " <>
"Expected a keyword list of options, got: #{inspect(opts)}"}
unknown_options ->
valid_opts = get_valid_opts(func, type, opts)
{:error, unknown_options_message(valid_opts, unknown_options)}
end
end
defp get_valid_opts(:property, :list, _opts) do
[:required, :default, :binding]
end
defp get_valid_opts(:property, :children, _opts) do
[:required, :group, :use_bindings]
end
defp get_valid_opts(:property, _type, _opts) do
[:required, :default, :values]
end
defp get_valid_opts(:data, _type, _opts) do
[:default, :values]
end
defp get_valid_opts(:context, _type, opts) do
case Keyword.fetch!(opts, :action) do
:get ->
[:from, :as]
:set ->
[:scope]
end
end
defp get_required_opts(:context, _type, opts) do
case Keyword.fetch!(opts, :action) do
:get ->
[:from]
_ ->
[]
end
end
defp get_required_opts(_func, _type, _opts) do
[]
end
defp validate_opt(_func, _type, :required, value) when not is_boolean(value) do
{:error, "invalid value for option :required. Expected a boolean, got: #{inspect(value)}"}
end
defp validate_opt(_func, _type, :values, value) when not is_list(value) do
{:error, "invalid value for option :values. Expected a list of values, got: #{inspect(value)}"}
end
defp validate_opt(:context, _type, :scope, value)
when value not in [:only_children, :self_and_children] do
{:error, "invalid value for option :scope. Expected :only_children or :self_and_children, got: #{inspect(value)}"}
end
defp validate_opt(:context, _type, :from, value) when not is_atom(value) do
{:error, "invalid value for option :from. Expected a module, got: #{inspect(value)}"}
end
defp validate_opt(:context, _type, :as, value) when not is_atom(value) do
{:error, "invalid value for option :as. Expected an atom, got: #{inspect(value)}"}
end
defp validate_opt(_func, _type, _opts, _key) do
:ok
end
defp unknown_options_message(valid_opts, unknown_options) do
{plural, unknown_items} =
case unknown_options do
[option] ->
{"", option}
_ ->
{"s", unknown_options}
end
"unknown option#{plural} #{inspect(unknown_items)}. " <>
"Available options: #{inspect(valid_opts)}"
end
defp format_opts(opts_ast) do
opts_ast
|> Macro.to_string()
|> String.slice(1..-2)
end
defp generate_docs(env) do
props_doc = generate_props_docs(env.module)
{line, doc} =
case Module.get_attribute(env.module, :moduledoc) do
nil ->
{env.line, props_doc}
{line, doc} ->
{line, doc <> "\n" <> props_doc}
end
Module.put_attribute(env.module, :moduledoc, {line, doc})
end
defp generate_props_docs(module) do
docs =
for prop <- Module.get_attribute(module, :property) do
doc = if prop.doc, do: " - #{prop.doc}.", else: ""
opts = if prop.opts == [], do: "", else: ", #{format_opts(prop.opts_ast)}"
"* **#{prop.name}** *#{inspect(prop.type)}#{opts}*#{doc}"
end
|> Enum.reverse()
|> Enum.join("\n")
"""
### Properties
#{docs}
"""
end
defp validate_has_init_context(env) do
if !function_exported?(env.module, :init_context, 1) do
for var <- Module.get_attribute(env.module, :context) || [] do
if Keyword.get(var.opts, :action) == :set do
message = "context assign \"#{var.name}\" not initialized. " <>
"You should implement an init_context/1 callback and initialize its " <>
"value by returning {:ok, #{var.name}: ...}"
Surface.Translator.IO.warn(message, env, fn _ -> var.line end)
end
end
end
end
defp pop_doc(module) do
doc =
case Module.get_attribute(module, :doc) do
{_, doc} -> doc
_ -> nil
end
Module.delete_attribute(module, :doc)
doc
end
end
|
lib/surface/api.ex
| 0.694924 | 0.612773 |
api.ex
|
starcoder
|