code
stringlengths 114
1.05M
| path
stringlengths 3
312
| quality_prob
float64 0.5
0.99
| learning_prob
float64 0.2
1
| filename
stringlengths 3
168
| kind
stringclasses 1
value |
---|---|---|---|---|---|
defmodule Tox.Period do
  @moduledoc """
  A `Period` struct and functions.

  The Period struct contains the fields `year`, `month`, `week`, `day`, `hour`,
  `minute` and second. The values for the fields represent the amount of time
  for a unit. Except `second`, all values are integers equal or greater than
  `0`. The field `second` can also be a float equals to or greater than `0`.
  """

  # Factor for converting the fractional part of `second` into microseconds.
  @microseconds_per_second 1_000_000

  # Designator-to-field maps for the `PiYiMiWiDTiHiMfS` format. `?M` is
  # ambiguous: it means month before the `T` separator and minute after it,
  # hence two separate maps.
  @key_map_date %{?Y => :year, ?M => :month, ?W => :week, ?D => :day}
  @key_map_time %{?H => :hour, ?M => :minute, ?S => :second}

  @typedoc """
  An amount of time with a specified unit e.g. `{:second, 5.5}` or `{:hour, 1}`.

  The amount of all durations must be equal to or greater than `0`.
  """
  @type duration ::
          {:year, non_neg_integer()}
          | {:month, non_neg_integer}
          | {:week, non_neg_integer}
          | {:day, non_neg_integer}
          | {:hour, non_neg_integer}
          | {:minute, non_neg_integer}
          | {:second, non_neg_integer}

  @type t :: %__MODULE__{
          year: non_neg_integer(),
          month: non_neg_integer(),
          week: non_neg_integer(),
          day: non_neg_integer(),
          hour: non_neg_integer(),
          minute: non_neg_integer(),
          second: non_neg_integer() | float()
        }

  defstruct year: 0, month: 0, week: 0, day: 0, hour: 0, minute: 0, second: 0

  @doc """
  Creates a new period. All values in durations must be greater than or equal
  to `0`.

  ## Examples

      iex> {:ok, period} = Tox.Period.new(1, 2, 3, 4, 5, 6, 7.8)
      iex> period
      #Tox.Period<P1Y2M3W4DT5H6M7.8S>

      iex> Tox.Period.new(1, 2, 3, 4, 5, 6, -7.8)
      {:error, :invalid_period}

  """
  @spec new(
          year :: non_neg_integer(),
          month :: non_neg_integer(),
          week :: non_neg_integer(),
          day :: non_neg_integer(),
          hour :: non_neg_integer(),
          minute :: non_neg_integer(),
          second :: non_neg_integer() | float
        ) :: {:ok, t()} | {:error, :invalid_period}
  def new(year, month, week, day, hour, minute, second) do
    # Delegates to new/1 with a keyword list of durations.
    new(
      year: year,
      month: month,
      week: week,
      day: day,
      hour: hour,
      minute: minute,
      second: second
    )
  end

  @doc """
  Creates a new period or raise an error.

  See `new/7` for more information.

  ## Examples

      iex> Tox.Period.new!(1, 2, 3, 4, 5, 6, 7.8)
      #Tox.Period<P1Y2M3W4DT5H6M7.8S>

      iex> Tox.Period.new!(1, 2, 3, 4, 5, 6, -7.8)
      ** (ArgumentError) cannot create a new period with [year: 1, month: 2, week: 3, day: 4, hour: 5, minute: 6, second: -7.8], reason: :invalid_period

  """
  @spec new!(
          year :: non_neg_integer(),
          month :: non_neg_integer(),
          week :: non_neg_integer(),
          day :: non_neg_integer(),
          hour :: non_neg_integer(),
          minute :: non_neg_integer(),
          second :: non_neg_integer() | float
        ) :: t()
  def new!(year, month, week, day, hour, minute, second) do
    new!(
      year: year,
      month: month,
      week: week,
      day: day,
      hour: hour,
      minute: minute,
      second: second
    )
  end

  @doc """
  Creates a new period from `durations`. All values in the `durations` must be
  equal to or greater than `0`.

  ## Examples

      iex> {:ok, period} = Tox.Period.new(day: 4, hour: 5)
      iex> period
      #Tox.Period<P4DT5H>

      iex> Tox.Period.new(minute: -1)
      {:error, :invalid_period}

  """
  @spec new([duration()]) :: {:ok, t()} | {:error, :invalid_period}
  def new(durations) do
    case is_valid?(durations) do
      true -> {:ok, struct(__MODULE__, durations)}
      false -> {:error, :invalid_period}
    end
  end

  @doc """
  Creates a new period from `durations` or raises an error.

  See `new/1` for more information.

  ## Examples

      iex> Tox.Period.new!(month: 1, minute: 1)
      #Tox.Period<P1MT1M>

      iex> Tox.Period.new!(year: 0.5)
      ** (ArgumentError) cannot create a new period with [year: 0.5], reason: :invalid_period

  """
  @spec new!([duration()]) :: t()
  def new!(durations) do
    case new(durations) do
      {:ok, period} ->
        period

      {:error, reason} ->
        raise ArgumentError,
              "cannot create a new period with #{inspect(durations)}, " <>
                "reason: #{inspect(reason)}"
    end
  end

  @doc """
  Creates a new period from a string.

  A string representation of a period has the format `PiYiMiWiDTiHiMfS`. The `i`
  represents an integer and the `f` a float. All integers and the float must be
  equal to or greater than `0`. Leading zeros are not required. The capital
  letters `P` , `Y`, `M`, `W`, `D`, `T`, `H`, `M`, and `S` are designators for
  each of the date and time elements and are not replaced.

  * P is the period designator (optional).
  * Y is the year designator that follows the value for the number of years.
  * M is the month designator that follows the value for the number of months.
  * W is the week designator that follows the value for the number of weeks.
  * D is the day designator that follows the value for the number of days.
  * T is the time designator that precedes the time components of the representation.
  * H is the hour designator that follows the value for the number of hours.
  * M is the minute designator that follows the value for the number of minutes.
  * S is the second designator that follows the value for the number of seconds.

  ## Examples

      iex> Tox.Period.parse("1Y3M")
      Tox.Period.new(year: 1, month: 3)

      iex> Tox.Period.parse("T12M5.5S")
      Tox.Period.new(minute: 12, second: 5.5)

      iex> Tox.Period.parse("P1Y3MT2H")
      Tox.Period.new(year: 1, month: 3, hour: 2)

      iex> Tox.Period.parse("1y")
      {:error, :invalid_format}

  """
  @spec parse(String.t()) :: {:ok, t()} | {:error, :invalid_format}
  # The leading period designator "P" is optional; strip it and re-parse.
  def parse("P" <> string) when is_binary(string), do: parse(string)

  def parse(string) when is_binary(string) do
    # A parse failure falls through as {:error, :invalid_format}; a valid
    # format may still produce {:error, :invalid_period} via new/1.
    with {:ok, durations} <- do_parse(string) do
      new(durations)
    end
  end

  @doc """
  Creates a new period from a string.

  See `parse/1` for more information.

  ## Examples

      iex> Tox.Period.parse!("T12M5.5S")
      #Tox.Period<PT12M5.5S>

      iex> Tox.Period.parse!("1y")
      ** (ArgumentError) cannot parse "1y" as period, reason: :invalid_format

  """
  @spec parse!(String.t()) :: t()
  def parse!(string) do
    case parse(string) do
      {:ok, period} ->
        period

      {:error, reason} ->
        raise ArgumentError,
              "cannot parse #{inspect(string)} as period, reason: #{inspect(reason)}"
    end
  end

  @doc """
  Returns the `period` as `[Tox.duration]`. The optional `sign` can be `:pos`
  for positive `durations` and `:neg` for negative `durations`, defaults to
  `:pos`. A duration with an amount of `0` will be excluded from the
  `durations`.

  ## Examples

      iex> {:ok, period} = Tox.Period.parse("P1Y3MT2H1.123S")
      iex> Tox.Period.to_durations(period)
      [year: 1, month: 3, hour: 2, second: 1, microsecond: 123000]
      iex> Tox.Period.to_durations(period, :neg)
      [year: -1, month: -3, hour: -2, second: -1, microsecond: -123000]

      iex> {:ok, period} = Tox.Period.parse("1MT1M")
      iex> Tox.Period.to_durations(period)
      [month: 1, minute: 1]

  """
  @spec to_durations(t(), :pos | :neg) :: [Tox.duration()]
  def to_durations(period, sign \\ :pos)

  def to_durations(%__MODULE__{} = period, sign) when sign in [:pos, :neg] do
    # The keys are visited smallest-unit-first while Keyword.put prepends,
    # so the resulting list comes out largest-unit-first (year ... second).
    Enum.reduce([:second, :minute, :hour, :day, :week, :month, :year], [], fn key, durations ->
      do_to_durations(durations, period, key, sign)
    end)
  end

  # Helpers

  # A durations list is valid when at least one amount is positive (an
  # all-zero period is rejected) and every amount is non-negative. Only
  # :second may be a float; all other units must be integers.
  # NOTE(review): Elixir convention would name this valid?/1 (no is_ prefix
  # outside guards); kept as-is since this edit only adds documentation.
  defp is_valid?(durations) do
    Enum.any?(durations, fn {_unit, value} -> value > 0 end) &&
      Enum.all?(durations, fn
        {:second, value} -> is_number(value) && value >= 0
        {_unit, value} -> is_integer(value) && value >= 0
      end)
  end

  # :second is special-cased: a float amount is split into whole seconds and
  # a :microsecond entry (truncated, not rounded).
  defp do_to_durations(durations, %__MODULE__{} = period, :second, sign) do
    value = Map.fetch!(period, :second)
    second = trunc(value)
    microsecond = trunc((value - second) * @microseconds_per_second)

    durations
    |> do_to_durations(microsecond, :microsecond, sign)
    |> do_to_durations(second, :second, sign)
  end

  # All other units: include the amount (negated for :neg), skipping zeros.
  defp do_to_durations(durations, %__MODULE__{} = period, key, sign) do
    case {Map.fetch!(period, key), sign} do
      {value, :pos} when value > 0 -> Keyword.put(durations, key, value)
      {value, :neg} when value > 0 -> Keyword.put(durations, key, value * -1)
      _zero -> durations
    end
  end

  # Integer variants used by the :second clause above; zero amounts are
  # dropped from the output.
  defp do_to_durations(durations, 0, _key, _sign), do: durations

  defp do_to_durations(durations, value, key, sign) when is_integer(value) do
    value =
      case sign do
        :pos -> value
        :neg -> value * -1
      end

    Keyword.put(durations, key, value)
  end

  # Splits the string at the time designator "T" and parses each half with
  # its own designator map, merging the results.
  # NOTE(review): an input containing more than one "T" raises a
  # CaseClauseError instead of returning {:error, :invalid_format} — confirm
  # whether that is acceptable for untrusted input.
  defp do_parse(string) when is_binary(string) do
    string
    |> String.split("T")
    |> case do
      [date] ->
        do_parse(date, @key_map_date)

      ["", time] ->
        do_parse(time, @key_map_time)

      [date, time] ->
        with {:ok, durations_date} <- do_parse(date, @key_map_date),
             {:ok, durations_time} <- do_parse(time, @key_map_time) do
          {:ok, Keyword.merge(durations_date, durations_time)}
        end
    end
  end

  # Scans the charlist left to right, accumulating digit (and '.') chars in
  # `num` until a designator char is hit, at which point the accumulated
  # number is parsed and stored under the designator's key. Any other char,
  # or parse failure, aborts. Leftover digits without a trailing designator
  # make the final accumulator non-empty and yield :invalid_format.
  defp do_parse(string, key_map) when is_binary(string) do
    designators_list = Map.keys(key_map)

    string
    |> String.to_charlist()
    |> Enum.reduce_while({[], []}, fn char, {designators, num} ->
      cond do
        char == ?. ->
          {:cont, {designators, [char | num]}}

        char in ?0..?9 ->
          {:cont, {designators, [char | num]}}

        char in designators_list ->
          with {:ok, key} <- Map.fetch(key_map, char),
               {:ok, value} <- parse_value(key, Enum.reverse(num)) do
            {:cont, {Keyword.put(designators, key, value), []}}
          else
            :error -> {:halt, :error}
          end

        true ->
          {:halt, :error}
      end
    end)
    |> case do
      {durations, []} -> {:ok, durations}
      _error -> {:error, :invalid_format}
    end
  end

  # Seconds may carry a fractional part, so they are parsed as a float; the
  # number must consume the whole string (no trailing garbage).
  defp parse_value(:second, num) do
    num
    |> to_string()
    |> Float.parse()
    |> case do
      {value, ""} -> {:ok, value}
      _error -> :error
    end
  end

  # Every other unit must be a plain integer.
  defp parse_value(_key, num) do
    num
    |> to_string()
    |> Integer.parse()
    |> case do
      {value, ""} -> {:ok, value}
      _error -> :error
    end
  end

  # Renders periods as #Tox.Period<P...> using the String.Chars impl below.
  defimpl Inspect do
    alias Tox.Period

    @spec inspect(Period.t(), Inspect.Opts.t()) :: String.t()
    def inspect(period, _opts) do
      "#Tox.Period<#{to_string(period)}>"
    end
  end

  # Formats a period back into the `PiYiMiWiDTiHiMfS` string format.
  defimpl String.Chars do
    alias Tox.Period

    # Charlist designators appended per unit; month and minute share "M"
    # (disambiguated by their position relative to "T").
    @designators %{
      year: 'Y',
      month: 'M',
      week: 'W',
      day: 'D',
      hour: 'H',
      minute: 'M',
      second: 'S'
    }

    @spec to_string(Period.t()) :: String.t()
    def to_string(period) do
      period_date = period_to_string(period, [:year, :month, :week, :day])
      period_time = period_to_string(period, [:hour, :minute, :second])

      # The "T" separator is only emitted when there is a time component.
      if period_time == "", do: "P#{period_date}", else: "P#{period_date}T#{period_time}"
    end

    # Concatenates "<value><designator>" for every non-zero unit in `keys`.
    defp period_to_string(period, keys) do
      Enum.reduce(keys, "", fn key, string ->
        case Map.fetch!(period, key) do
          value when value > 0 -> "#{string}#{value}#{Map.fetch!(@designators, key)}"
          _zero -> string
        end
      end)
    end
  end
end
defmodule Tox.Period.Sigil do
  @moduledoc """
  A `~P` sigil for periods.
  """

  alias Tox.Period

  @doc """
  Handles the sigil `~P` for periods.

  ## Examples

      iex> import Tox.Period.Sigil
      iex> ~P[1Y2DT1H10.10S]
      #Tox.Period<P1Y2DT1H10.1S>

      iex> ~P[1y]
      ** (ArgumentError) cannot parse "1y" as period

  """
  @spec sigil_P(binary(), list()) :: Period.t()
  def sigil_P(string, _modifiers) do
    # Delegate to Period.parse/1 and turn a parse failure into an
    # ArgumentError, as expected for sigils used in literals.
    string
    |> Period.parse()
    |> case do
      {:ok, period} ->
        period

      {:error, _reason} ->
        raise ArgumentError, "cannot parse #{inspect(string)} as period"
    end
  end
end
|
lib/tox/period.ex
| 0.952959 | 0.789883 |
period.ex
|
starcoder
|
defmodule Data.Events do
  @moduledoc """
  Eventing layer

  An event looks like this:

  ```json
  {
    "type": "room/entered",
    "actions": [
      {
        "type": "communications/emote",
        "delay": 0.5,
        "options": {
          "message": "[name] glances up from reading his paper",
        }
      },
      {
        "type": "communications/say",
        "delay": 0.75,
        "options": {
          "message": "Welcome!"
        }
      },
      {
        "type": "communications/say",
        "delay": 0.75,
        "options": {
          "message": "How can I help you?"
        }
      }
    ]
  }
  ```
  """

  @type action :: String.t()
  @type options_mapping :: map()

  @callback type() :: String.t()
  @callback allowed_actions() :: [action()]
  @callback options :: options_mapping()

  alias Data.Events.Actions
  alias Data.Events.CharacterTargeted
  alias Data.Events.CombatTicked
  alias Data.Events.Options
  alias Data.Events.RoomEntered
  alias Data.Events.RoomHeard
  alias Data.Events.StateTicked

  # Known event type strings and the module implementing each.
  @mapping %{
    "character/target" => CharacterTargeted,
    "combat/ticked" => CombatTicked,
    "room/entered" => RoomEntered,
    "room/heard" => RoomHeard,
    "state/ticked" => StateTicked
  }

  @doc "Returns the map of event type strings to implementing modules."
  def mapping, do: @mapping

  @doc """
  Parses a raw event map into its typed event struct.

  Returns `{:error, :no_type}` for unknown types and
  `{:error, :invalid_options, errors}` when the options fail validation.
  """
  def parse(event) do
    with {:ok, event_module} <- find_type(event),
         {:ok, id} <- parse_id(event),
         {:ok, options} <- parse_options(event_module, event),
         {:ok, actions} <- parse_actions(event_module, event) do
      {:ok, struct(event_module, %{id: id, options: options, actions: actions})}
    end
  end

  # Looks up the implementing module for the event's "type" field.
  defp find_type(event) do
    case Map.get(@mapping, event["type"]) do
      nil -> {:error, :no_type}
      event_module -> {:ok, event_module}
    end
  end

  @doc false
  # Uses the event's "id" when present, otherwise generates a fresh UUID.
  def parse_id(event) do
    case Map.fetch(event, "id") do
      {:ok, id} -> {:ok, id}
      :error -> {:ok, UUID.uuid4()}
    end
  end

  # Missing options default to an empty map; present options are validated
  # against the event module's declared options.
  defp parse_options(event_module, event) do
    case Map.fetch(event, "options") do
      {:ok, options} -> validate_options(event_module, options)
      :error -> {:ok, %{}}
    end
  end

  defp validate_options(event_module, options) do
    case Options.validate_options(event_module, options) do
      {:ok, options} -> {:ok, options}
      {:error, errors} -> {:error, :invalid_options, errors}
      # Mirrors the original fallback: any bare :error yields empty options.
      :error -> {:ok, %{}}
    end
  end

  # Parses the "actions" list, silently dropping entries that fail to parse
  # or whose action type is not allowed for this event module.
  defp parse_actions(event_module, event) do
    case Map.fetch(event, "actions") do
      :error ->
        {:ok, []}

      {:ok, raw_actions} ->
        actions =
          for raw_action <- raw_actions,
              {:ok, action} <- [Actions.parse(raw_action)],
              action_allowed?(event_module, action.type) do
            action
          end

        {:ok, actions}
    end
  end

  @doc """
  Check if an action type is allowed in an event
  """
  def action_allowed?(event_type, action_type) do
    action_type in event_type.allowed_actions()
  end
end
|
lib/data/events.ex
| 0.74055 | 0.701968 |
events.ex
|
starcoder
|
defmodule Model.Transfer do
  @moduledoc """
  Transfer specifies additional rules and overrides for a transfer. See
  [GTFS `transfers.txt`](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#transferstxt)
  """

  # Project-local macro; presumably defines the %__MODULE__{} struct with the
  # listed fields (the @type t below references it) — TODO confirm against
  # the Recordable module.
  use Recordable, [
    :from_stop_id,
    :to_stop_id,
    :transfer_type,
    :min_transfer_time,
    :min_walk_time,
    :min_wheelchair_time,
    :suggested_buffer_time,
    :wheelchair_transfer
  ]

  @typedoc """
  | Value        | Description |
  | ------------ | ----------- |
  | `0` or empty | Recommended transfer point between routes |
  | `1`          | Timed transfer point between two routes |
  | `2`          | Transfer requires a minimum amount of time between arrival and departure to ensure a connection |
  | `3`          | Transfers are not possible between routes at the location |

  See [GTFS `transfers.txt` `transfer_type`](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#transferstxt).
  """
  @type transfer_type :: 0..3

  @typedoc """
  `wheelchair_transfer` is included in some records.

  When the value is present, `1` indicates a transfer is wheelchair accessible, `2` indicates it is not.
  """
  @type wheelchair_transfer :: 1..2 | nil

  @typedoc """
  * `:from_stop_id` - stops.stop_id identifying the stop/station where a connection between routes begins.
  * `:to_stop_id` - stops.stop_id identifying the stop/station where a connection between routes ends.
  * `:transfer_type` - see [GTFS `transfers.txt` `transfer_type`](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#transferstxt).
  * `:min_transfer_time` - the sum of `min_walk_time` and `suggested_buffer_time`. see [`transfers.txt` `min_transfer_time`] (https://github.com/mbta/gtfs-documentation/blob/master/reference/gtfs.md#transferstxt)
  * `:min_walk_time` - minimum time required to travel by foot from `from_stop_id` to `to_stop_id`. see [`transfers.txt` `min_walk_time`] (https://github.com/mbta/gtfs-documentation/blob/master/reference/gtfs.md#transferstxt)
  * `:min_wheelchair_time` - minimum time required to travel by wheelchair from `from_stop_id` to `to_stop_id`. see [`transfers.txt` `min_wheelchair_time`] (https://github.com/mbta/gtfs-documentation/blob/master/reference/gtfs.md#transferstxt)
  * `:suggested_buffer_time` - recommended buffer time to allow to make a successful transfer between two services. see [`transfers.txt` `suggested_buffer_time`] (https://github.com/mbta/gtfs-documentation/blob/master/reference/gtfs.md#transferstxt)
  * `:wheelchair_transfer` - see [`transfers.txt` `wheelchair_transfer`] (https://github.com/mbta/gtfs-documentation/blob/master/reference/gtfs.md#transferstxt)
  """
  @type t :: %__MODULE__{
          from_stop_id: String.t(),
          to_stop_id: String.t(),
          transfer_type: transfer_type,
          min_transfer_time: non_neg_integer | nil,
          min_walk_time: non_neg_integer | nil,
          min_wheelchair_time: non_neg_integer | nil,
          suggested_buffer_time: non_neg_integer | nil,
          wheelchair_transfer: wheelchair_transfer
        }
end
|
apps/model/lib/model/transfer.ex
| 0.928595 | 0.664506 |
transfer.ex
|
starcoder
|
defmodule Bintreeviz.Positioner.WS do
  @moduledoc """
  Module to do the positioning following the WS algorithm. As described in the original paper,
  this algorithm works with two loops to keep the algorithm performing in O(N).
  """

  @behaviour Bintreeviz.Positioner

  # Internal struct to keep track of positioning walk results.
  defmodule WalkResult do
    @moduledoc false

    defstruct node: nil, nexts: nil, offsets: nil

    # Type added so the WalkResult.t() references in the specs below resolve
    # (the type previously did not exist).
    @type t :: %__MODULE__{
            node: Bintreeviz.Node.t() | nil,
            nexts: map() | nil,
            offsets: map() | nil
          }
  end

  # Horizontal spacing added to each node's width.
  @margin 2
  # Vertical distance between two consecutive tree depths.
  @node_height 6

  alias Bintreeviz.Node

  @doc "position/1 takes the root node and positions it and all its child nodes accordingly"
  @spec position(Node.t()) :: Node.t()
  @impl true
  def position(%Node{} = root) do
    %WalkResult{node: node} =
      root
      |> first_walk()
      |> second_walk()

    node
  end

  # First (bottom-up) walk: assigns each node a preliminary x/y and records
  # per-depth `nexts` (next free x slot) and `offsets` maps.
  #
  # Spec fixed: it must accept nil (empty subtree) and the return value is a
  # %WalkResult{}, not the bare {Node.t(), map(), map()} tuple the previous
  # spec declared.
  @spec first_walk(Node.t() | nil, non_neg_integer(), map(), map()) :: WalkResult.t()
  defp first_walk(root, depth \\ 0, nexts \\ %{}, offsets \\ %{})

  defp first_walk(nil, _depth, nexts, offsets),
    do: %WalkResult{node: nil, nexts: nexts, offsets: offsets}

  defp first_walk(%Node{} = root, depth, nexts, offsets) do
    %WalkResult{node: left_child, nexts: nexts, offsets: offsets} =
      first_walk(
        root.left_child,
        depth + 1,
        nexts,
        offsets
      )

    %WalkResult{node: right_child, nexts: nexts, offsets: offsets} =
      first_walk(
        root.right_child,
        depth + 1,
        nexts,
        offsets
      )

    # update node with updated children
    root = %Node{root | left_child: left_child, right_child: right_child}
    root_width = Node.width(root) + @margin

    # find the nodes initial position. This might be overwritten in the second
    # walk due to children shifting its parent position.
    preliminary_x = get_preliminary_x(root, nexts, depth)

    # update offsets map with the higher value between the currently known
    # offset, or the nexts - preliminary_x value.
    bigger_offset = max(Map.get(offsets, depth, 0), Map.get(nexts, depth, 0) - preliminary_x)
    offsets = Map.put(offsets, depth, bigger_offset)

    # based on previous offsets and calculated preliminary_x, determine
    # the new preliminary x position of the node for the first walk.
    preliminary_x =
      case Node.is_leaf?(root) do
        true -> preliminary_x
        false -> preliminary_x + Map.get(offsets, depth, 0)
      end

    # update node's position
    root = %Node{
      root
      | x: preliminary_x,
        y: depth * @node_height,
        offset: Map.get(offsets, depth, 0)
    }

    # update nexts
    nexts = Map.put(nexts, depth, preliminary_x + root_width)

    %WalkResult{
      node: root,
      nexts: nexts,
      offsets: offsets
    }
  end

  # Computes a node's preliminary x from its children: leaves take the next
  # free slot at their depth; single-child nodes sit half a width to the
  # side of the child; two-child nodes are centered between the children.
  @spec get_preliminary_x(Node.t(), map(), non_neg_integer()) :: integer()
  defp get_preliminary_x(%Node{} = root, nexts, depth) do
    root_width = Node.width(root) + @margin

    case root do
      %Node{left_child: nil, right_child: nil} ->
        Map.get(nexts, depth, 0)

      %Node{left_child: nil, right_child: %Node{} = right_child} ->
        right_child.x - floor(root_width / 2)

      %Node{left_child: %Node{} = left_child, right_child: nil} ->
        left_child.x + floor(root_width / 2)

      %Node{left_child: %Node{} = left_child, right_child: %Node{} = right_child} ->
        floor((left_child.x + right_child.x + Node.width(right_child) + @margin) / 2) -
          floor(root_width / 2)
    end
  end

  # Second (top-down) walk: shifts every subtree right by the sum of its
  # ancestors' offsets, finalizing x positions.
  @spec second_walk(WalkResult.t()) :: WalkResult.t()
  defp second_walk(%WalkResult{node: root, nexts: nexts}), do: second_walk(root, nexts)

  # Spec fixed: the previous spec declared three parameters for a
  # four-parameter function head (modifier_sum was missing) and did not
  # allow nil for the node.
  @spec second_walk(Node.t() | nil, map(), non_neg_integer(), integer()) :: WalkResult.t()
  defp second_walk(node, nexts, depth \\ 0, modifier_sum \\ 0)

  defp second_walk(nil, nexts, _depth, _modifier_sum), do: %WalkResult{node: nil, nexts: nexts}

  defp second_walk(%Node{} = root, nexts, depth, modifier_sum) do
    # recurse and calculate for left child first
    %WalkResult{node: left_child, nexts: nexts} =
      second_walk(
        root.left_child,
        nexts,
        depth + 1,
        modifier_sum + root.offset
      )

    # then calculate for the right child
    %WalkResult{node: right_child, nexts: nexts} =
      second_walk(
        root.right_child,
        nexts,
        depth + 1,
        modifier_sum + root.offset
      )

    # then combine results
    root = %Node{
      root
      | left_child: left_child,
        right_child: right_child,
        x: root.x + modifier_sum
    }

    %WalkResult{nexts: nexts, node: root}
  end
end
|
lib/positioner/ws.ex
| 0.886282 | 0.670622 |
ws.ex
|
starcoder
|
defmodule ExYarn do
  @moduledoc """
  ExYarn is a small library for parsing [Yarn](https://yarnpkg.com/) lockfiles in Elixir. Only the version 1 of Yarn
  lockfiles is currently supported.

  The library allows you to parse either to a plain Elixir map, or to a utility type, `ExYarn.Lockfile`, which makes
  manipulating the parsed dependencies a little easier.

  #### Note on performance

  This library was built in part as a learning exercise and therefore does not necessarily
  apply the best possible practices and tools when it comes to code quality and performance. If performance is important
  to you, I recommend using Dorgan's library ([hex.pm](https://hex.pm/packages/yarn_parser),
  [Github](https://github.com/doorgan/yarn_parser)), which uses
  [NimbleParsec](https://hexdocs.pm/nimble_parsec/NimbleParsec.html) for better performance.

  ## Example

  ### Parsing to a map

      iex> input = ~s(
      ...># yarn lockfile v1
      ...>"@babel/code-frame@7.10.4":
      ...>  version "7.10.4"
      ...>  resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.10.4.tgz#168da1a36e90da68ae8d49c0f1b48c7c6249213a"
      ...>  integrity sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg==
      ...>  dependencies:
      ...>    "@babel/highlight" "^7.10.4"
      ...>)
      ...> ExYarn.parse(input)
      {
        :ok,
        {
          %{
            "@babel/code-frame@7.10.4" => %{
              "version" => "7.10.4",
              "resolved" => "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.10.4.tgz#168da1a36e90da68ae8d49c0f1b48c7c6249213a",
              "integrity" => "sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg==",
              "dependencies" => %{"@babel/highlight" => "^7.10.4"}
            }
          },
          [" yarn lockfile v1"]
        }
      }

  ### Parsing to a `ExYarn.Lockfile`

      iex> input = ~s(
      ...># yarn lockfile v1
      ...>"@babel/code-frame@7.10.4":
      ...>  version "7.10.4"
      ...>  resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.10.4.tgz#168da1a36e90da68ae8d49c0f1b48c7c6249213a"
      ...>  integrity sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg==
      ...>  dependencies:
      ...>    "@babel/highlight" "^7.10.4"
      ...>)
      ...>ExYarn.parse_to_lockfile(input)
      {
        :ok,
        %ExYarn.Lockfile{
          version: 1,
          dependencies: [
            %ExYarn.Dependency{
              name: "@babel/code-frame@7.10.4",
              version: "7.10.4",
              resolved: "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.10.4.tgz#168da1a36e90da68ae8d49c0f1b48c7c6249213a",
              integrity: "sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg==",
              dependencies: [
                {"@babel/highlight", "^7.10.4"}
              ],
              optional_dependencies: []
            }
          ],
          comments: [" yarn lockfile v1"]}}
  """

  alias ExYarn.{Lockfile, Parser}

  @doc """
  Takes the lockfile's content as input and returns the parsed map
  """
  @spec parse(binary) :: {:error, Exception.t()} | {:ok, {map(), [binary]}}
  def parse(lockfile_content) do
    do_parse(lockfile_content)
  end

  @doc """
  Same as `ExYarn.parse/1` but raises errors instead of returning them
  """
  @spec parse!(binary) :: {map, [binary]}
  def parse!(lockfile_content) do
    case parse(lockfile_content) do
      {:ok, result} -> result
      {:error, error} -> raise error
    end
  end

  @doc """
  Takes the lockfile's content as input and returns a parsed `ExYarn.Lockfile`
  """
  @spec parse_to_lockfile(binary) :: {:error, binary | Exception.t()} | {:ok, Lockfile.t()}
  def parse_to_lockfile(lockfile_content) do
    case parse(lockfile_content) do
      {:ok, parse_result} -> build_lockfile(parse_result)
      {:error, error} -> {:error, error}
    end
  end

  @doc """
  Same as `ExYarn.parse_to_lockfile/1` but raises errors instead of returning them
  """
  @spec parse_to_lockfile!(binary) :: ExYarn.Lockfile.t()
  def parse_to_lockfile!(lockfile_content) do
    case parse_to_lockfile(lockfile_content) do
      {:ok, lockfile} -> lockfile
      {:error, error} -> raise error
    end
  end

  # Strips a UTF-8 BOM if present, delegates to the parser, and reverses the
  # comment list (the parser accumulates comments in reverse order).
  defp do_parse(lockfile_content) do
    parse_result =
      lockfile_content
      |> String.replace_prefix("\uFEFF", "")
      |> Parser.parse()

    case parse_result do
      {:ok, result, comments} -> {:ok, {result, Enum.reverse(comments)}}
      {:error, error} -> {:error, error}
    end
  end

  # Wraps the raw parse result into a %Lockfile{}.
  defp build_lockfile(parse_result) do
    parse_result
    |> Lockfile.from_parse_result()
  end
end
|
lib/ex_yarn.ex
| 0.751739 | 0.480479 |
ex_yarn.ex
|
starcoder
|
defmodule PowerControl do
  @moduledoc """
  `PowerControl` is a library that enables runtime configuration of embedded linux for power conservation or performance via native elixir.

  ## Getting Started

  If using `shoehorn`, add `:power_control` to your shoehorn apps in your `config.exs`:

  ```elixir
  config :shoehorn,
    init: [:nerves_runtime, ..., :power_control],
    ...
  ```

  It must come after `:nerves_runtime` but has no requirements other than that.

  Once installed, startup configuration can be set in your `config.exs` like so:

  ```elixir
  config :power_control,
    cpu_governor: :powersave,
    disable_leds: true,
    disable_hdmi: true
  ```

  The CPU Governor determines CPU clock speed behavior. Different devices can have different available governors. For more information about determining available governors, see the "Runtime Functionality" section below.

  `disable_leds` disables all system leds. To selectively disable them, you may want to use the functions `list_leds/0` and `disable_led/1` manually. More complex LED management can be done with [`Nerves.Leds`](https://github.com/nerves-project/nerves_leds).

  `disable_hdmi` disables the onboard hdmi of the pi. This may also disable other forms of video on hats or other peripherals, but this is not tested.

  ## Runtime Functionality

  `PowerControl` also allows you fetch information about your system and configure some settings during runtime, for example:

  ```elixir
  iex> list_cpus()
  {:ok, [:cpu0]}

  iex> cpu_info(:cpu0).speed
  700000

  iex> list_cpu_governors(:cpu0)
  {:ok, [:ondemand, :userspace, :powersave, :conservative, :performance]}

  iex> set_cpu_governor(:cpu0, :performance)
  {:ok, :performance}

  iex> cpu_info(:cpu0).speed
  1000000
  ```

  The functions `list_cpus/0` and `list_cpu_governors/1` can be used to determine what governors you have available on your device for configuration.

  ## Other Errors and Advanced Configuration

  All Led and CPU functions can return Elixir `File` error messages such as `:enoent`, which generally indicates you have configured the directories improperly for your specific device (unlikely) or that `PowerControl` does not support your device (most likely). In the unlikely case that your device can be supported by `PowerControl` but you must manually configure the directories, the config keys are `cpu_dir` and `led_dir`. I do not currently support configuring filenames.
  """

  require Logger

  alias PowerControl.{CPU, LED, HDMI}

  # Warning messages logged during init when configuration or the expected
  # device directories are missing.
  @startup_governor_warning "[PowerControl] No startup CPU Governor configured, device will use default."
  @directory_warning_cpu "[PowerControl] Could not find CPU directory, are you sure this is a Nerves device?"
  @directory_warning_led "[PowerControl] Could not find LED directory, are you sure this is a Nerves device?"

  # Runs the configured startup steps (CPU governor, LEDs, HDMI) in order.
  # Failures are logged as warnings and never abort startup; always returns :ok.
  # NOTE(review): Logger.warn/1 is deprecated in recent Elixir versions in
  # favour of Logger.warning/1 — confirm the project's minimum Elixir version.
  @doc false
  def init do
    case CPU.init() do
      {:error, :no_startup_governor_configured} ->
        Logger.warn(@startup_governor_warning)

      {:error, :enoent} ->
        Logger.warn(@directory_warning_cpu)

      _ ->
        :ok
    end

    case LED.init() do
      {:error, :enoent} ->
        Logger.warn(@directory_warning_led)

      _ ->
        :ok
    end

    HDMI.init()
    :ok
  end

  @doc """
  Lists system CPUS.

  ```
  iex> list_cpus()
  {:ok, [:cpu0]}
  ```
  """
  def list_cpus do
    CPU.list_cpus()
  end

  @doc """
  Returns an info map for a CPU. If the CPU does not exist, a `File` error is returned.

  ```
  iex> cpu_info(:cpu0)
  %{max_speed: 1000000, min_speed: 700000, speed: 1000000}
  ```

  ```
  iex> cpu_info(:bad_cpu)
  {:error, :enoent}
  ```
  """
  def cpu_info(cpu) do
    CPU.cpu_info(cpu)
  end

  @doc """
  Returns available governors for a CPU.

  ```
  iex> list_cpu_governors(:cpu0)
  {:ok, [:ondemand, :userspace, :powersave, :conservative, :performance]}
  ```
  """
  def list_cpu_governors(cpu) do
    CPU.list_governors(cpu)
  end

  @doc """
  Sets the governor for a CPU.

  ```
  iex> set_cpu_governor(:cpu0, :powersave)
  {:ok, :powersave}

  iex> set_cpu_governor(:cpu0, :invalid)
  {:error, :invalid_governor}

  # Running on non-nerves device or with bad governor file settings
  iex> set_cpu_governor(:cpu0, :powersave)
  {:error, :governor_file_not_found}
  ```
  """
  def set_cpu_governor(cpu, governor) do
    CPU.set_governor(cpu, governor)
  end

  @doc """
  Lists system LEDS.

  ```
  iex> list_leds()
  {:ok, [:led0]}
  ```
  """
  def list_leds do
    LED.list_leds()
  end

  @doc """
  Disables an LED.

  Uses a simple retry system if it fails, retries 2 times then gives up.

  To re-enable the LED or further configure LED settings, I reccomend [`Nerves.Leds`](https://github.com/nerves-project/nerves_leds).

  ```
  iex> disable_led(:led0)
  :ok

  iex> disable_led(:invalid_led)
  {:error, ...}
  ```
  """
  def disable_led(led) do
    LED.disable_led(led)
  end

  @doc """
  Disables the HDMI port.

  Returns `:ok` regardless of failure or success, but almost always succeeds.

  ```
  iex> disable_hdmi()
  :ok
  ```
  """
  def disable_hdmi do
    HDMI.disable_hdmi()
    :ok
  end
end
|
lib/power_control.ex
| 0.916025 | 0.82251 |
power_control.ex
|
starcoder
|
defmodule Hui.Http do
@moduledoc """
A behaviour module for handling HTTP requests and responses.
This module is responsible for dispatching Solr request encapsulated in `t:Hui.Http.t/0` struct.
It underpins the core functions of `Hui`, as well as provides default implementation and
built-in HTTP client capability based on [Erlang httpc](https://erlang.org/doc/man/httpc.html).
### Using other HTTP clients
Instead of using the built-in client, other HTTP clients may be developed
by implementing `Hui.Http` behaviour and deployed through application configuration.
For example, Hui provides another client option - `Hui.Http.Httpoison`.
```
config :hui,
http_client: Hui.Http.Httpoison
```
Hui.Http.Httpoison depends on `HTTPoison`. The dependency needs to be specified in `mix.exs`.
Add `:httpoison` to the applications section of the mix file to start up
the client for runtime.
```
defp deps do
[
{:httpoison, "~> 1.7"}
]
end
```
"""
@httpc_options [:timeout, :connect_timeout, :ssl, :essl, :autoredirect, :proxy_auth, :version, :relaxed]
defstruct body: nil,
headers: [],
method: :get,
options: [],
status: nil,
url: ""
@typedoc """
Request or response body which can be in iodata or parsed (as map) format.
"""
@type body :: nil | iodata() | map()
@typedoc """
The request url in iodata format consisting the full path and encoded query params.
"""
@type request_url :: iodata()
@typedoc """
Response tuple from a HTTP request consists of the `t:Hui.Http.t/0` struct and Solr response.
"""
@type response :: {:ok, t} | {:error, Hui.Error.t()}
@typedoc """
The main request and response data struct.
"""
@type t :: %__MODULE__{
body: body,
headers: list(),
method: :get | :post,
options: keyword(),
status: nil | integer(),
url: request_url
}
@doc """
Dispatch HTTP request to a Solr endpoint.
This callback is optional and can be used to adapt other HTTP clients to
provide different HTTP options and performance. Hui provides `Hui.Http.Httpoison`,
a reference implementation of this callback that can be
used in conjunction with `dispatch/2`.
If the callback is not implemented, the default built-in httpc-based client
will be used.
"""
@callback dispatch(request :: t) :: response
@optional_callbacks dispatch: 1
@doc """
Dispatch HTTP request to a Solr endpoint using the built-in Erlang [httpc](https://erlang.org/doc/man/httpc.html) client.
This is a default implementation of `c:dispatch/1` callback based on [httpc](https://erlang.org/doc/man/httpc.html).
### Example
```
request = %Hui.Http{
url: ["http://localhost:8080/solr/select", "?", "q=loch"],
headers: [{"accept", "application/json"}],
options: [{:timeout, 1000}]
}
{:ok, response} = HTTP.dispatch(request)
```
Find out more about the available options from [httpc documentation](https://erlang.org/doc/man/httpc.html#request-5).
"""
@spec dispatch(request :: t) :: response
def dispatch(%{method: :get, options: options} = request) do
{http_opts, opts} = handle_options(options, {[], []})
:httpc.request(:get, {handle_url(request.url), handle_req_headers(request.headers)}, http_opts, opts)
|> handle_response(request)
end
def dispatch(%{method: :post} = request) do
  headers = handle_req_headers(request.headers)
  {http_opts, opts} = handle_options(request.options, {[], []})
  # httpc takes the content type as a separate request argument; fall back to
  # an empty charlist when no content-type header was supplied.
  {_, content_type} = List.keyfind(headers, 'content-type', 0, {'content-type', ''})

  :httpc.request(
    :post,
    {handle_url(request.url), headers, content_type, request.body},
    http_opts,
    opts
  )
  |> handle_response(request)
end

# Allow modules that `use` this one to replace the default implementation.
defoverridable dispatch: 1
@doc """
Dispatch HTTP request to a Solr endpoint using a given client implementing the `Hui.Http` behaviour.
Same as `dispatch/1` but invoking request through dynamic dispatching. See `Hui.Http.Httpoison`
for a reference client implementation based on `HTTPoison` that provides additional options
such as [connection pooling](https://github.com/edgurgel/httpoison#connection-pools).
"""
@spec dispatch(request :: t, client :: module) :: response
def dispatch(request, client), do: client.dispatch(request)
# Partitions a keyword list of request options into httpc-specific HTTP
# options (keys listed in @httpc_options) and all remaining httpc options.
# Returns `{http_opts, opts}`; accumulation reverses the input order, which
# httpc does not depend on.
defp handle_options([], {http_opts, opts}), do: {http_opts, opts}

defp handle_options([{k, v} | t], {http_opts, opts}) do
  # `if` instead of `case boolean do true/false` — same behavior, idiomatic form.
  if k in @httpc_options do
    handle_options(t, {[{k, v} | http_opts], opts})
  else
    handle_options(t, {http_opts, [{k, v} | opts]})
  end
end
# Prepares an iodata URL for httpc: lists are converted element by element;
# binaries are turned into charlists.
# NOTE(review): "%" is replaced with "%25" before conversion — presumably so
# literal percent signs survive httpc's URL handling; confirm against callers
# that pass pre-encoded query strings.
defp handle_url(url) when is_list(url), do: Enum.map(url, &handle_url(&1))
defp handle_url(url), do: url |> String.replace("%", "%25") |> to_charlist()

# httpc speaks charlist headers; responses are normalized back to binaries.
defp handle_req_headers(headers), do: Enum.map(headers, fn {k, v} -> {to_charlist(k), to_charlist(v)} end)
defp handle_resp_headers(headers), do: Enum.map(headers, fn {k, v} -> {to_string(k), to_string(v)} end)
# Success case: httpc returns {{'HTTP/1.x', status, reason}, headers, body};
# the status-line version is matched by its charlist prefix ~c"HTTP".
# JSON bodies (by content-type) are decoded to a map; everything else is
# returned as a binary string.
defp handle_response({:ok, {{[?H, ?T, ?T, ?P | _], status, _}, headers, body}}, req) do
  headers = handle_resp_headers(headers)
  {_, content_type} = List.keyfind(headers, "content-type", 0, {"content-type", ""})

  case content_type do
    "application/json" <> _ -> {:ok, %{req | body: decode_json(body), headers: headers, status: status}}
    _ -> {:ok, %{req | body: to_string(body), headers: headers, status: status}}
  end
end

defp handle_response({:error, {reason, _details}}, _req), do: {:error, %Hui.Error{reason: reason}}
# httpc could also return errors with only reason and without further details
defp handle_response({:error, reason}, _req), do: {:error, %Hui.Error{reason: reason}}
# Decodes a JSON body, falling back to the raw body (as a binary) when the
# payload is not valid JSON.
defp decode_json(body) do
  with {:ok, decoded} <- Jason.decode(body) do
    decoded
  else
    {:error, _} -> to_string(body)
  end
end
end
|
lib/hui/http.ex
| 0.903387 | 0.785802 |
http.ex
|
starcoder
|
defmodule Day25 do
  @moduledoc """
  Advent of Code 2017, day 25: runs the Turing machine described by the
  puzzle input and reports its diagnostic checksum.
  """

  @doc """
  Parses `input` into a machine description, compiles it to a module and
  executes it, returning the diagnostic checksum.
  """
  def part1(input) do
    {:ok, [machine], "", _, _, _} = TuringParser.turing(input)
    compiled = TuringCompiler.compile(machine)
    compiled.run()
  end
end
defmodule TuringCompiler do
  @moduledoc false

  # Compiles a parsed Turing machine description {initial_state, steps,
  # actions} into a dedicated module (CompiledTuringMachine) whose run/0
  # executes the machine and returns its diagnostic checksum.
  def compile({initial_state, steps, actions}) do
    ast = compile(initial_state, steps, actions)
    # Print the generated module for inspection/debugging.
    IO.puts(Macro.to_string(ast))
    [{module, _}] = Code.compile_quoted(ast, "CompiledTuringMachine")
    module
  end

  # Builds the AST of the generated module.
  defp compile(initial_state, steps, actions) do
    quote do
      defmodule CompiledTuringMachine do
        # The architecture of the generated code is inspired by
        # https://ferd.ca/advent-of-code-2017.html.

        # Entry point: start on an all-zero tape, represented as a zipper
        # {cells_left_of_head_reversed, [head | cells_right]}.
        def run() do
          zipper = {[], [0]}
          run(unquote(initial_state), zipper, unquote(steps))
        end

        # Out of steps: the checksum is the count of 1s on the tape.
        defp run(_, {prev, next}, 0) do
          Enum.sum(prev) + Enum.sum(next)
        end

        # One step: look up the transition for {state, current cell}; the
        # rebinding of `current`/`state` in the case patterns writes the new
        # cell value and switches state before moving the head.
        defp run(state, {prev, [current | next]}, steps) do
          case handle(state, current) do
            {current, :left, state} ->
              run(state, shift_left({prev, [current | next]}), steps - 1)

            {current, :right, state} ->
              run(state, shift_right({prev, [current | next]}), steps - 1)
          end
        end

        # Zipper moves; the tape grows with fresh 0 cells on demand.
        defp shift_left({[], next}), do: {[], [0 | next]}
        defp shift_left({[h | t], next}), do: {t, [h | next]}
        defp shift_right({prev, [h]}), do: {[h | prev], [0]}
        defp shift_right({prev, [h | t]}), do: {[h | prev], t}

        # One handle/2 clause per transition rule (see gen_handle_clauses/1).
        unquote_splicing(gen_handle_clauses(actions))
      end
    end
  end

  # Generates a `handle(state, value)` clause for every {state, value} ->
  # action rule; the action tuple is escaped so it embeds as a literal.
  def gen_handle_clauses(actions) do
    for {{in_state, current_value}, action} <- actions do
      quote do
        defp handle(unquote(in_state), unquote(current_value)) do
          unquote(Macro.escape(action))
        end
      end
    end
  end
end
defmodule TuringParser do
  @moduledoc false

  # NimbleParsec grammar for the day 25 puzzle input. `turing/1` yields
  # {initial_state, steps, transitions} where transitions maps
  # {state, current_value} to {new_value, direction, new_state}.
  import NimbleParsec

  # Reducer for the two header lines: state name becomes an atom.
  # String.to_atom/1 is safe here: states are single A-Z characters by grammar.
  def head([state, steps]) do
    {String.to_atom(state), steps}
  end

  # Reducer for one "In state X" block: expands into the two transition
  # rules (current value 0 and 1), keyed by {state, value}.
  def state_definition([state, {value1, action1}, {value2, action2}]) do
    state = String.to_atom(state)
    [{{state, value1}, action1}, {{state, value2}, action2}]
  end

  # Reducer for a single "If the current value is N" clause.
  def state_action([current, new_value, direction, new_state]) do
    {current, {new_value, String.to_atom(direction), String.to_atom(new_state)}}
  end

  # Final reducer: header tuple plus the flattened transition table.
  def result([{initial_state, steps} | actions]) do
    {initial_state, steps, List.flatten(actions)}
  end

  # One or more spaces, discarded.
  blankspace = ignore(ascii_string([?\s], min: 1))

  # "Begin in state A." / "Perform a diagnostic checksum after N steps."
  head =
    ignore(string("Begin in state "))
    |> ascii_string([?A..?Z], 1)
    |> ignore(string(".\n"))
    |> ignore(string("Perform a diagnostic checksum after "))
    |> integer(min: 1)
    |> ignore(string(" steps.\n"))
    |> reduce({:head, []})

  # Blank line followed by "In state X:".
  state_head =
    ignore(string("\n"))
    |> ignore(string("In state "))
    |> ascii_string([?A..?Z], 1)
    |> ignore(string(":\n"))

  # The three indented action lines under an "If the current value is N:".
  state_action =
    optional(blankspace)
    |> ignore(string("If the current value is "))
    |> integer(1)
    |> ignore(string(":\n"))
    |> optional(blankspace)
    |> ignore(string("- Write the value "))
    |> integer(1)
    |> ignore(string(".\n"))
    |> optional(blankspace)
    |> ignore(string("- Move one slot to the "))
    |> choice([string("left"), string("right")])
    |> ignore(string(".\n"))
    |> optional(blankspace)
    |> ignore(string("- Continue with state "))
    |> ascii_string([?A..?Z], 1)
    |> ignore(string(".\n"))
    |> reduce({:state_action, []})

  # A state block is its header followed by exactly two action clauses.
  defcombinatorp :state_definition, state_head
                                    |> concat(state_action)
                                    |> concat(state_action)
                                    |> reduce({:state_definition, []})

  defcombinatorp(:state_definitions, repeat(parsec(:state_definition)))

  # Top-level entry: header then all state definitions, reduced to the tuple.
  defparsec :turing, head
                     |> parsec(:state_definitions)
                     |> reduce({:result, []})
end
|
day25/lib/day25.ex
| 0.596081 | 0.68854 |
day25.ex
|
starcoder
|
defmodule EView.Renders.Root do
  @moduledoc """
  This module converts map to a structure that corresponds
  to [Nebo #15 API Manifest](http://docs.apimanifest.apiary.io/) response structure.
  """

  alias EView.Renders.{Meta, Error}

  @doc """
  Render response object or error description following to our guidelines.
  This method will look into status code and:
  * create `error` property from response data when HTTP status code is `4XX` or `5XX`;
  * create `data` property for other HTTP status codes.
  """
  def render(error, %{status: status} = conn) when 400 <= status and status < 600 do
    %{
      meta: Meta.render(error, conn),
      error: Error.render(error)
    }
  end

  def render(data, %{assigns: assigns} = conn) do
    %{
      meta: Meta.render(data, conn),
      data: data
    }
    |> put_paging(assigns)
    |> put_urgent(assigns)
    |> put_sandbox(assigns)
  end

  # Add `paging` property. To use it just add `paging` in `render/2` assigns.
  # This clause handles page-number paging given with atom keys; paging is
  # only added for list-typed responses (see the :meta match).
  defp put_paging(%{meta: %{type: "list"}} = data, %{
         paging: %{
           page_number: page_number,
           page_size: page_size,
           total_pages: total_pages,
           total_entries: total_entries
         }
       }) do
    Map.put(data, :paging, %{
      page_number: page_number,
      page_size: page_size,
      total_pages: total_pages,
      total_entries: total_entries
    })
  end

  # Add `paging` property. To use it just add `paging` in `render/2` assigns.
  # Same as above, but for paging maps that arrive with string keys; the
  # output is normalized to atom keys either way.
  defp put_paging(%{meta: %{type: "list"}} = data, %{
         paging: %{
           "page_number" => page_number,
           "page_size" => page_size,
           "total_pages" => total_pages,
           "total_entries" => total_entries
         }
       }) do
    Map.put(data, :paging, %{
      page_number: page_number,
      page_size: page_size,
      total_pages: total_pages,
      total_entries: total_entries
    })
  end

  # Add `paging` property. To use it just add `paging` in `render/2` assigns.
  # Cursor-based paging: the given map is passed through unchanged.
  defp put_paging(%{meta: %{type: "list"}} = data, %{
         paging:
           %{
             limit: limit,
             cursors: %{starting_after: _, ending_before: _},
             has_more: has_more
           } = paging
       })
       when is_integer(limit) and is_boolean(has_more) do
    data
    |> Map.put(:paging, paging)
  end

  # No recognizable paging (or a non-list response): leave data untouched.
  defp put_paging(data, _assigns), do: data

  # Add `urgent` property. To use it just add `urgent` in `render/2` assigns.
  defp put_urgent(data, %{urgent: urgent}), do: data |> Map.put(:urgent, urgent)
  defp put_urgent(data, _assigns), do: data

  # Add `sandbox` property. To use it just add `sandbox` in `render/2` assigns.
  # The clause is only compiled when Mix is available (e.g. not in releases);
  # without it, sandbox assigns fall through to the no-op clause below.
  # NOTE(review): Mix.env() is read at runtime here — relies on Mix being
  # present, which the compile-time guard ensures.
  if Code.ensure_loaded?(Mix) do
    defp put_sandbox(data, %{sandbox: sandbox}) do
      if Mix.env() in [:test, :dev], do: Map.put(data, :sandbox, sandbox), else: data
    end
  end

  defp put_sandbox(data, _assigns), do: data
end
|
lib/eview/renders/root_render.ex
| 0.786377 | 0.487856 |
root_render.ex
|
starcoder
|
defmodule ExUnit.CaptureIO do
  @moduledoc ~S"""
  Functionality to capture IO for testing.

  ## Examples

      defmodule AssertionTest do
        use ExUnit.Case

        import ExUnit.CaptureIO

        test "example" do
          assert capture_io(fn -> IO.puts("a") end) == "a\n"
        end

        test "another example" do
          assert with_io(fn ->
                   IO.puts("a")
                   IO.puts("b")
                   2 + 2
                 end) == {4, "a\nb\n"}
        end
      end
  """

  @doc """
  Captures IO generated when evaluating `fun`.

  Returns the binary which is the captured output.

  By default, `capture_io` replaces the `group_leader` (`:stdio`)
  for the current process. Capturing the group leader is done per
  process and therefore can be done concurrently.

  However, the capturing of any other named device, such as `:stderr`,
  happens globally and persists until the function has ended. While this means
  it is safe to run your tests with `async: true` in many cases, captured output
  may include output from a different test and care must be taken when using
  `capture_io` with a named process asynchronously.

  A developer can set a string as an input. The default input is an empty
  string. If capturing a named device asynchronously, an input can only be given
  to the first capture. Any further capture that is given to a capture on that
  device will raise an exception and would indicate that the test should be run
  synchronously.

  Similarly, once a capture on a named device has begun, the encoding on that
  device cannot be changed in a subsequent concurrent capture. An error will
  be raised in this case.

  ## IO devices

  You may capture the IO from any registered IO device. The device name given
  must be an atom representing the name of a registered process. In addition,
  Elixir provides two shortcuts:

    * `:stdio` - a shortcut for `:standard_io`, which maps to
      the current `Process.group_leader/0` in Erlang

    * `:stderr` - a shortcut for the named process `:standard_error`
      provided in Erlang

  ## Options

    * `:input` - An input to the IO device, defaults to `""`.

    * `:capture_prompt` - Define if prompts (specified as arguments to
      `IO.get*` functions) should be captured. Defaults to `true`. For
      IO devices other than `:stdio`, the option is ignored.

    * `:encoding` (since v1.10.0) - encoding of the IO device. Allowed
      values are `:unicode` (default) and `:latin1`.

  ## Examples

      iex> capture_io(fn -> IO.write("john") end) == "john"
      true

      iex> capture_io(:stderr, fn -> IO.write(:stderr, "john") end) == "john"
      true

      iex> capture_io(:standard_error, fn -> IO.write(:stderr, "john") end) == "john"
      true

      iex> capture_io("this is input", fn ->
      ...>   input = IO.gets("> ")
      ...>   IO.write(input)
      ...> end) == "> this is input"
      true

      iex> capture_io([input: "this is input", capture_prompt: false], fn ->
      ...>   input = IO.gets("> ")
      ...>   IO.write(input)
      ...> end) == "this is input"
      true

  ## Returning values

  As seen in the examples above, `capture_io` returns the captured output.
  If you want to also capture the result of the function executed,
  use `with_io/2`.
  """
  @spec capture_io((() -> any())) :: String.t()
  def capture_io(fun) when is_function(fun, 0) do
    {_result, capture} = with_io(fun)
    capture
  end

  @doc """
  Captures IO generated when evaluating `fun`.

  See `capture_io/1` for more information.
  """
  @spec capture_io(atom() | String.t() | keyword(), (() -> any())) :: String.t()
  def capture_io(device_input_or_options, fun)

  def capture_io(device, fun) when is_atom(device) and is_function(fun, 0) do
    {_result, capture} = with_io(device, fun)
    capture
  end

  def capture_io(input, fun) when is_binary(input) and is_function(fun, 0) do
    {_result, capture} = with_io(input, fun)
    capture
  end

  def capture_io(options, fun) when is_list(options) and is_function(fun, 0) do
    {_result, capture} = with_io(options, fun)
    capture
  end

  @doc """
  Captures IO generated when evaluating `fun`.

  See `capture_io/1` for more information.
  """
  @spec capture_io(atom(), String.t() | keyword(), (() -> any())) :: String.t()
  def capture_io(device, input_or_options, fun)

  def capture_io(device, input, fun)
      when is_atom(device) and is_binary(input) and is_function(fun, 0) do
    {_result, capture} = with_io(device, input, fun)
    capture
  end

  def capture_io(device, options, fun)
      when is_atom(device) and is_list(options) and is_function(fun, 0) do
    {_result, capture} = with_io(device, options, fun)
    capture
  end

  @doc ~S"""
  Invokes the given `fun` and returns the result and captured output.

  It accepts the same arguments and options as `capture_io/1`.

  ## Examples

      {result, output} =
        assert with_io(fn ->
                 IO.puts("a")
                 IO.puts("b")
                 2 + 2
               end)

      assert result == 4
      assert output == "a\nb\n"
  """
  @doc since: "1.13.0"
  @spec with_io((() -> any())) :: {any(), String.t()}
  def with_io(fun) when is_function(fun, 0) do
    with_io(:stdio, [], fun)
  end

  @doc """
  Invokes the given `fun` and returns the result and captured output.

  See `with_io/1` for more information.
  """
  @doc since: "1.13.0"
  @spec with_io(atom() | String.t() | keyword(), (() -> any())) :: {any(), String.t()}
  def with_io(device_input_or_options, fun)

  def with_io(device, fun) when is_atom(device) and is_function(fun, 0) do
    with_io(device, [], fun)
  end

  def with_io(input, fun) when is_binary(input) and is_function(fun, 0) do
    with_io(:stdio, [input: input], fun)
  end

  def with_io(options, fun) when is_list(options) and is_function(fun, 0) do
    with_io(:stdio, options, fun)
  end

  @doc """
  Invokes the given `fun` and returns the result and captured output.

  See `with_io/1` for more information.
  """
  @doc since: "1.13.0"
  @spec with_io(atom(), String.t() | keyword(), (() -> any())) :: {any(), String.t()}
  def with_io(device, input_or_options, fun)

  def with_io(device, input, fun)
      when is_atom(device) and is_binary(input) and is_function(fun, 0) do
    with_io(device, [input: input], fun)
  end

  def with_io(device, options, fun)
      when is_atom(device) and is_list(options) and is_function(fun, 0) do
    do_with_io(map_dev(device), options, fun)
  end

  # Expands the documented device shortcuts to the registered process names.
  defp map_dev(:stdio), do: :standard_io
  defp map_dev(:stderr), do: :standard_error
  defp map_dev(other), do: other

  # :standard_io capture: swap the caller's group leader for a StringIO
  # device, run `fun`, and restore the original group leader afterwards.
  # This is per-process, which is why stdio capture is concurrency-safe.
  defp do_with_io(:standard_io, options, fun) do
    prompt_config = Keyword.get(options, :capture_prompt, true)
    encoding = Keyword.get(options, :encoding, :unicode)
    input = Keyword.get(options, :input, "")

    original_gl = Process.group_leader()
    {:ok, capture_gl} = StringIO.open(input, capture_prompt: prompt_config, encoding: encoding)

    try do
      Process.group_leader(self(), capture_gl)
      do_capture_gl(capture_gl, fun)
    after
      Process.group_leader(self(), original_gl)
    end
  end

  # Named-device capture: registration goes through ExUnit.CaptureServer,
  # which enforces the global input/encoding constraints described in the
  # capture_io/1 docs and surfaces them here as error tuples.
  defp do_with_io(device, options, fun) do
    input = Keyword.get(options, :input, "")
    encoding = Keyword.get(options, :encoding, :unicode)

    case ExUnit.CaptureServer.device_capture_on(device, encoding, input) do
      {:ok, ref} ->
        try do
          result = fun.()
          {result, ExUnit.CaptureServer.device_output(device, ref)}
        after
          ExUnit.CaptureServer.device_capture_off(ref)
        end

      {:error, :no_device} ->
        raise "could not find IO device registered at #{inspect(device)}"

      {:error, {:changed_encoding, current_encoding}} ->
        raise ArgumentError, """
        attempted to change the encoding for a currently captured device #{inspect(device)}.

        Currently set as: #{inspect(current_encoding)}
        Given: #{inspect(encoding)}

        If you need to use multiple encodings on a captured device, you cannot \
        run your test asynchronously
        """

      {:error, :input_on_already_captured_device} ->
        raise ArgumentError,
              "attempted multiple captures on device #{inspect(device)} with input. " <>
                "If you need to give an input to a captured device, you cannot run your test asynchronously"
    end
  end

  # Runs `fun` while the StringIO is the group leader. On an exception/exit
  # the StringIO is closed and the error is re-raised with its original
  # stacktrace; on success the accumulated output is returned with the result.
  defp do_capture_gl(string_io, fun) do
    try do
      fun.()
    catch
      kind, reason ->
        _ = StringIO.close(string_io)
        :erlang.raise(kind, reason, __STACKTRACE__)
    else
      result ->
        {:ok, {_input, output}} = StringIO.close(string_io)
        {result, output}
    end
  end
end
|
lib/ex_unit/lib/ex_unit/capture_io.ex
| 0.918895 | 0.789518 |
capture_io.ex
|
starcoder
|
defmodule Etl do
  @moduledoc """
  Builds and runs ETL pipelines as graphs of GenStage processes: stages are
  started under a DynamicSupervisor, wired up via GenStage subscriptions, and
  tracked in an `%Etl{}` struct for awaiting/acknowledgement.
  """
  require Logger

  @type stage :: Supervisor.child_spec() | {module(), arg :: term()} | module() | Etl.Stage.t()
  @type dictionary :: term()

  @type t :: %__MODULE__{
          stages: [Etl.stage()],
          pids: [pid],
          subscriptions: [GenStage.subscription_tag()]
        }

  defstruct stages: [],
            pids: [],
            subscriptions: []

  @type global_opts :: [
          min_demand: pos_integer(),
          max_demand: pos_integer(),
          dynamic_supervisor: module()
        ]

  # Starts a new pipeline definition with the given source stage.
  @spec pipeline(stage(), global_opts()) :: Etl.Pipeline.t()
  def pipeline(stage, opts \\ []) do
    Etl.Pipeline.new(opts)
    |> Etl.Pipeline.add_stage(stage, [])
  end

  # Appends a stage to the pipeline definition.
  @spec to(Etl.Pipeline.t(), stage(), keyword()) :: Etl.Pipeline.t()
  defdelegate to(pipeline, stage, opts \\ []), to: Etl.Pipeline, as: :add_stage

  # Appends a pure transformation function as a stage.
  @spec function(Etl.Pipeline.t(), (Etl.Message.data() -> {:ok, Etl.Message.data()} | {:error, reason :: term()})) ::
          Etl.Pipeline.t()
  defdelegate function(pipeline, fun), to: Etl.Pipeline, as: :add_function

  @type partition_opts :: [
          partitions: pos_integer() | list(),
          hash: (Etl.Message.t() -> {Etl.Message.t(), partition :: term})
        ]

  # Configures partitioned dispatch; :partitions is mandatory (the
  # Keyword.fetch!/2 is there purely to raise early when it is missing).
  @spec partition(Etl.Pipeline.t(), partition_opts) :: Etl.Pipeline.t()
  def partition(pipeline, opts) do
    Keyword.fetch!(opts, :partitions)
    Etl.Pipeline.set_partitions(pipeline, opts)
  end

  @spec broadcast(Etl.Pipeline.t(), keyword) :: Etl.Pipeline.t()
  defdelegate broadcast(pipeline, opts \\ []), to: Etl.Pipeline, as: :set_broadcast

  @spec batch(Etl.Pipeline.t(), keyword) :: Etl.Pipeline.t()
  defdelegate batch(pipeline, opts \\ []), to: Etl.Pipeline, as: :add_batch

  # Materializes the pipeline: starts every step as a process, records them
  # in a directed graph, subscribes consumers to producers, and returns the
  # resulting %Etl{} handle.
  @spec run(Etl.Pipeline.t()) :: t
  def run(%Etl.Pipeline{} = pipeline) do
    Graph.new(type: :directed)
    |> start_steps(Etl.Pipeline.steps(pipeline), pipeline.context)
    |> subscribe_stages(pipeline)
    |> create_struct()
  end

  # Polls until every stage process has exited, or :timeout after `timeout` ms.
  @spec await(t) :: :ok | :timeout
  def await(%__MODULE__{} = etl, opts \\ []) do
    delay = Keyword.get(opts, :delay, 500)
    timeout = Keyword.get(opts, :timeout, 10_000)
    do_await(etl, delay, timeout, 0)
  end

  # True once all stage processes have terminated.
  @spec done?(t) :: boolean()
  def done?(%__MODULE__{} = etl) do
    Enum.all?(etl.pids, fn pid -> Process.alive?(pid) == false end)
  end

  # Acknowledges messages back to their origin: messages are grouped by
  # acknowledger {module, ref}, split into successes/failures by :status,
  # and handed to each module's ack/3.
  @spec ack([Etl.Message.t()]) :: :ok
  def ack(messages) do
    Enum.group_by(messages, fn %{acknowledger: {mod, ref, _data}} -> {mod, ref} end)
    |> Enum.map(&group_by_status/1)
    |> Enum.each(fn {{mod, ref}, pass, fail} -> mod.ack(ref, pass, fail) end)
  end

  # Recursively starts each step, attaching it to every current tail
  # (vertex with no outgoing edge) of the graph.
  defp start_steps(graph, [], _context), do: graph

  defp start_steps(graph, [step | remaining], context) do
    # The last step gets the ack post-processing hook (see start_step/3).
    starter = &start_step(&1, context, remaining == [])

    case tails(graph) do
      [] ->
        # First step: no tails yet, just start it as the root vertex.
        {:ok, pid} = starter.(step)
        Graph.add_vertex(graph, pid, step: step, subscription_opts: get_in(step.opts, [:subscription_opts]))

      tails ->
        Enum.reduce(tails, graph, fn tail, g ->
          add_to_tail(g, step, tail, starter)
        end)
    end
    |> start_steps(remaining, context)
  end

  # Starts the right number of processes for a step depending on the
  # upstream's dispatcher: one per partition for PartitionDispatcher,
  # otherwise `:count` copies (default 1).
  defp add_to_tail(graph, step, tail, starter) do
    subscription_opts = get_in(step.opts, [:subscription_opts]) || []

    case GenStage.call(tail, :"$dispatcher") do
      {GenStage.PartitionDispatcher, opts} ->
        partitions = Keyword.fetch!(opts, :partitions) |> to_list()

        partitions
        |> Enum.reduce(graph, fn partition, g ->
          subscription_opts = Keyword.put(subscription_opts, :partition, partition)
          {:ok, pid} = starter.(step)
          add_step_to_graph(g, pid, tail, step: step, subscription_opts: subscription_opts)
        end)

      _ ->
        count = get_in(step.opts, [:count]) || 1

        Enum.reduce(1..count, graph, fn _, g ->
          {:ok, pid} = starter.(step)
          add_step_to_graph(g, pid, tail, step: step, subscription_opts: subscription_opts)
        end)
    end
  end

  # Subscribes every vertex to its in-neighbors (its producers), merging the
  # pipeline's demand settings with any per-vertex subscription options, and
  # stores the subscription tag as a vertex label.
  defp subscribe_stages(graph, pipeline) do
    Graph.postorder(graph)
    |> Enum.reduce(graph, fn pid, g ->
      v_subscription_opts = Graph.vertex_labels(g, pid) |> Keyword.get(:subscription_opts, [])

      Graph.in_neighbors(g, pid)
      |> Enum.reduce(g, fn neighbor, g ->
        subscription_opts =
          [
            to: neighbor,
            min_demand: pipeline.context.min_demand,
            max_demand: pipeline.context.max_demand
          ]
          |> Keyword.merge(v_subscription_opts)

        {:ok, sub} = GenStage.sync_subscribe(pid, subscription_opts)
        Graph.label_vertex(g, pid, sub: sub)
      end)
    end)
  end

  # Folds the graph's vertices/labels into the %Etl{} handle.
  defp create_struct(graph) do
    Graph.postorder(graph)
    |> Enum.reduce(%__MODULE__{}, fn pid, etl ->
      labels = Graph.vertex_labels(graph, pid)

      etl
      |> Map.update!(:pids, fn pids -> [pid | pids] end)
      |> Map.update!(:subscriptions, fn subs ->
        Keyword.get_values(labels, :sub) ++ subs
      end)
      |> Map.update!(:stages, fn stages -> [Keyword.get(labels, :step) | stages] end)
    end)
  end

  # Splits one acknowledger group into {key, successes, failures},
  # preserving the original message order within each list.
  defp group_by_status({key, messages}) do
    {pass, fail} =
      Enum.reduce(messages, {[], []}, fn
        %{status: :ok} = msg, {pass, fail} ->
          {[msg | pass], fail}

        msg, {pass, fail} ->
          {pass, [msg | fail]}
      end)

    {key, Enum.reverse(pass), Enum.reverse(fail)}
  end

  defp do_await(_etl, _delay, timeout, elapsed) when elapsed >= timeout do
    :timeout
  end

  defp do_await(etl, delay, timeout, elapsed) do
    case done?(etl) do
      true ->
        :ok

      false ->
        Process.sleep(delay)
        do_await(etl, delay, timeout, elapsed + delay)
    end
  end

  # Wraps a step's child spec so it is started through Etl.Stage.Interceptor,
  # which injects dispatcher config and optional post-processing.
  # NOTE(review): `dispatcher` is read from `opts` and then merged back into
  # the same `opts` — appears to be a no-op re-merge; confirm before removing.
  defp intercept(%{start: {module, function, [args]}} = child_spec, opts) do
    dispatcher = Keyword.get(opts, :dispatcher)
    interceptor_args = Keyword.merge(opts, stage: module, args: args, dispatcher: dispatcher)
    %{child_spec | start: {Etl.Stage.Interceptor, function, [interceptor_args]}}
  end

  # Vertices with no outgoing edges — the current end(s) of the pipeline.
  defp tails(graph) do
    graph
    |> Graph.vertices()
    |> Enum.filter(fn v -> Graph.out_degree(graph, v) == 0 end)
  end

  # Normalizes PartitionDispatcher's :partitions (count or explicit list).
  defp to_list(list) when is_list(list), do: list
  defp to_list(integer) when is_integer(integer), do: 0..(integer - 1)

  defp add_step_to_graph(graph, vertex, from, labels) do
    graph
    |> Graph.add_vertex(vertex, labels)
    |> Graph.add_edge(from, vertex)
  end

  # Starts one process for a step under the pipeline's DynamicSupervisor;
  # only the final step acknowledges messages after processing.
  defp start_step(step, context, last_step) do
    interceptor_opts =
      case last_step do
        true -> [post_process: &Etl.ack/1]
        false -> []
      end
      |> Keyword.merge(dispatcher: step.dispatcher)

    intercepted_child_spec = intercept(step.child_spec, interceptor_opts)
    DynamicSupervisor.start_child(context.dynamic_supervisor, intercepted_child_spec)
  end
end
|
lib/etl.ex
| 0.756042 | 0.420659 |
etl.ex
|
starcoder
|
defmodule PathGlob do
  @moduledoc """
  Implements glob matching using the same semantics as `Path.wildcard/2`, but
  without any filesystem interaction.
  """

  import PathGlob.Parser
  import NimbleParsec, only: [defparsecp: 3]

  # Debug logging is compiled in only for this library's own test runs
  # (Elixir >= 1.11 for Logger.put_module_level/2, Mix present, :test env);
  # otherwise debug/1 compiles to a no-op that still "uses" its argument to
  # avoid unused-variable warnings at call sites.
  if System.version() >= "1.11" && Code.ensure_loaded?(Mix) && Mix.env() == :test do
    require Logger
    Logger.put_module_level(__MODULE__, :none)

    defmacrop debug(message) do
      quote do
        require Logger
        Logger.debug("PathGlob: " <> unquote(message))
      end
    end
  else
    defmacrop debug(message) do
      quote do
        # Avoid unused variable warning
        _ = fn -> unquote(message) end
        :ok
      end
    end
  end

  defparsecp(:parse, glob(), inline: true)

  @doc """
  Returns whether or not `path` matches the `glob`.

  The glob is first parsed and compiled as a regular expression. If you're
  using the same glob multiple times in performance-critical code, consider
  using `compile/1` and caching the result.

  ## Examples

      iex> PathGlob.match?("lib/path_glob.ex", "{lib,test}/path_*.ex")
      true

      iex> PathGlob.match?("lib/.formatter.exs", "lib/*", match_dot: true)
      true
  """
  @spec match?(String.t(), String.t(), match_dot: boolean()) :: boolean()
  def match?(path, glob, opts \\ []) do
    String.match?(path, compile(glob, opts))
  end

  @doc """
  Compiles `glob` to a `Regex`.

  Raises `ArgumentError` if `glob` is invalid.

  ## Examples

      iex> PathGlob.compile("{lib,test}/*")
      ~r{^(lib|test)/([^\\./]|(?<=[^/])\\.)*$}

      iex> PathGlob.compile("{lib,test}/path_*.ex", match_dot: true)
      ~r{^(lib|test)/path_[^/]*\\.ex$}
  """
  @spec compile(String.t(), match_dot: boolean()) :: Regex.t()
  def compile(glob, opts \\ []) do
    case parse(glob) do
      {:ok, [parse_tree], "", _, _, _} ->
        # Transform the parse tree into a regex source and compile it.
        regex =
          parse_tree
          |> transform(Keyword.get(opts, :match_dot, false))
          |> Regex.compile!()

        inspect(
          %{
            glob: glob,
            regex: regex,
            parse_tree: parse_tree
          },
          pretty: true
        )
        |> debug()

        regex

      {:error, _, _, _, _, _} = error ->
        debug(inspect(error))
        raise ArgumentError, "failed to parse '#{glob}'"
    end
  end

  # Transforms each child and joins the resulting regex fragments.
  defp transform_join(list, match_dot?, joiner \\ "") when is_list(list) do
    list
    |> Enum.map(&transform(&1, match_dot?))
    |> Enum.join(joiner)
  end

  # Maps one parse-tree node to a regex source fragment.
  defp transform(token, match_dot?) do
    case token do
      # Whole glob: anchor at both ends.
      {:glob, terms} ->
        "^#{transform_join(terms, match_dot?)}$"

      # Literal text: regex-escape it.
      {:literal, items} ->
        items
        |> Enum.join()
        |> Regex.escape()

      # "?": any single character (dot rules applied via any_single/1).
      {:question, _} ->
        any_single(match_dot?)

      # "**/": any number of directories; without match_dot, forbid the next
      # segment from starting with "." via a negative lookahead.
      {:double_star_slash, _} ->
        pattern = "(#{any_single(match_dot?)}+/)*"

        if match_dot? do
          pattern
        else
          "#{pattern}(?!\\.)"
        end

      # "**": any number of directories plus a final non-empty segment.
      {:double_star, _} ->
        "(#{any_single(match_dot?)}+/)*#{any_single(match_dot?)}+"

      # "*": zero or more characters within one path segment.
      {:star, _} ->
        "#{any_single(match_dot?)}*"

      # "{a,b,...}" alternatives.
      {:alternatives, items} ->
        choice(items, match_dot?)

      {:alternatives_item, items} ->
        transform_join(items, match_dot?)

      # "[abc]": alternation over individual characters.
      {:character_list, items} ->
        transform_join(items, match_dot?, "|")

      # "[a-z]": character range.
      {:character_range, [start, finish]} ->
        "[#{transform(start, match_dot?)}-#{transform(finish, match_dot?)}]"

      {:character_class, items} ->
        choice(items, match_dot?)
    end
  end

  # A single non-separator character. Without match_dot, "." is only allowed
  # when not at the start of a segment (lookbehind for a non-"/" character),
  # mirroring Path.wildcard/2's hidden-file semantics.
  defp any_single(match_dot?) do
    if match_dot? do
      "[^/]"
    else
      "([^\\./]|(?<=[^/])\\.)"
    end
  end

  defp choice(items, match_dot?) do
    "(#{transform_join(items, match_dot?, "|")})"
  end
end
|
lib/path_glob.ex
| 0.846133 | 0.481576 |
path_glob.ex
|
starcoder
|
defmodule Sanbase.Signal.Validation.Target do
  @moduledoc """
  Validation helpers for signal targets: watchlists, words, slugs, crypto
  addresses and historical-balance selectors. Every check returns `:ok` or
  `{:error, reason}` with a human-readable reason.
  """

  @doc """
  Validates a generic signal target: `"default"`, a watchlist reference,
  free text, a word (or list of words), or a slug (or list of slugs).
  """
  def valid_target?("default"), do: :ok
  def valid_target?(%{user_list: int}) when is_integer(int), do: :ok
  def valid_target?(%{watchlist_id: int}) when is_integer(int), do: :ok
  def valid_target?(%{text: text}) when is_binary(text), do: :ok
  def valid_target?(%{word: word}) when is_binary(word), do: :ok

  def valid_target?(%{word: words}) when is_list(words),
    do: validate_string_list(words, "The target list contains elements that are not string")

  def valid_target?(%{slug: slug}) when is_binary(slug), do: :ok

  def valid_target?(%{slug: slugs}) when is_list(slugs),
    do: validate_string_list(slugs, "The target list contains elements that are not string")

  def valid_target?(target),
    do: {:error, "#{inspect(target)} is not a valid target"}

  @doc """
  Validates an ethereum wallet target. Watchlist references are explicitly
  rejected; addresses are validated via `valid_crypto_address?/1` and any
  other shape falls back to `valid_target?/1`.
  """
  def valid_eth_wallet_target?(%{eth_address: address_or_addresses}) do
    valid_crypto_address?(address_or_addresses)
  end

  def valid_eth_wallet_target?(%{user_list: _}) do
    {:error, "Watchlists are not valid ethereum wallet target"}
  end

  def valid_eth_wallet_target?(%{watchlist_id: _}) do
    {:error, "Watchlists are not valid ethereum wallet target"}
  end

  def valid_eth_wallet_target?(target), do: valid_target?(target)

  @doc """
  Validates a crypto address target: a slug map, an `%{address: ...}` map,
  or a binary / list of binaries.
  """
  def valid_crypto_address?(%{slug: slug}), do: valid_target?(%{slug: slug})

  def valid_crypto_address?(%{address: address_or_addresses}) do
    valid_crypto_address?(address_or_addresses)
  end

  def valid_crypto_address?(address_or_addresses)
      when is_binary(address_or_addresses) or is_list(address_or_addresses) do
    address_or_addresses
    |> List.wrap()
    |> validate_string_list(
      "#{inspect(address_or_addresses)} is not a valid crypto address. The list contains elements that are not string"
    )
  end

  def valid_crypto_address?(data), do: {:error, "#{inspect(data)} is not a valid crypto address"}

  @doc """
  Validates a historical balance selector map: only the keys
  `:infrastructure`, `:currency` and `:slug` are allowed.
  """
  def valid_historical_balance_selector?(selector) when is_map(selector) do
    if Enum.all?(Map.keys(selector), &(&1 in [:infrastructure, :currency, :slug])) do
      :ok
    else
      {:error, "#{inspect(selector)} is not a valid selector - it has unsupported keys"}
    end
  end

  def valid_historical_balance_selector?(selector) do
    {:error, "#{inspect(selector)} is not a valid selector - it has to be a map"}
  end

  # Shared list check: :ok when every element is a binary, otherwise the
  # given error message. Replaces the duplicated `Enum.find |> case` pattern.
  defp validate_string_list(list, error_message) do
    if Enum.all?(list, &is_binary/1), do: :ok, else: {:error, error_message}
  end
end
|
lib/sanbase/signals/trigger/validation/target_validation.ex
| 0.667581 | 0.414217 |
target_validation.ex
|
starcoder
|
defmodule TextDelta.Document do
  @moduledoc """
  Document-related logic like splitting it into lines etc.
  """

  alias TextDelta.Operation

  @typedoc """
  Reason for an error.
  """
  @type error_reason :: :bad_document

  @typedoc """
  Line segments.

  Each line has a delta of the content on that line (minus `\n`) and a set of
  attributes applied to the entire block.
  """
  @type line_segments :: [{TextDelta.state(), TextDelta.Attributes.t()}]

  @typedoc """
  Result of getting document lines.

  An ok/error tuple. Represents either a successful retrieval in form of
  `{:ok, [line]}` or an error in form of `{:error, reason}`.
  """
  @type lines_result ::
          {:ok, line_segments}
          | {:error, error_reason}

  @doc """
  Breaks document into multiple line segments.

  Given document will be split according to newline characters (`\n`).

  ## Examples

  successful application:

      iex> doc =
      iex>   TextDelta.new()
      iex>   |> TextDelta.insert("hi\\nworld")
      iex>   |> TextDelta.insert("\\n", %{header: 1})
      iex> TextDelta.lines(doc)
      {:ok, [ {%TextDelta{ops: [%{insert: "hi"}]}, %{}},
              {%TextDelta{ops: [%{insert: "world"}]}, %{header: 1}} ]}

  error handling:

      iex> doc = TextDelta.retain(TextDelta.new(), 3)
      iex> TextDelta.lines(doc)
      {:error, :bad_document}
  """
  @spec lines(TextDelta.state()) :: lines_result
  def lines(doc) do
    case valid_document?(doc) do
      true -> {:ok, op_lines(TextDelta.operations(doc), TextDelta.new())}
      false -> {:error, :bad_document}
    end
  end

  @doc """
  Breaks document into multiple line segments.

  Equivalent to `&TextDelta.Document.lines/1`, but instead of returning
  ok/error tuples raises a `RuntimeError`.
  """
  @spec lines!(TextDelta.state()) :: line_segments | no_return
  def lines!(doc) do
    case lines(doc) do
      {:ok, lines} ->
        lines

      {:error, reason} ->
        raise "Can not get lines from document: #{Atom.to_string(reason)}"
    end
  end

  # A standalone "\n" insert terminates the current line; the op's attributes
  # become the block attributes of that line.
  defp op_lines([%{insert: ins} = op | rest], delta) when ins == "\n" do
    [{delta, Map.get(op, :attributes, %{})} | op_lines(rest, TextDelta.new())]
  end

  # Non-string inserts (embeds) can never contain a newline — append as-is.
  defp op_lines([%{insert: ins} = op | rest], delta)
       when not is_bitstring(ins) do
    op_lines(rest, TextDelta.append(delta, op))
  end

  # String inserts may span several lines: split on "\n" (keeping the
  # captures), turn each piece back into an insert op (newlines lose the
  # attributes, text keeps them), and re-process the expanded op list.
  defp op_lines([%{insert: ins} = op | rest], delta) do
    op_from_split_string = fn
      "\n" -> Operation.insert("\n")
      othr -> Operation.insert(othr, Map.get(op, :attributes, %{}))
    end

    case String.split(ins, ~r/\n/, include_captures: true, trim: true) do
      [_] ->
        # No newline inside: append the original op unchanged.
        op_lines(rest, TextDelta.append(delta, op))

      mul ->
        mul
        |> Enum.map(op_from_split_string)
        |> Kernel.++(rest)
        |> op_lines(delta)
    end
  end

  # End of ops: emit the trailing (unterminated) line only if non-empty.
  defp op_lines([], delta) do
    case Kernel.length(TextDelta.operations(delta)) do
      0 -> []
      _ -> [{delta, %{}}]
    end
  end

  # A document is valid when it consists solely of inserts — its total
  # length equals its insert-only length.
  defp valid_document?(document) do
    TextDelta.length(document) == TextDelta.length(document, [:insert])
  end
end
|
lib/text_delta/document.ex
| 0.873929 | 0.510985 |
document.ex
|
starcoder
|
defmodule GenMagic.Server do
  @moduledoc """
  Provides access to the underlying libmagic client, which performs file introspection.
  The Server needs to be supervised, since it will terminate if it receives any unexpected error.
  """
  @behaviour :gen_statem

  alias GenMagic.Result
  alias GenMagic.Server.Data
  alias GenMagic.Server.Status
  require Logger

  @typedoc """
  Represents the reference to the underlying server, as returned by `:gen_statem`.
  """
  @type t :: :gen_statem.server_ref()

  @typedoc """
  Represents values accepted as startup options, which can be passed to `start_link/1`.
  - `:name`: If present, this will be the registered name for the underlying process.
    Note that `:gen_statem` requires `{:local, name}`, but given widespread GenServer convention,
    atoms are accepted and will be converted to `{:local, name}`.
  - `:startup_timeout`: Specifies how long the Server waits for the C program to initialise.
    However, if the underlying C program exits, then the process exits immediately.
    Can be set to `:infinity`.
  - `:process_timeout`: Specifies how long the Server waits for each request to complete.
    Can be set to `:infinity`.
    Please note that, if you have chosen a custom timeout value, you should also pass it when
    using `GenMagic.Server.perform/3`.
  - `:recycle_threshold`: Specifies the number of requests processed before the underlying C
    program is recycled.
    Can be set to `:infinity` if you do not wish for the program to be recycled.
  - `:database_patterns`: Specifies what magic databases to load; you can specify a list of either
    Path Patterns (see `Path.wildcard/2`) or `:default` to instruct the C program to load the
    appropriate databases.
    For example, if you have had to add custom magics, then you can set this value to:
    [:default, "path/to/my/magic"]
  """
  @type option ::
          {:name, atom() | :gen_statem.server_name()}
          | {:startup_timeout, timeout()}
          | {:process_timeout, timeout()}
          | {:recycle_threshold, non_neg_integer() | :infinity}
          | {:database_patterns, nonempty_list(:default | Path.t())}

  @typedoc """
  Current state of the Server:
  - `:starting`: This is the initial state; the Server will attempt to start the underlying Port
    and the libmagic client, then automatically transition to either Available or Crashed.
  - `:available`: This is the default state. In this state the Server is able to accept requests
    and they will be replied in the same order.
  - `:processing`: This is the state the Server will be in if it is processing requests. In this
    state, further requests can still be lodged and they will be processed when the Server is
    available again.
    For proper concurrency, use a process pool like Poolboy, Sbroker, etc.
  - `:recycling`: This is the state the Server will be in, if its underlying C program needs to be
    recycled. This state is triggered whenever the cycle count reaches the defined value as per
    `:recycle_threshold`.
    In this state, the Server is able to accept requests, but they will not be processed until the
    underlying C server program has been started again.
  """
  @type state :: :starting | :available | :processing | :recycling

  @spec child_spec([option()]) :: Supervisor.child_spec()
  @spec start_link([option()]) :: :gen_statem.start_ret()
  @spec perform(t(), Path.t(), timeout()) :: {:ok, Result.t()} | {:error, term()}
  @spec status(t(), timeout()) :: {:ok, Status.t()} | {:error, term()}
  @spec stop(t()) :: :ok
  @spec stop(t(), term(), timeout()) :: :ok

  @doc """
  Returns the default Child Specification for this Server for use in Supervisors.
  You can override this with `Supervisor.child_spec/2` as required.
  """
  def child_spec(options) do
    %{
      id: __MODULE__,
      start: {__MODULE__, :start_link, [options]},
      type: :worker,
      restart: :permanent,
      shutdown: 500
    }
  end

  @doc """
  Starts a new Server.
  See `t:option/0` for further details.
  """
  def start_link(options) do
    {name, options} = Keyword.pop(options, :name)

    # :gen_statem does not accept a bare atom as a server name, so a plain
    # atom is wrapped in {:local, name} (GenServer convention).
    case name do
      nil -> :gen_statem.start_link(__MODULE__, options, [])
      name when is_atom(name) -> :gen_statem.start_link({:local, name}, __MODULE__, options, [])
      {:global, _} -> :gen_statem.start_link(name, __MODULE__, options, [])
      {:via, _, _} -> :gen_statem.start_link(name, __MODULE__, options, [])
      {:local, _} -> :gen_statem.start_link(name, __MODULE__, options, [])
    end
  end

  @doc """
  Determines the type of the file provided.
  """
  def perform(server_ref, path, timeout \\ 5000) do
    case :gen_statem.call(server_ref, {:perform, path}, timeout) do
      {:ok, %Result{} = result} -> {:ok, result}
      {:error, reason} -> {:error, reason}
    end
  end

  @doc """
  Returns status of the Server.
  """
  def status(server_ref, timeout \\ 5000) do
    :gen_statem.call(server_ref, :status, timeout)
  end

  @doc """
  Stops the Server with reason `:normal` and timeout `:infinity`.
  """
  def stop(server_ref) do
    :gen_statem.stop(server_ref)
  end

  @doc """
  Stops the Server with the specified reason and timeout.
  """
  def stop(server_ref, reason, timeout) do
    :gen_statem.stop(server_ref, reason, timeout)
  end

  @impl :gen_statem
  def init(options) do
    import GenMagic.Config

    data = %Data{
      port_name: get_port_name(),
      port_options: get_port_options(options),
      startup_timeout: get_startup_timeout(options),
      process_timeout: get_process_timeout(options),
      recycle_threshold: get_recycle_threshold(options)
    }

    {:ok, :starting, data}
  end

  @impl :gen_statem
  def callback_mode do
    [:state_functions, :state_enter]
  end

  # Opens the Port to the C program on entering :starting; the state times
  # out if the program does not report readiness within startup_timeout.
  @doc false
  def starting(:enter, _, %{request: nil, port: nil} = data) do
    port = Port.open(data.port_name, data.port_options)
    {:keep_state, %{data | port: port}, data.startup_timeout}
  end

  @doc false
  def starting({:call, from}, :status, data) do
    handle_status_call(from, :starting, data)
  end

  # Requests lodged before the C program is ready are postponed and replayed
  # once the Server transitions to :available.
  @doc false
  def starting({:call, _from}, {:perform, _path}, _data) do
    {:keep_state_and_data, :postpone}
  end

  @doc false
  def starting(:info, {port, {:data, response}}, %{port: port} = data) do
    _ = Logger.debug(fn -> "GenMagic: #{inspect(self())} ← #{String.trim(response)}" end)

    # The C program prints "ok" on a line of its own when ready; any other
    # output before that is ignored.
    Enum.reduce_while(String.split(response, "\n"), :keep_state_and_data, fn
      "ok", _ -> {:halt, {:next_state, :available, data}}
      _, _ -> {:cont, :keep_state_and_data}
    end)
  end

  @doc false
  def available(:enter, _old_state, %{request: nil}) do
    :keep_state_and_data
  end

  @doc false
  def available({:call, from}, {:perform, path}, data) do
    # System.monotonic_time/0 replaces the deprecated :erlang.now/0; the
    # timestamp is only carried inside the request triplet for bookkeeping.
    data = %{data | cycles: data.cycles + 1, request: {path, from, System.monotonic_time()}}
    command = "file; " <> path <> "\n"
    _ = Logger.debug(fn -> "GenMagic: #{inspect(self())} → #{String.trim(command)}" end)
    _ = send(data.port, {self(), {:command, command}})
    {:next_state, :processing, data}
  end

  @doc false
  def available({:call, from}, :status, data) do
    handle_status_call(from, :available, data)
  end

  @doc false
  def processing(:enter, _old_state, %{request: {_path, _from, _time}} = data) do
    {:keep_state_and_data, data.process_timeout}
  end

  @doc false
  def processing({:call, _from}, {:perform, _path}, _data) do
    {:keep_state_and_data, :postpone}
  end

  @doc false
  def processing({:call, from}, :status, data) do
    handle_status_call(from, :processing, data)
  end

  @doc false
  def processing(:info, {port, {:data, response}}, %{port: port} = data) do
    _ = Logger.debug(fn -> "GenMagic: #{inspect(self())} ← #{String.trim(response)}" end)
    {_, from, _} = data.request
    data = %{data | request: nil}
    response = {:reply, from, handle_response(response)}
    # Recycle the C program once enough requests have been served.
    next_state = (data.cycles >= data.recycle_threshold && :recycling) || :available
    {:next_state, next_state, data, response}
  end

  @doc false
  def recycling(:enter, _, %{request: nil, port: port} = data) when is_port(port) do
    _ = send(data.port, {self(), :close})
    {:keep_state_and_data, data.startup_timeout}
  end

  @doc false
  def recycling({:call, _from}, {:perform, _path}, _data) do
    {:keep_state_and_data, :postpone}
  end

  @doc false
  def recycling({:call, from}, :status, data) do
    handle_status_call(from, :recycling, data)
  end

  @doc false
  def recycling(:info, {port, :closed}, %{port: port} = data) do
    {:next_state, :starting, %{data | port: nil, cycles: 0}}
  end

  # Parses a single response line from the C program.
  # Success lines look like "ok; <mime>\t<encoding>\t<content>".
  defp handle_response("ok; " <> message) do
    case message |> String.trim() |> String.split("\t") do
      [mime_type, encoding, content] -> {:ok, Result.build(mime_type, encoding, content)}
      _ -> {:error, :malformed_response}
    end
  end

  defp handle_response("error; " <> message) do
    {:error, String.trim(message)}
  end

  # Replies to a :status call with the current state name and cycle count.
  defp handle_status_call(from, state, data) do
    response = {:ok, %__MODULE__.Status{state: state, cycles: data.cycles}}
    {:keep_state_and_data, {:reply, from, response}}
  end
end
|
lib/gen_magic/server.ex
| 0.857917 | 0.510313 |
server.ex
|
starcoder
|
defmodule Minio.Helper do
  @doc "Computes the HMAC-SHA256 of `data` under `key`."
  def hmac(key, data) do
    :crypto.mac(:hmac, :sha256, key, data)
  end

  @doc "Computes the SHA256 digest of `data`."
  def sha256(data) do
    :crypto.hash(:sha256, data)
  end

  @doc "Encodes binary `data` as a lowercase hexadecimal string."
  def hex_digest(data) do
    Base.encode16(data, case: :lower)
  end

  @doc "Formats the date part of `datetime` as a basic ISO 8601 string."
  def iso8601_date(datetime) do
    datetime
    |> DateTime.to_date()
    |> Date.to_iso8601(:basic)
  end

  @doc "Formats `datetime` (truncated to whole seconds) as a basic ISO 8601 string."
  def iso8601_datetime(datetime) do
    truncated = %{datetime | microsecond: {0, 0}}
    DateTime.to_iso8601(truncated, :basic)
  end

  @doc """
  Construct result url based on the endpoint, bucket name and object name.
  """
  def get_target_uri(endpoint, opts \\ []) do
    endpoint
    |> URI.parse()
    |> URI.merge(build_path(opts))
  end

  # Maps the recognised option shapes onto a relative path; anything else
  # resolves to the bare endpoint.
  defp build_path(bucket_name: bucket, object_name: object), do: "#{bucket}/#{object}"
  defp build_path(bucket_name: bucket), do: "#{bucket}"
  defp build_path(_opts), do: ""

  @doc """
  Return host form parsed URI with port only visible if port is not
  one of the default ports.
  """
  def remove_default_port(%URI{host: host, port: port}) do
    if port in [80, 443] do
      to_string(host)
    else
      "#{host}:#{port}"
    end
  end

  @doc """
  Checks if the bucketname provided is valid
  """
  def is_valid_bucket_name(""), do: {:error, "Bucket name can't be empty"}

  def is_valid_bucket_name(name) when is_bitstring(name) do
    len = String.length(name)

    cond do
      len < 3 -> {:error, "Bucket name can't be less than 3 characters"}
      len > 63 -> {:error, "Bucket name can't be more than 63 characters"}
      true -> :ok
    end
  end

  def is_valid_bucket_name(_), do: {:error, "Bucket name must be a string"}

  @doc """
  checks if the objectname provided is valid
  """
  def is_valid_object_name(""), do: {:error, "Object name can't be empty"}
  def is_valid_object_name(name) when is_bitstring(name), do: :ok
  def is_valid_object_name(_), do: {:error, "Object name must be a string"}
end
|
lib/minio/helper.ex
| 0.726814 | 0.433682 |
helper.ex
|
starcoder
|
defmodule Engine.Fee.Fetcher.Updater do
  @moduledoc """
  Decides whether fees will be updated from the fetched fees from the feed.
  """
  alias Engine.Fee
  alias Engine.Fee.Fetcher.Updater.Merger

  @type can_update_result_t :: {:ok, Fee.full_fee_t()} | :no_changes

  # Internal data structure resulted from merge `stored_fees` and `fetched_fees` by tx type.
  # See `merge_specs_by_tx_type/2`
  @typep maybe_unpaired_fee_specs_merge_t :: %{non_neg_integer() => Fee.fee_t() | {Fee.fee_t(), Fee.fee_t()}}

  # As above but fully paired, which means `stored_fees` and `fetched_fees` support the same tx types
  @typep paired_fee_specs_merge_t :: %{non_neg_integer() => {Fee.fee_t(), Fee.fee_t()}}

  @doc """
  Newly fetched fees will be effective as long as the amount change on any token is significant
  or the time passed from previous update exceeds the update interval.
  """
  @spec can_update(
          stored_fees :: Fee.full_fee_t() | nil,
          fetched_fees :: Fee.full_fee_t(),
          tolerance_percent :: pos_integer()
        ) :: can_update_result_t()
  # Identical specs -> nothing to do; no stored fees yet -> always update.
  def can_update(fee_spec, fee_spec, _tolerance_percent), do: :no_changes
  def can_update(nil, new_fee_spec, _tolerance_percent), do: {:ok, new_fee_spec}

  def can_update(stored_fees, fetched_fees, tolerance_percent) do
    merged = merge_specs_by_tx_type(stored_fees, fetched_fees)

    # Any structural mismatch (tx types or tokens) or a significant amount
    # change forces an update; otherwise the stored fees stay in effect.
    with false <- stored_and_fetched_differs_on_tx_type?(merged),
         false <- stored_and_fetched_differs_on_token?(merged),
         amount_diffs = Map.values(Merger.merge_specs(stored_fees, fetched_fees)),
         false <- is_change_significant?(amount_diffs, tolerance_percent) do
      :no_changes
    else
      _ -> {:ok, fetched_fees}
    end
  end

  @spec merge_specs_by_tx_type(Fee.full_fee_t(), Fee.full_fee_t()) :: maybe_unpaired_fee_specs_merge_t()
  defp merge_specs_by_tx_type(stored_specs, fetched_specs) do
    Map.merge(stored_specs, fetched_specs, fn _t, stored_fees, fetched_fees -> {stored_fees, fetched_fees} end)
  end

  # Tells whether each tx_type in stored fees has a corresponding fees in fetched
  # Returns `true` when there is a mismatch
  @spec stored_and_fetched_differs_on_tx_type?(maybe_unpaired_fee_specs_merge_t()) :: boolean()
  defp stored_and_fetched_differs_on_tx_type?(merged_specs) do
    merged_specs
    |> Map.values()
    |> Enum.all?(&Kernel.is_tuple/1)
    |> Kernel.not()
  end

  # Checks whether previously stored and fetched fees differs on token
  # Returns `true` when there is a mismatch
  @spec stored_and_fetched_differs_on_token?(paired_fee_specs_merge_t()) :: boolean()
  defp stored_and_fetched_differs_on_token?(merged_specs) do
    Enum.any?(merged_specs, &merge_pair_differs_on_token?/1)
  end

  @spec merge_pair_differs_on_token?({non_neg_integer(), {Fee.fee_t(), Fee.fee_t()}}) :: boolean()
  defp merge_pair_differs_on_token?({_type, {stored_fees, fetched_fees}}) do
    not MapSet.equal?(
      stored_fees |> Map.keys() |> MapSet.new(),
      fetched_fees |> Map.keys() |> MapSet.new()
    )
  end

  # Change is significant when
  # - token amount difference exceeds the tolerance level,
  # - there is missing token in any of specs, so token support was either added or removed
  #   in the update.
  @spec is_change_significant?(list(Fee.merged_fee_t()), non_neg_integer()) :: boolean()
  defp is_change_significant?(token_amounts, tolerance_percent) do
    tolerance_rate = tolerance_percent / 100

    token_amounts
    |> Enum.flat_map(&Map.values/1)
    |> Enum.any?(&amount_diff_exceeds_tolerance?(&1, tolerance_rate))
  end

  defp amount_diff_exceeds_tolerance?([_no_change], _rate), do: false

  # Guard against division by zero: when the stored amount is 0, any non-zero
  # fetched amount is an (infinitely large) relative change, hence significant.
  # Previously this crashed with ArithmeticError on `abs(...) / 0`.
  defp amount_diff_exceeds_tolerance?([stored, fetched], _rate) when stored == 0 do
    fetched != stored
  end

  defp amount_diff_exceeds_tolerance?([stored, fetched], rate) do
    abs(stored - fetched) / stored >= rate
  end
end
|
apps/engine/lib/engine/fee/fetcher/updater.ex
| 0.869894 | 0.433742 |
updater.ex
|
starcoder
|
defmodule Openmaize.Config do
  @moduledoc """
  This module provides an abstraction layer for configuration.
  The following are valid configuration items.
  | name               | type          | default          |
  | :----------------- | :------------ | ---------------: |
  | crypto_mod         | module        | Comeonin.Bcrypt  |
  | hash_name          | atom          | :password_hash   |
  | log_level          | atom          | :info            |
  | drop_user_keys     | list of atoms | []               |
  | password_min_len   | integer       | 8                |
  | remember_salt      | string        | N/A              |
  ## Examples
  The simplest way to change the default values would be to add
  an `openmaize` entry to the `config.exs` file in your project,
  like the following example.
      config :openmaize,
        crypto_mod: Comeonin.Bcrypt,
        hash_name: :encrypted_password,
        drop_user_keys: [:shoe_size],
        password_min_len: 12
  """

  @doc """
  The password hashing and checking algorithm. Bcrypt is the default.
  You can supply any module, but the module must implement the following
  functions:
  * hashpwsalt/1 - hashes the password
  * checkpw/2 - given a password and a salt, returns if match
  * dummy_checkpw/0 - performs a hash and returns false
  See Comeonin.Bcrypt for examples.
  """
  def crypto_mod, do: Application.get_env(:openmaize, :crypto_mod, Comeonin.Bcrypt)

  @doc """
  The name in the database for the password hash.
  If, for example, you are migrating from Devise, you will need to
  change this to `encrypted_password`.
  """
  def hash_name, do: Application.get_env(:openmaize, :hash_name, :password_hash)

  @doc """
  The log level for Openmaize logs.
  This should either be an atom, :debug, :info, :warn or :error, or
  false.
  The default is :info, which means that :info, :warn and :error logs
  will be returned.
  """
  def log_level, do: Application.get_env(:openmaize, :log_level, :info)

  @doc """
  The keys that are removed from the user struct before it is passed
  on to another function.
  This should be a list of atoms.
  By default, :password_hash (or the value for hash_name), :password,
  :otp_secret, :confirmation_token and :reset_token are removed, and
  this option allows you to add to this list.
  """
  def drop_user_keys do
    # User-configured keys come first, followed by the always-dropped set.
    configured = Application.get_env(:openmaize, :drop_user_keys, [])
    configured ++ [hash_name(), :password, :otp_secret, :confirmation_token, :reset_token]
  end

  @doc """
  Minimum length for the password strength check.
  The default minimum length is 8.
  The Openmaize.Password module provides a basic check and an advanced
  check, both of which use the `password_min_len` value. For more
  information about the advanced check, see the documentation for
  the Openmaize.Password module.
  """
  def password_min_len, do: Application.get_env(:openmaize, :password_min_len, 8)

  @doc """
  Salt to be used when signing and verifying the `remember me` cookie.
  """
  def remember_salt, do: Application.get_env(:openmaize, :remember_salt)
end
|
lib/openmaize/config.ex
| 0.791861 | 0.62395 |
config.ex
|
starcoder
|
defmodule Pie.State do
  @moduledoc """
  Pipeline state handling.
  - At any given moment the state can be evaluated to extract a result
  - An invalid pipeline can not be updated
  """
  alias Pie.State.Update

  defstruct valid?: true,
            track_updates?: false,
            update_count: 0,
            updates: [],
            initial_value: nil,
            current_value: nil,
            error: nil

  @typedoc """
  A struct to hold the state of the pipeline.
  """
  @type t :: %__MODULE__{
          valid?: true | false,
          update_count: non_neg_integer(),
          updates: [Update.t()],
          track_updates?: true | false,
          current_value: any(),
          initial_value: any(),
          error: any()
        }

  @type result :: {:ok, any()} | {:error, t()}

  @doc """
  Creates a new valid state from the given data.
  """
  @spec new(any(), Keyword.t()) :: t()
  def new(data, opts \\ []) do
    %__MODULE__{
      current_value: data,
      initial_value: data,
      track_updates?: opts[:track_updates] || false,
      updates: [],
      update_count: 0
    }
  end

  @doc """
  Updates the state of the pipeline. It does not update an invalid state.
  """
  @spec update(t(), any(), Keyword.t()) :: t()
  def update(state, value, opts \\ [])

  def update(%__MODULE__{valid?: true, track_updates?: false} = state, value, _opts) do
    %__MODULE__{state | current_value: value, update_count: state.update_count + 1}
  end

  def update(%__MODULE__{valid?: true, track_updates?: true} = state, value, opts)
      when is_list(opts) do
    %__MODULE__{
      state
      | current_value: value,
        updates: [build_update(state, value, opts) | state.updates],
        update_count: state.update_count + 1
    }
  end

  # Invalid states (and malformed opts) pass through untouched.
  def update(%__MODULE__{} = state, _value, _opts), do: state

  # Builds the tracking entry describing a single value transition.
  defp build_update(%__MODULE__{} = state, new_value, opts) do
    %Update{
      previous_value: state.current_value,
      new_value: new_value,
      index: state.update_count,
      label: opts[:label]
    }
  end

  @doc """
  Invalidates a state and sets its error
  """
  @spec invalidate(t(), any()) :: t()
  def invalidate(%__MODULE__{} = state, error) do
    %__MODULE__{state | valid?: false, error: error}
  end

  @doc """
  Returns an ok or error tuple depending on the value of the given state.
  """
  @spec eval(t()) :: result()
  def eval(%__MODULE__{valid?: true, current_value: value}), do: {:ok, value}
  def eval(%__MODULE__{} = state), do: {:error, state}
end
|
lib/pie/state.ex
| 0.888671 | 0.528716 |
state.ex
|
starcoder
|
defmodule File.Stream do
@moduledoc """
Defines a `File.Stream` struct returned by `File.stream!/3`.
The following fields are public:
* `path` - the file path
* `modes` - the file modes
* `raw` - a boolean indicating if bin functions should be used
* `line_or_bytes` - if reading should read lines or a given amount of bytes
"""
defstruct path: nil, modes: [], line_or_bytes: :line, raw: true
@type t :: %__MODULE__{}
@doc false
# Internal constructor used by `File.stream!/3`.
def __build__(path, modes, line_or_bytes) do
# The stream runs in "raw" mode unless the caller asked for an :encoding;
# raw mode bypasses IO-server character conversion.
raw = :lists.keyfind(:encoding, 1, modes) == false
modes =
if raw do
# :read_ahead is added by default for raw streams, unless it was
# explicitly disabled with `read_ahead: false`.
if :lists.keyfind(:read_ahead, 1, modes) == {:read_ahead, false} do
[:raw | modes]
else
[:raw, :read_ahead | modes]
end
else
modes
end
%File.Stream{path: path, modes: modes, raw: raw, line_or_bytes: line_or_bytes}
end
defimpl Collectable do
# Opens the file for writing and returns a collector function, so the
# stream can be the target of `Enum.into/2`.
def into(%{path: path, modes: modes, raw: raw} = stream) do
# :read is dropped because the collectable side only writes.
modes = for mode <- modes, not mode in [:read], do: mode
case :file.open(path, [:write | modes]) do
{:ok, device} ->
{:ok, into(device, stream, raw)}
{:error, reason} ->
raise File.Error, reason: reason, action: "stream", path: path
end
end
# Collector: writes each element (binary-safe in raw mode) and closes the
# device when the collection is done or halted.
defp into(device, stream, raw) do
fn
:ok, {:cont, x} ->
case raw do
true -> IO.binwrite(device, x)
false -> IO.write(device, x)
end
:ok, :done ->
# If delayed_write option is used and the last write failed will
# MatchError here as {:error, _} is returned.
:ok = :file.close(device)
stream
:ok, :halt ->
# If delayed_write option is used and the last write failed will
# MatchError here as {:error, _} is returned.
:ok = :file.close(device)
end
end
end
defimpl Enumerable do
# Lazily opens the file on first demand and guarantees it is closed when
# the reduction finishes or halts (via `Stream.resource/3`).
def reduce(%{path: path, modes: modes, line_or_bytes: line_or_bytes, raw: raw}, acc, fun) do
# :write/:append are dropped because the enumerable side only reads.
modes = for mode <- modes, not mode in [:write, :append], do: mode
start_fun =
fn ->
case :file.open(path, modes) do
{:ok, device} -> device
{:error, reason} ->
raise File.Error, reason: reason, action: "stream", path: path
end
end
next_fun =
case raw do
true -> &IO.each_binstream(&1, line_or_bytes)
false -> &IO.each_stream(&1, line_or_bytes)
end
Stream.resource(start_fun, next_fun, &:file.close/1).(acc, fun)
end
# Neither count nor membership can be answered without reading the file,
# so fall back to the default reduce-based implementations.
def count(_stream) do
{:error, __MODULE__}
end
def member?(_stream, _term) do
{:error, __MODULE__}
end
end
end
|
lib/elixir/lib/file/stream.ex
| 0.83056 | 0.465934 |
stream.ex
|
starcoder
|
defmodule Kino do
  @moduledoc """
  Client-driven interactive widgets for Livebook.
  Kino is the library used by Livebook to render rich and interactive
  output directly from your Elixir code.
  Kino renders any data structure that implements the `Kino.Render`
  protocol, falling back to the `inspect/2` representation whenever
  an implementation is not available. The data structures supported
  by Kino out of the box are:
  ### VegaLite
  `VegaLite` specifications are rendered as visualizations:
      Vl.new(...)
      |> Vl.data_from_series(...)
      |> ...
  ### Kino.VegaLite
  `Kino.VegaLite` is an extension of `VegaLite` that allows data to
  be streamed:
      widget =
        Vl.new(...)
        |> Vl.data_from_series(...)
        |> ...
        |> Kino.VegaLite.new()
        |> tap(&Kino.render/1)
      Kino.VegaLite.push(widget, %{x: 1, y: 2})
  ### Kino.ETS
  `Kino.ETS` implements a data table output for ETS tables in the
  system:
      tid = :ets.new(:users, [:set, :public])
      Kino.ETS.new(tid)
  ### Kino.DataTable
  `Kino.DataTable` implements a data table output for user-provided
  tabular data:
      data = [
        %{id: 1, name: "Elixir", website: "https://elixir-lang.org"},
        %{id: 2, name: "Erlang", website: "https://www.erlang.org"}
      ]
      Kino.DataTable.new(data)
  ### Kino.Image
  `Kino.Image` wraps binary image content and can be used to render
  raw images of any given format:
      content = File.read!("/path/to/image.jpeg")
      Kino.Image.new(content, "image/jpeg")
  ### Kino.Markdown
  `Kino.Markdown` wraps Markdown content for richer text rendering.
      Kino.Markdown.new(\"\"\"
      # Example
      A regular Markdown file.
      ## Code
      ```elixir
      "Elixir" |> String.graphemes() |> Enum.frequencies()
      ```
      ## Table
      | ID | Name   | Website                 |
      | -- | ------ | ----------------------- |
      | 1  | Elixir | https://elixir-lang.org |
      | 2  | Erlang | https://www.erlang.org  |
      \"\"\")
  ### Kino.Ecto
  `Kino.Ecto` implements a data table output for arbitrary
  `Ecto` queries:
      Kino.Ecto.new(Weather, Repo)
  ### All others
  All other data structures are rendered as text using Elixir's
  `inspect/2`.
  """

  @doc """
  Sends the given term as cell output.
  This allows any Livebook cell to have multiple evaluation
  results. You can think of this function as a generalized
  `IO.puts/2` that works for any type.
  """
  @spec render(term()) :: :"do not show this result in output"
  def render(term) do
    gl = Process.group_leader()
    ref = Process.monitor(gl)
    output = Kino.Render.to_livebook(term)

    # The group leader (Livebook's IO proxy) is asked to put the output; the
    # monitor ensures we do not block forever should it terminate meanwhile.
    send(gl, {:io_request, self(), ref, {:livebook_put_output, output}})

    receive do
      {:io_reply, ^ref, :ok} -> :ok
      {:io_reply, ^ref, _} -> :error
      {:DOWN, ^ref, :process, _object, _reason} -> :error
    end

    # :flush discards a :DOWN message for `ref` that may have been delivered
    # after the reply was received; a plain demonitor/1 would leak that stray
    # message into the caller's mailbox.
    Process.demonitor(ref, [:flush])

    :"do not show this result in output"
  end

  @doc """
  Configures Kino.
  The supported options are:
  * `:inspect`
  They are discussed individually in the sections below.
  ## Inspect
  A keyword list containing inspect options used for printing
  usual evaluation results. Defaults to pretty formatting with
  a limit of 50 entries.
  To show more entries, you configure a higher limit:
      Kino.configure(inspect: [limit: 200])
  You can also show all entries by setting the limit to `:infinity`,
  but keep in mind that for large data structures it is memory-expensive
  and is not an advised configuration in this case. Instead prefer
  the use of `IO.inspect/2` with `:infinity` limit when needed.
  See `Inspect.Opts` for the full list of options.
  """
  @spec configure(keyword()) :: :ok
  def configure(options) do
    Kino.Config.configure(options)
  end
end
|
lib/kino.ex
| 0.876251 | 0.830663 |
kino.ex
|
starcoder
|
defmodule AWS.Directory do
@moduledoc """
AWS Directory Service
AWS Directory Service is a web service that makes it easy for you to setup and
run directories in the AWS cloud, or connect your AWS resources with an existing
on-premises Microsoft Active Directory.
This guide provides detailed information about AWS Directory Service operations,
data types, parameters, and errors. For information about AWS Directory Services
features, see [AWS Directory Service](https://aws.amazon.com/directoryservice/) and the [AWS Directory Service Administration
Guide](http://docs.aws.amazon.com/directoryservice/latest/admin-guide/what_is.html).
AWS provides SDKs that consist of libraries and sample code for various
programming languages and platforms (Java, Ruby, .Net, iOS, Android, etc.). The
SDKs provide a convenient way to create programmatic access to AWS Directory
Service and other AWS services. For more information about the AWS SDKs,
including how to download and install them, see [Tools for Amazon Web Services](http://aws.amazon.com/tools/).
"""
alias AWS.Client
alias AWS.Request
# Static service descriptor consumed by `AWS.Request`: it selects the "ds"
# endpoint, the JSON 1.1 protocol and SigV4 signing (signing name "ds") for
# the Directory Service API, version 2015-04-16.
def metadata do
%AWS.ServiceMetadata{
abbreviation: "Directory Service",
api_version: "2015-04-16",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "ds",
global?: false,
protocol: "json",
service_id: "Directory Service",
signature_version: "v4",
signing_name: "ds",
target_prefix: "DirectoryService_20150416"
}
end
# Each operation below is a thin wrapper over the AWS JSON protocol: it posts
# `input` under the matching action name to the Directory Service endpoint via
# `Request.request_post/5` and returns that call's result unchanged.
@doc """
Accepts a directory sharing request that was sent from the directory owner
account.
"""
def accept_shared_directory(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AcceptSharedDirectory", input, options)
end
@doc """
If the DNS server for your on-premises domain uses a publicly addressable IP
address, you must add a CIDR address block to correctly route traffic to and
from your Microsoft AD on Amazon Web Services.
*AddIpRoutes* adds this address block. You can also use *AddIpRoutes* to
facilitate routing traffic that uses public IP ranges from your Microsoft AD on
AWS to a peer VPC.
Before you call *AddIpRoutes*, ensure that all of the required permissions have
been explicitly granted through a policy. For details about what permissions are
required to run the *AddIpRoutes* operation, see [AWS Directory Service API Permissions: Actions, Resources, and Conditions
Reference](http://docs.aws.amazon.com/directoryservice/latest/admin-guide/UsingWithDS_IAM_ResourcePermissions.html).
"""
def add_ip_routes(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AddIpRoutes", input, options)
end
@doc """
Adds two domain controllers in the specified Region for the specified directory.
"""
def add_region(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AddRegion", input, options)
end
@doc """
Adds or overwrites one or more tags for the specified directory.
Each directory can have a maximum of 50 tags. Each tag consists of a key and
optional value. Tag keys must be unique to each resource.
"""
def add_tags_to_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AddTagsToResource", input, options)
end
@doc """
Cancels an in-progress schema extension to a Microsoft AD directory.
Once a schema extension has started replicating to all domain controllers, the
task can no longer be canceled. A schema extension can be canceled during any of
the following states; `Initializing`, `CreatingSnapshot`, and `UpdatingSchema`.
"""
def cancel_schema_extension(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CancelSchemaExtension", input, options)
end
@doc """
Creates an AD Connector to connect to an on-premises directory.
Before you call `ConnectDirectory`, ensure that all of the required permissions
have been explicitly granted through a policy. For details about what
permissions are required to run the `ConnectDirectory` operation, see [AWS Directory Service API Permissions: Actions, Resources, and Conditions
Reference](http://docs.aws.amazon.com/directoryservice/latest/admin-guide/UsingWithDS_IAM_ResourcePermissions.html).
"""
def connect_directory(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ConnectDirectory", input, options)
end
@doc """
Creates an alias for a directory and assigns the alias to the directory.
The alias is used to construct the access URL for the directory, such as
`http://<alias>.awsapps.com`.
After an alias has been created, it cannot be deleted or reused, so this
operation should only be used when absolutely necessary.
"""
def create_alias(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateAlias", input, options)
end
@doc """
Creates an Active Directory computer object in the specified directory.
"""
def create_computer(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateComputer", input, options)
end
@doc """
Creates a conditional forwarder associated with your AWS directory.
Conditional forwarders are required in order to set up a trust relationship with
another domain. The conditional forwarder points to the trusted domain.
"""
def create_conditional_forwarder(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateConditionalForwarder", input, options)
end
# Continuation of the one-wrapper-per-action pattern: each function posts its
# `input` to the Directory Service endpoint via `Request.request_post/5`.
@doc """
Creates a Simple AD directory.
For more information, see [Simple Active Directory](https://docs.aws.amazon.com/directoryservice/latest/admin-guide/directory_simple_ad.html)
in the *AWS Directory Service Admin Guide*.
Before you call `CreateDirectory`, ensure that all of the required permissions
have been explicitly granted through a policy. For details about what
permissions are required to run the `CreateDirectory` operation, see [AWS Directory Service API Permissions: Actions, Resources, and Conditions
Reference](http://docs.aws.amazon.com/directoryservice/latest/admin-guide/UsingWithDS_IAM_ResourcePermissions.html).
"""
def create_directory(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateDirectory", input, options)
end
@doc """
Creates a subscription to forward real-time Directory Service domain controller
security logs to the specified Amazon CloudWatch log group in your AWS account.
"""
def create_log_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateLogSubscription", input, options)
end
@doc """
Creates a Microsoft AD directory in the AWS Cloud.
For more information, see [AWS Managed Microsoft AD](https://docs.aws.amazon.com/directoryservice/latest/admin-guide/directory_microsoft_ad.html)
in the *AWS Directory Service Admin Guide*.
Before you call *CreateMicrosoftAD*, ensure that all of the required permissions
have been explicitly granted through a policy. For details about what
permissions are required to run the *CreateMicrosoftAD* operation, see [AWS Directory Service API Permissions: Actions, Resources, and Conditions
Reference](http://docs.aws.amazon.com/directoryservice/latest/admin-guide/UsingWithDS_IAM_ResourcePermissions.html).
"""
def create_microsoft_ad(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateMicrosoftAD", input, options)
end
@doc """
Creates a snapshot of a Simple AD or Microsoft AD directory in the AWS cloud.
You cannot take snapshots of AD Connector directories.
"""
def create_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateSnapshot", input, options)
end
@doc """
AWS Directory Service for Microsoft Active Directory allows you to configure
trust relationships.
For example, you can establish a trust between your AWS Managed Microsoft AD
directory, and your existing on-premises Microsoft Active Directory. This would
allow you to provide users and groups access to resources in either domain, with
a single set of credentials.
This action initiates the creation of the AWS side of a trust relationship
between an AWS Managed Microsoft AD directory and an external domain. You can
create either a forest trust or an external trust.
"""
def create_trust(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateTrust", input, options)
end
@doc """
Deletes a conditional forwarder that has been set up for your AWS directory.
"""
def delete_conditional_forwarder(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteConditionalForwarder", input, options)
end
@doc """
Deletes an AWS Directory Service directory.
Before you call `DeleteDirectory`, ensure that all of the required permissions
have been explicitly granted through a policy. For details about what
permissions are required to run the `DeleteDirectory` operation, see [AWS Directory Service API Permissions: Actions, Resources, and Conditions
Reference](http://docs.aws.amazon.com/directoryservice/latest/admin-guide/UsingWithDS_IAM_ResourcePermissions.html).
"""
def delete_directory(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDirectory", input, options)
end
@doc """
Deletes the specified log subscription.
"""
def delete_log_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteLogSubscription", input, options)
end
@doc """
Deletes a directory snapshot.
"""
def delete_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteSnapshot", input, options)
end
@doc """
Deletes an existing trust relationship between your AWS Managed Microsoft AD
directory and an external domain.
"""
def delete_trust(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteTrust", input, options)
end
@doc """
Deletes from the system the certificate that was registered for secure LDAP or
client certificate authentication.
"""
def deregister_certificate(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeregisterCertificate", input, options)
end
@doc """
Removes the specified directory as a publisher to the specified SNS topic.
"""
def deregister_event_topic(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeregisterEventTopic", input, options)
end
@doc """
Displays information about the certificate registered for secure LDAP or client
certificate authentication.
"""
def describe_certificate(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeCertificate", input, options)
end
@doc """
Obtains information about the conditional forwarders for this account.
If no input parameters are provided for RemoteDomainNames, this request
describes all conditional forwarders for the specified directory ID.
"""
def describe_conditional_forwarders(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeConditionalForwarders", input, options)
end
@doc """
Obtains information about the directories that belong to this account.
You can retrieve information about specific directories by passing the directory
identifiers in the `DirectoryIds` parameter. Otherwise, all directories that
belong to the current account are returned.
This operation supports pagination with the use of the `NextToken` request and
response parameters. If more results are available, the
`DescribeDirectoriesResult.NextToken` member contains a token that you pass in
the next call to `DescribeDirectories` to retrieve the next set of items.
You can also specify a maximum number of return results with the `Limit`
parameter.
"""
def describe_directories(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDirectories", input, options)
end
@doc """
Provides information about any domain controllers in your directory.
"""
def describe_domain_controllers(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDomainControllers", input, options)
end
@doc """
Obtains information about which SNS topics receive status messages from the
specified directory.
If no input parameters are provided, such as DirectoryId or TopicName, this
request describes all of the associations in the account.
"""
def describe_event_topics(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEventTopics", input, options)
end
@doc """
Describes the status of LDAP security for the specified directory.
"""
def describe_ldaps_settings(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLDAPSSettings", input, options)
end
@doc """
Provides information about the Regions that are configured for multi-Region
replication.
"""
def describe_regions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeRegions", input, options)
end
@doc """
Returns the shared directories in your account.
"""
def describe_shared_directories(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeSharedDirectories", input, options)
end
@doc """
Obtains information about the directory snapshots that belong to this account.
This operation supports pagination with the use of the *NextToken* request and
response parameters. If more results are available, the
*DescribeSnapshots.NextToken* member contains a token that you pass in the next
call to `DescribeSnapshots` to retrieve the next set of items.
You can also specify a maximum number of return results with the *Limit*
parameter.
"""
def describe_snapshots(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeSnapshots", input, options)
end
@doc """
Obtains information about the trust relationships for this account.
If no input parameters are provided, such as DirectoryId or TrustIds, this
request describes all the trust relationships belonging to the account.
"""
def describe_trusts(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeTrusts", input, options)
end
@doc """
Disables alternative client authentication methods for the specified directory.
"""
def disable_client_authentication(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisableClientAuthentication", input, options)
end
@doc """
Deactivates LDAP secure calls for the specified directory.
"""
def disable_ldaps(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisableLDAPS", input, options)
end
@doc """
Disables multi-factor authentication (MFA) with the Remote Authentication Dial
In User Service (RADIUS) server for an AD Connector or Microsoft AD directory.
"""
def disable_radius(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisableRadius", input, options)
end
@doc """
Disables single-sign on for a directory.
"""
def disable_sso(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisableSso", input, options)
end
@doc """
Enables alternative client authentication methods for the specified directory.
"""
def enable_client_authentication(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "EnableClientAuthentication", input, options)
end

@doc """
Activates the switch for the specific directory to always use LDAP secure calls.
"""
def enable_ldaps(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "EnableLDAPS", input, options)
end

@doc """
Enables multi-factor authentication (MFA) with the Remote Authentication Dial In
User Service (RADIUS) server for an AD Connector or Microsoft AD directory.
"""
def enable_radius(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "EnableRadius", input, options)
end

@doc """
Enables single sign-on for a directory.
Single sign-on allows users in your directory to access certain AWS services
from a computer joined to the directory without having to enter their
credentials separately.
"""
def enable_sso(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "EnableSso", input, options)
end

@doc """
Obtains directory limit information for the current Region.
"""
def get_directory_limits(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "GetDirectoryLimits", input, options)
end

@doc """
Obtains the manual snapshot limits for a directory.
"""
def get_snapshot_limits(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "GetSnapshotLimits", input, options)
end

@doc """
For the specified directory, lists all the certificates registered for a secure
LDAP or client certificate authentication.
"""
def list_certificates(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "ListCertificates", input, options)
end

@doc """
Lists the address blocks that you have added to a directory.
"""
def list_ip_routes(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "ListIpRoutes", input, options)
end

@doc """
Lists the active log subscriptions for the AWS account.
"""
def list_log_subscriptions(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "ListLogSubscriptions", input, options)
end

@doc """
Lists all schema extensions applied to a Microsoft AD Directory.
"""
def list_schema_extensions(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "ListSchemaExtensions", input, options)
end

@doc """
Lists all tags on a directory.
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "ListTagsForResource", input, options)
end

@doc """
Registers a certificate for a secure LDAP or client certificate authentication.
"""
def register_certificate(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "RegisterCertificate", input, options)
end

@doc """
Associates a directory with an SNS topic.
This establishes the directory as a publisher to the specified SNS topic. You
can then receive email or text (SMS) messages when the status of your directory
changes. You get notified if your directory goes from an Active status to an
Impaired or Inoperable status. You also receive a notification when the
directory returns to an Active status.
"""
def register_event_topic(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "RegisterEventTopic", input, options)
end

@doc """
Rejects a directory sharing request that was sent from the directory owner
account.
"""
def reject_shared_directory(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "RejectSharedDirectory", input, options)
end

@doc """
Removes IP address blocks from a directory.
"""
def remove_ip_routes(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "RemoveIpRoutes", input, options)
end

@doc """
Stops all replication and removes the domain controllers from the specified
Region.
You cannot remove the primary Region with this operation. Instead, use the
`DeleteDirectory` API.
"""
def remove_region(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "RemoveRegion", input, options)
end

@doc """
Removes tags from a directory.
"""
def remove_tags_from_resource(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "RemoveTagsFromResource", input, options)
end

@doc """
Resets the password for any user in your AWS Managed Microsoft AD or Simple AD
directory.
You can reset the password for any user in your directory with the following
exceptions:
  * For Simple AD, you cannot reset the password for any user that is
a member of either the **Domain Admins** or **Enterprise Admins** group except
for the administrator user.
  * For AWS Managed Microsoft AD, you can only reset the password for
a user that is in an OU based off of the NetBIOS name that you typed when you
created your directory. For example, you cannot reset the password for a user in
the **AWS Reserved** OU. For more information about the OU structure for an AWS
Managed Microsoft AD directory, see [What Gets Created](https://docs.aws.amazon.com/directoryservice/latest/admin-guide/ms_ad_getting_started_what_gets_created.html)
in the *AWS Directory Service Administration Guide*.
"""
def reset_user_password(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "ResetUserPassword", input, options)
end
@doc """
Restores a directory using an existing directory snapshot.
When you restore a directory from a snapshot, any changes made to the directory
after the snapshot date are overwritten.
This action returns as soon as the restore operation is initiated. You can
monitor the progress of the restore operation by calling the
`DescribeDirectories` operation with the directory identifier. When the
**DirectoryDescription.Stage** value changes to `Active`, the restore operation
is complete.
"""
def restore_from_snapshot(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "RestoreFromSnapshot", input, options)
end

@doc """
Shares a specified directory (`DirectoryId`) in your AWS account (directory
owner) with another AWS account (directory consumer).
With this operation you can use your directory from any AWS account and from any
Amazon VPC within an AWS Region.
When you share your AWS Managed Microsoft AD directory, AWS Directory Service
creates a shared directory in the directory consumer account. This shared
directory contains the metadata to provide access to the directory within the
directory owner account. The shared directory is visible in all VPCs in the
directory consumer account.
The `ShareMethod` parameter determines whether the specified directory can be
shared between AWS accounts inside the same AWS organization (`ORGANIZATIONS`).
It also determines whether you can share the directory with any other AWS
account either inside or outside of the organization (`HANDSHAKE`).
The `ShareNotes` parameter is only used when `HANDSHAKE` is called, which sends
a directory sharing request to the directory consumer.
"""
def share_directory(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "ShareDirectory", input, options)
end

@doc """
Applies a schema extension to a Microsoft AD directory.
"""
def start_schema_extension(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "StartSchemaExtension", input, options)
end

@doc """
Stops the directory sharing between the directory owner and consumer accounts.
"""
def unshare_directory(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "UnshareDirectory", input, options)
end

@doc """
Updates a conditional forwarder that has been set up for your AWS directory.
"""
def update_conditional_forwarder(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "UpdateConditionalForwarder", input, options)
end

@doc """
Adds or removes domain controllers to or from the directory.
Based on the difference between current value and new value (provided through
this API call), domain controllers will be added or removed. It may take up to
45 minutes for any new domain controllers to become fully active once the
requested number of domain controllers is updated. During this time, you cannot
make another update request.
"""
def update_number_of_domain_controllers(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "UpdateNumberOfDomainControllers", input, options)
end

@doc """
Updates the Remote Authentication Dial In User Service (RADIUS) server
information for an AD Connector or Microsoft AD directory.
"""
def update_radius(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "UpdateRadius", input, options)
end

@doc """
Updates the trust that has been set up between your AWS Managed Microsoft AD
directory and an on-premises Active Directory.
"""
def update_trust(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "UpdateTrust", input, options)
end

@doc """
AWS Directory Service for Microsoft Active Directory allows you to configure and
verify trust relationships.
This action verifies a trust relationship between your AWS Managed Microsoft AD
directory and an external domain.
"""
def verify_trust(%Client{} = client, input, options \\ []) do
  meta = metadata()
  Request.request_post(client, meta, "VerifyTrust", input, options)
end
end
|
lib/aws/generated/directory.ex
| 0.857753 | 0.434161 |
directory.ex
|
starcoder
|
defmodule AdaptableCostsEvaluatorWeb.ComputationController do
@moduledoc """
CRUD endpoints for Computations, plus endpoints that attach/detach a
Computation to/from an Organization.

Authorization is delegated to the `Bodyguard` policy on
`AdaptableCostsEvaluator.Computations.Computation`; failed permits and bang
lookups fall through to `FallbackController`. OpenAPI metadata is declared
inline via `OpenApiSpex.ControllerSpecs` `operation` macros.
"""
use AdaptableCostsEvaluatorWeb, :controller
use OpenApiSpex.ControllerSpecs
import AdaptableCostsEvaluatorWeb.Helpers.AuthHelper, only: [current_user: 1]
alias AdaptableCostsEvaluator.Computations
alias AdaptableCostsEvaluator.Computations.Computation
alias AdaptableCostsEvaluatorWeb.ApiSpec.{Schemas, Parameters, Errors}
# Unmatched `with` results (e.g. {:error, _}) are rendered by this fallback.
action_fallback AdaptableCostsEvaluatorWeb.FallbackController
tags ["Computations"]
security [%{"JWT" => []}]
operation :organization_index,
summary: "List Computations in the Organization",
parameters: [Parameters.organization_id()],
responses:
[
ok: {"Computation list response", "application/json", Schemas.ComputationsResponse}
] ++ Errors.internal_errors()
# Lists Computations belonging to the given Organization, if the current user
# may list them.
def organization_index(conn, %{"organization_id" => organization_id}) do
with :ok <-
Bodyguard.permit(Computation, :organization_list, current_user(conn), organization_id) do
computations = Computations.list_computations(organization_id: organization_id)
render(conn, "index.json", computations: computations)
end
end
operation :index,
summary: "List Computations of the User",
parameters: [
creator_id: [
in: :path,
type: :integer,
description: "ID of the user who created the Computations",
example: 42
]
],
responses:
[
ok: {"Computation list response", "application/json", Schemas.ComputationsResponse}
] ++ Errors.internal_errors()
# Lists Computations created by the given user.
# NOTE(review): `creator_id` is cast to integer for the permission check but
# passed to `list_computations/1` as the raw string — confirm the context
# layer accepts a string id here.
def index(conn, %{"creator_id" => creator_id}) do
with :ok <-
Bodyguard.permit(Computation, :list, current_user(conn), String.to_integer(creator_id)) do
computations = Computations.list_computations(creator_id: creator_id)
render(conn, "index.json", computations: computations)
end
end
operation :create,
summary: "Create a new Computation",
request_body:
{"Computation attributes", "application/json", Schemas.ComputationRequest, required: true},
responses:
[
created: {"Computation response", "application/json", Schemas.ComputationResponse}
] ++ Errors.all_errors()
# Creates a Computation owned by the current user and replies 201 with a
# Location header.
# NOTE(review): the permit call passes `nil` for both actor and params, so any
# authenticated request is allowed to create — confirm this is intentional.
def create(conn, %{"computation" => computation_params}) do
with :ok <- Bodyguard.permit(Computation, :create, nil, nil),
{:ok, %Computation{} = computation} <-
Computations.create_computation(current_user(conn), computation_params) do
conn
|> put_status(:created)
|> put_resp_header("location", Routes.computation_path(conn, :show, computation))
|> render("show.json", computation: computation)
end
end
operation :organization_create,
summary: "Add the Computation to the Organization",
parameters: [Parameters.organization_id(), Parameters.computation_id()],
responses:
[
no_content:
{"Computation successfully added to the Organization", "application/json", nil}
] ++ Errors.internal_errors()
# Attaches an existing Computation to an Organization; 204 on success.
# The bang lookup runs before the permission check and raises if the
# Computation does not exist (handled by the fallback).
def organization_create(conn, %{
"organization_id" => organization_id,
"computation_id" => computation_id
}) do
computation = Computations.get_computation!(computation_id)
bodyguard_params = %{computation_id: computation_id, organization_id: organization_id}
with :ok <-
Bodyguard.permit(
Computation,
:organization_create,
current_user(conn),
bodyguard_params
),
{:ok, _} <- Computations.add_computation_to_organization(computation, organization_id) do
send_resp(conn, :no_content, "")
end
end
operation :show,
summary: "Retrieve the Computation",
parameters: [Parameters.id()],
responses:
[
ok: {"Computation response", "application/json", Schemas.ComputationResponse}
] ++ Errors.internal_errors()
# Renders a single Computation after a :read permit.
def show(conn, %{"id" => id}) do
computation = Computations.get_computation!(id)
with :ok <- Bodyguard.permit(Computation, :read, current_user(conn), computation.id) do
render(conn, "show.json", computation: computation)
end
end
operation :update,
summary: "Update the Computation",
parameters: [Parameters.id()],
request_body:
{"Computation attributes", "application/json", Schemas.ComputationRequest, required: true},
responses:
[
ok: {"Computation response", "application/json", Schemas.ComputationResponse}
] ++ Errors.all_errors()
# Updates the Computation's attributes after an :update permit.
def update(conn, %{"id" => id, "computation" => computation_params}) do
computation = Computations.get_computation!(id)
with :ok <- Bodyguard.permit(Computation, :update, current_user(conn), computation.id),
{:ok, %Computation{} = computation} <-
Computations.update_computation(computation, computation_params) do
render(conn, "show.json", computation: computation)
end
end
operation :delete,
summary: "Delete the Computation",
parameters: [Parameters.id()],
responses:
[
no_content: {"Computation was successfully deleted", "application/json", nil}
] ++ Errors.internal_errors()
# Deletes the Computation entirely; 204 on success.
def delete(conn, %{"id" => id}) do
computation = Computations.get_computation!(id)
with :ok <- Bodyguard.permit(Computation, :delete, current_user(conn), computation.id),
{:ok, %Computation{}} <- Computations.delete_computation(computation) do
send_resp(conn, :no_content, "")
end
end
operation :organization_delete,
summary: "Remove the Computation from the Organization",
parameters: [Parameters.computation_id(), Parameters.organization_id()],
responses:
[
no_content:
{"Computation was successfully removed from the Organization", "application/json", nil}
] ++ Errors.internal_errors()
# Detaches the Computation from the Organization. The lookup is scoped to the
# Organization, so a Computation outside it raises (404 via fallback).
# Note the `from_org: true` flag — deletion here only removes the association,
# per the option name; confirm against `Computations.delete_computation/2`.
def organization_delete(conn, %{
"organization_id" => organization_id,
"computation_id" => computation_id
}) do
computation =
Computations.get_computation_by!(
organization_id: organization_id,
id: computation_id
)
with :ok <-
Bodyguard.permit(
Computation,
:organization_delete,
current_user(conn),
computation.id
),
{:ok, %Computation{}} <- Computations.delete_computation(computation, from_org: true) do
send_resp(conn, :no_content, "")
end
end
end
|
lib/adaptable_costs_evaluator_web/controllers/computation_controller.ex
| 0.776199 | 0.403067 |
computation_controller.ex
|
starcoder
|
defmodule AWS.Macie do
  @moduledoc """
  Amazon Macie Classic
  Amazon Macie Classic has been discontinued and is no longer available.
  A new Amazon Macie is now available with significant design improvements and
  additional features, at a lower price and in most Amazon Web Services Regions.
  We encourage you to take advantage of the new and improved features, and benefit
  from the reduced cost. To learn about features and pricing for the new Macie,
  see [Amazon Macie](http://aws.amazon.com/macie/). To learn how to use the new Macie, see the [Amazon Macie User
  Guide](https://docs.aws.amazon.com/macie/latest/user/what-is-macie.html).
  """

  alias AWS.Client
  alias AWS.Request

  # Static service descriptor shared by every operation in this module.
  def metadata do
    %AWS.ServiceMetadata{
      abbreviation: nil,
      api_version: "2017-12-19",
      content_type: "application/x-amz-json-1.1",
      credential_scope: nil,
      endpoint_prefix: "macie",
      global?: false,
      protocol: "json",
      service_id: "Macie",
      signature_version: "v4",
      signing_name: "macie",
      target_prefix: "MacieService"
    }
  end

  @doc """
  (Discontinued) Associates a specified Amazon Web Services account with Amazon
  Macie Classic as a member account.
  """
  def associate_member_account(%Client{} = client, input, options \\ []) do
    post(client, "AssociateMemberAccount", input, options)
  end

  @doc """
  (Discontinued) Associates specified S3 resources with Amazon Macie Classic for
  monitoring and data classification.
  If `memberAccountId` isn't specified, the action associates specified S3
  resources with Macie Classic for the current Macie Classic administrator
  account. If `memberAccountId` is specified, the action associates specified S3
  resources with Macie Classic for the specified member account.
  """
  def associate_s3_resources(%Client{} = client, input, options \\ []) do
    post(client, "AssociateS3Resources", input, options)
  end

  @doc """
  (Discontinued) Removes the specified member account from Amazon Macie Classic.
  """
  def disassociate_member_account(%Client{} = client, input, options \\ []) do
    post(client, "DisassociateMemberAccount", input, options)
  end

  @doc """
  (Discontinued) Removes specified S3 resources from being monitored by Amazon
  Macie Classic.
  If `memberAccountId` isn't specified, the action removes specified S3 resources
  from Macie Classic for the current Macie Classic administrator account. If
  `memberAccountId` is specified, the action removes specified S3 resources from
  Macie Classic for the specified member account.
  """
  def disassociate_s3_resources(%Client{} = client, input, options \\ []) do
    post(client, "DisassociateS3Resources", input, options)
  end

  @doc """
  (Discontinued) Lists all Amazon Macie Classic member accounts for the current
  Macie Classic administrator account.
  """
  def list_member_accounts(%Client{} = client, input, options \\ []) do
    post(client, "ListMemberAccounts", input, options)
  end

  @doc """
  (Discontinued) Lists all the S3 resources associated with Amazon Macie Classic.
  If `memberAccountId` isn't specified, the action lists the S3 resources
  associated with Macie Classic for the current Macie Classic administrator
  account. If `memberAccountId` is specified, the action lists the S3 resources
  associated with Macie Classic for the specified member account.
  """
  def list_s3_resources(%Client{} = client, input, options \\ []) do
    post(client, "ListS3Resources", input, options)
  end

  @doc """
  (Discontinued) Updates the classification types for the specified S3 resources.
  If `memberAccountId` isn't specified, the action updates the classification
  types of the S3 resources associated with Amazon Macie Classic for the current
  Macie Classic administrator account. If `memberAccountId` is specified, the
  action updates the classification types of the S3 resources associated with
  Macie Classic for the specified member account.
  """
  def update_s3_resources(%Client{} = client, input, options \\ []) do
    post(client, "UpdateS3Resources", input, options)
  end

  # Every Macie Classic operation is a plain JSON POST against the same
  # endpoint, differing only in the action name.
  defp post(client, action, input, options) do
    Request.request_post(client, metadata(), action, input, options)
  end
end
|
lib/aws/generated/macie.ex
| 0.74382 | 0.410136 |
macie.ex
|
starcoder
|
defmodule Iyzico.Inquiry do
# Internal payload for a BIN/installment inquiry; built by `Iyzico.BinInquiry`
# and serialized via the `Iyzico.IOListConvertible` protocol.
@moduledoc false
@doc false
# bin: leading digits of the card number; conversation_id: caller-supplied
# correlation id; price: amount as a string; currency: currency tag atom.
defstruct [ :bin, :conversation_id, :price, :currency ]
# NOTE(review): only :try is declared here, while Iyzico.BinInquiry declares
# `:try | :eur` — confirm whether this type should include :eur as well.
@type currency :: :try
@type t :: %__MODULE__{
bin: binary,
conversation_id: binary,
price: number,
currency: currency
}
end
defimpl Iyzico.IOListConvertible, for: Iyzico.Inquiry do
  # Resolved once at compile time. The `[]` default matters: without it,
  # `Application.get_env(:iyzico, Iyzico)` returns nil when the app env is
  # missing and `Keyword.get/3` raises FunctionClauseError during compilation.
  @default_locale Keyword.get(Application.get_env(:iyzico, Iyzico, []), :locale, "en")

  # Renders the inquiry as the key/value pairs expected by the Iyzico API.
  # The currency atom is upcased (e.g. :try -> "TRY").
  def to_iolist(data) do
    [
      {"locale", @default_locale},
      {"conversationId", data.conversation_id},
      {"binNumber", data.bin},
      {"price", data.price},
      {"currency", Atom.to_string(data.currency) |> String.upcase()}
    ]
  end
end
defmodule Iyzico.BinInquiry do
  @bin_length 6

  @moduledoc """
  Functions for inquirying cards from their BIN numbers to fetch superficial information.
  ## Motivation
  Before requesting to confirm a payment,
  - users could be interrogated with available installment options, or
  - one might need to show card family as an overlay in the user interface of an application, or
  - user interface might show a switch for *3D Secure* preference of a user.
  Emanating from those philosophical concerns, a superficial information could be fetched upon given BIN number, which
  is first #{@bin_length} characters of a credit card number.
  ## Performing inquiry
  You can retrieve card information with providing a BIN number:
  ```
  {:ok, inquiry, metadata} = perform_inquiry("450634", "123456789", "100.00")
  ```
  If you already have a card, you can also supply it to the same function:
  ```
  card = %Iyzico.Card{}
  {:ok, inquiry, metadata} = perform_inquiry(card, "123456789", "100.00")
  ```
  ## Common options
  - `:api_key`: API key to be used in authentication, optional. Configuration is used instead if not supplied.
  - `:api_secret`: API secret key to be used in authentication. Configuration is used instead if not supplied.
  """

  @type currency :: :try | :eur

  import Iyzico.Client
  import Iyzico.Card
  import Iyzico.ErrorHandler

  alias Iyzico.InquiryResult
  alias Iyzico.Metadata
  alias Iyzico.CardReference
  alias Iyzico.InstallmentOption

  @doc """
  Inquiries given BIN number/card with price, retrieves details and specifications for the given card,
  available installment options and shows whether usage of *3D Secure* is mandatory.
  ## Caveats
  - If the card appears to be a `:debit` card, *3D Secure* is mandatory.
  - The underlying inquiry response represents a subset of the values found in *Installment & Commission Management*
  panel.
  - Local cards could not perform transactions in foreign currencies.
  ## Options
  See common options.
  """
  # Spec fixed: on success this returns the serialized %Iyzico.InquiryResult{},
  # not an %Iyzico.Inquiry{} (see serialize_resp/1 below).
  @spec perform_inquiry(Iyzico.Card.t | binary, binary, binary, currency, Keyword.t) ::
    {:ok, Iyzico.InquiryResult.t, Iyzico.Metadata.t} |
    {:error, atom}
  def perform_inquiry(bin_or_card, conversation_id, price, currency, opts \\ [])

  def perform_inquiry(card = %Iyzico.Card{}, conversation_id, price, currency, opts)
      when is_binary(conversation_id) and is_binary(price) do
    # Fixed off-by-one: `String.slice(number, 0..@bin_length)` took 7 characters
    # (ranges are inclusive); the BIN is the first @bin_length (6) digits.
    card.number
    |> String.slice(0, @bin_length)
    |> submit_inquiry(conversation_id, price, currency, opts)
  end

  def perform_inquiry(bin, conversation_id, price, currency, opts)
      when is_binary(bin) and is_binary(conversation_id) and is_binary(price) do
    if String.length(bin) == @bin_length do
      submit_inquiry(bin, conversation_id, price, currency, opts)
    else
      {:error, :einval}
    end
  end

  # Builds the inquiry payload and performs the POST; shared by both
  # perform_inquiry/5 clauses (previously duplicated verbatim).
  defp submit_inquiry(bin, conversation_id, price, currency, opts) do
    inquiry =
      %Iyzico.Inquiry{
        conversation_id: conversation_id,
        bin: bin,
        price: price,
        currency: currency
      }

    case request([], :post, url_for_path("/payment/iyzipos/installment"), [], inquiry, opts) do
      {:ok, resp} ->
        if resp["status"] == "success",
          do: serialize_resp(resp),
          else: handle_error(resp)

      any ->
        # Transport-level failures are passed through unchanged.
        any
    end
  end

  # Maps the raw JSON response onto the result structs. Only the first entry of
  # "installmentDetails" is used.
  defp serialize_resp(resp) do
    details = List.first(resp["installmentDetails"])

    result =
      %InquiryResult{
        card_ref: %CardReference{
          assoc: get_card_assoc(details["cardAssociation"]),
          family: get_card_family(details["cardFamilyName"]),
          type: get_card_type(details["cardType"]),
          bank_name: details["bankName"]
        },
        price: details["price"],
        installment_opts: Enum.map(details["installmentPrices"], fn (x) ->
          %InstallmentOption{
            per_month_price: x["installmentPrice"],
            stages: x["installmentNumber"]
          }
        end),
        # "forceCvc" == 1 is how the API flags mandatory 3D Secure.
        is_secure_payment_mandatory?: details["forceCvc"] == 1
      }

    metadata =
      %Metadata{
        system_time: resp["systemTime"],
        succeed?: resp["status"] == "success",
        phase: resp["phase"],
        locale: resp["locale"],
        auth_code: resp["authCode"]}

    {:ok, result, metadata}
  end
end
|
lib/endpoint/bin_inquiry.ex
| 0.775009 | 0.717136 |
bin_inquiry.ex
|
starcoder
|
defmodule RDF.Triple do
@moduledoc """
Helper functions for RDF triples.
An RDF Triple is represented as a plain Elixir tuple consisting of three valid
RDF values for subject, predicate and object.
"""
alias RDF.{Statement, PropertyMap}
@type t :: {Statement.subject(), Statement.predicate(), Statement.object()}
@type t_values :: {String.t(), String.t(), any}
@doc """
Creates a `RDF.Triple` with proper RDF values.
An error is raised when the given elements are not coercible to RDF values.
Note: The `RDF.triple` function is a shortcut to this function.
## Examples
iex> RDF.Triple.new("http://example.com/S", "http://example.com/p", 42)
{~I<http://example.com/S>, ~I<http://example.com/p>, RDF.literal(42)}
iex> RDF.Triple.new(EX.S, EX.p, 42)
{RDF.iri("http://example.com/S"), RDF.iri("http://example.com/p"), RDF.literal(42)}
iex> RDF.Triple.new(EX.S, :p, 42, RDF.PropertyMap.new(p: EX.p))
{RDF.iri("http://example.com/S"), RDF.iri("http://example.com/p"), RDF.literal(42)}
"""
@spec new(
Statement.coercible_subject(),
Statement.coercible_predicate(),
Statement.coercible_object(),
PropertyMap.t() | nil
) :: t
def new(subject, predicate, object, property_map \\ nil)
# Without a property map the predicate is coerced directly.
def new(subject, predicate, object, nil) do
{
Statement.coerce_subject(subject),
Statement.coerce_predicate(predicate),
Statement.coerce_object(object)
}
end
# With a property map, terms (e.g. :p) can be resolved to IRIs during
# predicate coercion.
def new(subject, predicate, object, %PropertyMap{} = property_map) do
{
Statement.coerce_subject(subject),
Statement.coerce_predicate(predicate, property_map),
Statement.coerce_object(object)
}
end
@doc """
Creates a `RDF.Triple` with proper RDF values.
An error is raised when the given elements are not coercible to RDF values.
Note: The `RDF.triple` function is a shortcut to this function.
## Examples
iex> RDF.Triple.new {"http://example.com/S", "http://example.com/p", 42}
{~I<http://example.com/S>, ~I<http://example.com/p>, RDF.literal(42)}
iex> RDF.Triple.new {EX.S, EX.p, 42}
{RDF.iri("http://example.com/S"), RDF.iri("http://example.com/p"), RDF.literal(42)}
iex> RDF.Triple.new {EX.S, EX.p, 42, EX.Graph}
{RDF.iri("http://example.com/S"), RDF.iri("http://example.com/p"), RDF.literal(42)}
iex> RDF.Triple.new {EX.S, :p, 42}, RDF.PropertyMap.new(p: EX.p)
{RDF.iri("http://example.com/S"), RDF.iri("http://example.com/p"), RDF.literal(42)}
"""
@spec new(Statement.coercible_t(), PropertyMap.t() | nil) :: t
def new(statement, property_map \\ nil)
def new({subject, predicate, object}, property_map),
do: new(subject, predicate, object, property_map)
def new({subject, predicate, object, _}, property_map),
do: new(subject, predicate, object, property_map)
@doc """
Returns a tuple of native Elixir values from a `RDF.Triple` of RDF terms.
When a `:context` option is given with a `RDF.PropertyMap`, predicates will
be mapped to the terms defined in the `RDF.PropertyMap`, if present.
Returns `nil` if one of the components of the given tuple is not convertible via `RDF.Term.value/1`.
## Examples
iex> RDF.Triple.values {~I<http://example.com/S>, ~I<http://example.com/p>, RDF.literal(42)}
{"http://example.com/S", "http://example.com/p", 42}
iex> {~I<http://example.com/S>, ~I<http://example.com/p>, RDF.literal(42)}
...> |> RDF.Triple.values(context: %{p: ~I<http://example.com/p>})
{"http://example.com/S", :p, 42}
"""
@spec values(t, keyword) :: t_values | nil
def values(triple, opts \\ []) do
if property_map = PropertyMap.from_opts(opts) do
map(triple, Statement.default_property_mapping(property_map))
else
map(triple, &Statement.default_term_mapping/1)
end
end
@doc """
Returns a triple where each element from a `RDF.Triple` is mapped with the given function.
Returns `nil` if one of the components of the given tuple is not convertible via `RDF.Term.value/1`.
The function `fun` will receive a tuple `{statement_position, rdf_term}` where
`statement_position` is one of the atoms `:subject`, `:predicate` or `:object`,
while `rdf_term` is the RDF term to be mapped. When the given function returns
`nil` this will be interpreted as an error and will become the overhaul result
of the `map/2` call.
## Examples
iex> {~I<http://example.com/S>, ~I<http://example.com/p>, RDF.literal(42)}
...> |> RDF.Triple.map(fn
...> {:object, object} -> RDF.Term.value(object)
...> {_, term} -> term |> to_string() |> String.last()
...> end)
{"S", "p", 42}
"""
@spec map(t, Statement.term_mapping()) :: t_values | nil
def map({subject, predicate, object}, fun) do
with subject_value when not is_nil(subject_value) <- fun.({:subject, subject}),
predicate_value when not is_nil(predicate_value) <- fun.({:predicate, predicate}),
object_value when not is_nil(object_value) <- fun.({:object, object}) do
{subject_value, predicate_value, object_value}
else
_ -> nil
end
end
@doc """
Checks if the given tuple is a valid RDF triple.
The elements of a valid RDF triple must be RDF terms. On the subject
position only IRIs and blank nodes allowed, while on the predicate position
only IRIs allowed. The object position can be any RDF term.
"""
@spec valid?(t | any) :: boolean
def valid?(tuple)
def valid?({_, _, _} = triple), do: Statement.valid?(triple)
def valid?(_), do: false
end
|
lib/rdf/triple.ex
| 0.870865 | 0.71039 |
triple.ex
|
starcoder
|
defmodule DataMatrix.MappingMatrix do
@moduledoc false
# Computes the module-placement ("mapping") matrix for Data Matrix symbols.
# NOTE(review): this looks like the standard ECC200 placement algorithm
# (codewords laid out in diagonal strips with special corner shapes) —
# confirm against ISO/IEC 16022 before relying on that description.
#
# `@size` is a compile-time tuple of `{nrow, ncol}` mapping-region sizes,
# indexed by symbol version, loaded from a static data file.
@size Code.eval_file("lib/datamatrix/static/mapping_matrix_size.tuple") |> elem(0)
@doc """
"""
def get_mapping_matrix(version) do
{nrow, ncol} = elem(@size, version)
# Walk the diagonal placement path; for each still-unoccupied start module,
# emit the 8 modules of its codeword shape and mark them occupied.
{mapping, _} =
generate_placement_path(nrow, ncol)
|> Enum.flat_map_reduce(MapSet.new(), fn module, occupied ->
if available?(module, occupied) do
modules =
Enum.map(shape(module, {nrow, ncol}), fn {row, col} ->
module({row, col}, {nrow, ncol})
end)
new_occupied = MapSet.union(occupied, MapSet.new(modules))
{modules, new_occupied}
else
{[], occupied}
end
end)
# For these four square sizes the placement leaves a fixed 2x2 area in the
# lower-right corner unused; its two diagonal modules are reported
# separately so the caller can fill them.
remaining_area =
if {nrow, ncol} in [{10, 10}, {14, 14}, {18, 18}, {22, 22}] do
[{nrow - 1, ncol - 1}, {nrow - 2, ncol - 2}]
else
[]
end
{mapping, remaining_area}
end
# Generates the nominal diagonal walk over the mapping region, alternating
# between upward-right and downward-left sweeps, starting at {4, 0}.
# Coordinates may fall outside the region; `module/2` wraps them back in.
defp generate_placement_path(nrow, ncol) do
Stream.iterate({4, 0, :upper_right}, fn {row, col, direction} ->
if direction == :upper_right do
if row - 2 >= 0 && col + 2 < ncol do
{row - 2, col + 2, :upper_right}
else
# Sweep left the region edge: jump to the start of the next
# downward-left diagonal.
{row - 1, col + 5, :lower_left}
end
else
if row + 2 < nrow && col - 2 >= 0 do
{row + 2, col - 2, :lower_left}
else
# Jump to the start of the next upward-right diagonal.
{row + 5, col - 1, :upper_right}
end
end
end)
|> Stream.map(fn {row, col, _} ->
{row, col}
end)
|> Enum.take_while(fn {row, col} ->
row < nrow || col < ncol
end)
end
defp available?(module, occupied) do
not MapSet.member?(occupied, module)
end
# The following `shape/2` clauses return the 8 module positions of one
# codeword. The first four are the special corner shapes (selected by the
# start position and region-width residues); the last is the nominal
# L-shaped ("utah") pattern anchored at {row, col}.
defp shape({row, col}, {nrow, ncol}) when row == nrow and col == 0 do
[
{nrow - 1, 0},
{nrow - 1, 1},
{nrow - 1, 2},
{0, ncol - 2},
{0, ncol - 1},
{1, ncol - 1},
{2, ncol - 1},
{3, ncol - 1}
]
end
defp shape({row, col}, {nrow, ncol})
when row == nrow - 2 and col == 0 and rem(ncol, 4) != 0 do
[
{nrow - 3, 0},
{nrow - 2, 0},
{nrow - 1, 0},
{0, ncol - 4},
{0, ncol - 3},
{0, ncol - 2},
{0, ncol - 1},
{1, ncol - 1}
]
end
defp shape({row, col}, {nrow, ncol})
when row == nrow - 2 and col == 0 and rem(ncol, 8) == 4 do
[
{nrow - 3, 0},
{nrow - 2, 0},
{nrow - 1, 0},
{0, ncol - 2},
{0, ncol - 1},
{1, ncol - 1},
{2, ncol - 1},
{3, ncol - 1}
]
end
defp shape({row, col}, {nrow, ncol})
when row == nrow + 4 and col == 2 and rem(ncol, 8) == 0 do
[
{nrow - 1, 0},
{nrow - 1, ncol - 1},
{0, ncol - 3},
{0, ncol - 2},
{0, ncol - 1},
{1, ncol - 3},
{1, ncol - 2},
{1, ncol - 1}
]
end
# Nominal 8-module shape for an in-bounds anchor.
defp shape({row, col}, {nrow, ncol}) when row in 0..(nrow - 1) and col in 0..(ncol - 1) do
[
{row - 2, col - 2},
{row - 2, col - 1},
{row - 1, col - 2},
{row - 1, col - 1},
{row - 1, col},
{row, col - 2},
{row, col - 1},
{row, col}
]
end
# Anchors outside the region (and not a corner case) place nothing.
defp shape({_, _}, {_, _}), do: []
# Wraps out-of-range coordinates back into the mapping region, applying the
# column/row offset the placement algorithm prescribes for wrapped modules.
defp module({row, col}, {nrow, ncol}) when row < 0 do
module({row + nrow, col + 4 - rem(nrow + 4, 8)}, {nrow, ncol})
end
defp module({row, col}, {nrow, ncol}) when col < 0 do
module({row + 4 - rem(ncol + 4, 8), col + ncol}, {nrow, ncol})
end
defp module({row, col}, {_, _}), do: {row, col}
end
|
lib/datamatrix/mapping_matrix.ex
| 0.641535 | 0.609466 |
mapping_matrix.ex
|
starcoder
|
defmodule Plymio.Option.Utility do
@moduledoc ~S"""
Utility Function for Managing (Keyword) Options ("opts")
## Documentation Terms
In the documentation there are terms, usually in *italics*, used to mean the same thing (e.g. *opts*).
### opts
*opts* is a `Keyword`.
### derivable opts
*derivable opts* is either a `Keyword` or `Map` with `Atom` keys (from which the *opts* can be *derived* simply using `Map.to_list/1`).
### *key*
A *key* is an `Atom`.
### key list
A *key list* is a list of *key*s.
### key spec
A *key spec* is usually a *key list*.
Alternatively a `Map` with `Atom` keys or a `Keyword` can be given and the (unique) keys will be used.
### key alias dict
A *key alias dict* is usually a `Map` with `Atom` keys and values used for canonicalising keys (e.g. as the 2nd argument to `opts_canonical_keys/2`).
Alternatively a `Keyword` with `Atom` values can be given and will be converted on the fly.
### key dict
A *key dict* is usually a `Map` with `Atom` keys.
Alternatively a `Keyword` with `Atom` values can be given and will be converted on the fly.
### tuple predicate
A *tuple predicate* is an arity one function that when passed a `{key,value}` tuple returns `true` or `false`.
## Return Values
Many functions support an API that returns either `{:ok, result}` or `{:error, error}` where `error` will be an `Exception`.
The default action for bang function when fielding `{:error, error}` is to raise the `error`.
In many cases the `error` will be a `KeyError` where its `key` field is set to the key, or list of keys, that is missing, unknown, etc.
"""
@type key :: atom
@type keys :: key | [key]
@type alias_key :: key
@type alias_keys :: keys
@type alias_value :: nil | alias_keys
@type aliases_kvs :: [{alias_key, alias_value}]
@type aliases_tuples :: [{alias_key, alias_key}]
@type aliases_dict :: %{optional(alias_key) => alias_key}
@type defaults_map :: %{optional(alias_key) => any}
@type opts :: Keyword.t
@type dict :: %{optional(alias_key) => any}
@type error :: struct
defdelegate opts_take_keys(arg0,arg1), to: Keyword, as: :take
defdelegate opts_drop_keys(arg0,arg1), to: Keyword, as: :drop
# Reduces a *key spec* (key list, keyword or map) to `{:ok, unique_keys}`,
# or `{:error, error}` when any key is not an atom / the value is not an enum.
defp normalise_key_spec(value)
defp normalise_key_spec(value) when is_list(value) do
  if Keyword.keyword?(value) do
    {:ok, value |> Keyword.keys() |> Enum.uniq()}
  else
    case Enum.reject(value, &is_atom/1) do
      [] -> {:ok, Enum.uniq(value)}
      bad_keys -> {:error, %KeyError{key: bad_keys, term: value}}
    end
  end
end
defp normalise_key_spec(value) when is_map(value) do
  value |> Map.keys() |> normalise_key_spec()
end
defp normalise_key_spec(value) do
  new_error_result(m: "expected enum", v: value)
end
@spec validate_key_list(any) :: {:ok, keys} | {:error, error}
defp validate_key_list(keys)
# Succeeds only when every element of the list is an atom.
defp validate_key_list(keys) when is_list(keys) do
  case Enum.reject(keys, &is_atom/1) do
    [] -> {:ok, keys}
    non_atoms -> new_key_error_result(non_atoms, keys)
  end
end
defp validate_key_list(keys) do
  new_error_result(m: "expected valid key list", v: keys)
end
# Currently just an alias for `validate_key_list/1`.
defp normalise_key_list(keys), do: validate_key_list(keys)
@spec validate_key_alias_dict(any) :: {:ok, aliases_dict} | {:error, error}
defp validate_key_alias_dict(dict)
# Valid when every key *and* every value of the map is an atom.
defp validate_key_alias_dict(dict) when is_map(dict) do
  all_atoms? = fn enum -> Enum.all?(enum, &is_atom/1) end

  if all_atoms?.(Map.keys(dict)) and all_atoms?.(Map.values(dict)) do
    {:ok, dict}
  else
    new_error_result(m: "expected valid key alias dictionary", v: dict)
  end
end
@spec normalise_key_alias_dict(any) :: {:ok, aliases_dict} | {:error, error}
defp normalise_key_alias_dict(dict)
defp normalise_key_alias_dict(dict) when is_map(dict) do
  validate_key_alias_dict(dict)
end
# Keywords are converted to a map on the fly, then validated.
defp normalise_key_alias_dict(dict) when is_list(dict) do
  if Keyword.keyword?(dict) do
    dict |> Map.new() |> validate_key_alias_dict()
  else
    new_error_result(m: "expected valid alias dictionary", v: dict)
  end
end
defp normalise_key_alias_dict(dict) do
  new_error_result(m: "expected valid alias dictionary", v: dict)
end
@spec validate_key_dict(any) :: {:ok, aliases_dict} | {:error, error}
defp validate_key_dict(dict)
# Valid when every key of the map is an atom (values may be anything).
defp validate_key_dict(dict) when is_map(dict) do
  if dict |> Map.keys() |> Enum.all?(&is_atom/1) do
    {:ok, dict}
  else
    new_error_result(m: "expected valid key dictionary", v: dict)
  end
end
@spec normalise_key_dict(any) :: {:ok, aliases_dict} | {:error, error}
defp normalise_key_dict(dict)
defp normalise_key_dict(dict) when is_map(dict) do
  dict |> validate_key_dict
end
defp normalise_key_dict(dict) when is_list(dict) do
  cond do
    # BUG FIX: the keyword branch previously returned the bare map (not
    # `{:ok, map}`) and skipped `validate_key_dict/1`, breaking the
    # `{:ok, _} | {:error, _}` contract this function's spec declares and
    # that the sibling `normalise_key_alias_dict/1` honours.
    Keyword.keyword?(dict) -> dict |> Enum.into(%{}) |> validate_key_dict
    true ->
      new_error_result(m: "expected valid key dictionary", v: dict)
  end
end
defp normalise_key_dict(dict) do
  new_error_result(m: "expected valid key dictionary", v: dict)
end
@doc false
# Converts a possibly-negative index into a non-negative one relative to
# `opts` (negative indices count from the end, as in `Enum.at/2`).
defp opts_index_normalise(opts, index)
defp opts_index_normalise(opts, index)
     when is_list(opts) and is_integer(index) and index >= 0,
     do: {:ok, index}
defp opts_index_normalise(opts, index)
     when is_list(opts) and is_integer(index) and index < 0,
     do: {:ok, length(opts) + index}
defp opts_index_normalise(opts, _index) when not is_list(opts),
  do: new_error_result(m: "opts invalid", v: opts)
defp opts_index_normalise(_opts, index) when not is_integer(index),
  do: new_error_result(m: "index invalid", v: index)
# Validates `index` against `opts`, returning `{:ok, zero_based_index}` when
# in range, else `{:error, error}` tagged :index_too_small / :index_too_large.
defp opts_index_validate(opts, index)
when is_list(opts) and is_integer(index) do
with {:ok, index} <- opts |> opts_index_normalise(index) do
index_max = length(opts) - 1
# NOTE(review): `opts_index_normalise/2` has already added `length(opts)`
# to a negative index, so a still-negative value here receives a *second*
# wrap-around adjustment (`index_max + x + 1`) — confirm this double
# adjustment is intended rather than an immediate :index_too_small error.
case index do
x when x >= 0 -> x
x -> index_max + x + 1
end
|> fn
ndx when ndx < 0 -> new_error_result(m: :index_too_small, v: ndx)
ndx when ndx > index_max -> new_error_result(m: :index_too_large, v: ndx)
ndx -> {:ok, ndx}
end.()
else
{:error, _} = result -> result
end
end
defp opts_index_validate(opts, _index) when not is_list(opts) do
new_error_result(m: "opts invalid", v: opts)
end
defp opts_index_validate(_opts, index) when not is_integer(index) do
new_error_result(m: "index invalid", v: index)
end
# Validates a list of indices against `opts`. Returns `{:ok, indices}` with
# every index normalised to non-negative form, or `{:error, error}` listing
# the invalid one(s).
defp opts_indices_validate(opts, indices)
# `nil` means "all indices": 0 .. length(opts) - 1.
defp opts_indices_validate(opts, nil) do
{:ok, opts |> Enum.with_index |> Enum.map(&(elem(&1,1)))}
end
defp opts_indices_validate(opts, indices) when is_list(opts) do
indices
|> List.wrap
# Partition into valid (normalised) and invalid indices, preserving order
# via the final Enum.reverse calls.
|> Enum.reduce({[],[]},
fn index, {valid_indices,invalid_indices} ->
case opts |> opts_index_validate(index) do
{:ok, index} -> {[index | valid_indices], invalid_indices}
{:error, _} -> {valid_indices, [index | invalid_indices]}
end
end)
|> case do
{valid_indices, []} -> {:ok, valid_indices |> Enum.reverse}
{_valid_indices, invalid_indices} ->
# Singular vs plural error message depending on how many were invalid.
case invalid_indices |> length do
1 -> new_error_result(m: "index invalid", v: invalid_indices |> hd)
_ -> new_error_result(m: "indices invalid", v: invalid_indices |> Enum.reverse)
end
end
end
@doc ~S"""
`opts_normalise/1` expects a *derivable opts* and returns `{:ok, opts}`.
Any other argument causes `{:error, error}` to be returned.
## Examples
iex> [] |> opts_normalise
{:ok, []}
iex> %{a: 1, b: 2, c: 3} |> opts_normalise
{:ok, [a: 1, b: 2, c: 3]}
iex> %{"a" => 1, :b => 2, :c => 3} |> opts_normalise
{:error, %KeyError{key: "a", term: %{:b => 2, :c => 3, "a" => 1}}}
iex> 42 |> opts_normalise
{:error, %ArgumentError{message: "expected valid derivable opts; got: 42"}}
iex> [a: nil, b: [:b1], c: [:c1, :c2, :c3]] |> opts_normalise
{:ok, [a: nil, b: [:b1], c: [:c1, :c2, :c3]]}
"""
@spec opts_normalise(any) :: {:ok, opts} | {:error, error}
def opts_normalise(value) do
  cond do
    Keyword.keyword?(value) ->
      {:ok, value}

    is_map(value) ->
      # A map derives opts via Map.to_list/1; non-atom keys are an error.
      tuples = Map.to_list(value)

      if Keyword.keyword?(tuples) do
        {:ok, tuples}
      else
        tuples
        |> Keyword.keys
        |> Enum.reject(&is_atom/1)
        |> new_key_error_result(value)
      end

    true ->
      new_error_result(m: "expected valid derivable opts", v: value)
  end
end
@doc ~S"""
`opts_normalise!/1` calls `opts_normalise/1` and if the result is `{:ok, opts}` returns `opts`.
## Examples
iex> [] |> opts_normalise!
[]
iex> %{a: 1, b: 2, c: 3} |> opts_normalise!
[a: 1, b: 2, c: 3]
iex> %{"a" => 1, :b => 2, :c => 3} |> opts_normalise!
** (KeyError) key "a" not found in: %{:b => 2, :c => 3, "a" => 1}
iex> 42 |> opts_normalise!
** (ArgumentError) expected valid derivable opts; got: 42
iex> [a: nil, b: [:b1], c: [:c1, :c2, :c3]] |> opts_normalise!
[a: nil, b: [:b1], c: [:c1, :c2, :c3]]
"""
@spec opts_normalise!(any) :: opts | no_return
def opts_normalise!(opts) do
  with {:ok, normalised} <- opts_normalise(opts) do
    normalised
  else
    {:error, error} -> raise error
  end
end
@doc ~S"""
`opts_normalise_map/1` expects a *derivable opts* as argument.
If the argument is a `Map`, with `Atom` keys, it returns `{:ok, argument}` directly.
If the argument is a `Keyword`, with `Atom` keys, it returns `{:ok, argument |> Enum.into(%{})}`.
Any other argument causes `{:error, error}` to be returned.
## Examples
iex> [] |> opts_normalise_map
{:ok, %{}}
iex> [a: nil, b: [:b1], c: [:c1, :c2, :c3]] |> opts_normalise_map
{:ok, %{a: nil, b: [:b1], c: [:c1, :c2, :c3]}}
iex> %{a: 1, b: 2, c: 3} |> opts_normalise_map
{:ok, %{a: 1, b: 2, c: 3}}
iex> %{"a" => 1, :b => 2, :c => 3} |> opts_normalise_map
{:error, %KeyError{key: ["a"], term: %{:b => 2, :c => 3, "a" => 1}}}
iex> 42 |> opts_normalise_map
{:error, %ArgumentError{message: "expected valid derivable opts; got: 42"}}
"""
@spec opts_normalise_map(any) :: {:ok, opts} | {:error, error}
def opts_normalise_map(value) do
  cond do
    Keyword.keyword?(value) ->
      {:ok, Map.new(value)}

    is_map(value) ->
      # Validate the keys; on a KeyError, re-tag it with the original map.
      case normalise_key_spec(value) do
        {:ok, _keys} -> {:ok, value}
        {:error, %KeyError{} = error} -> {:error, struct!(error, term: value)}
        {:error, _} = result -> result
      end

    true ->
      new_error_result(m: "expected valid derivable opts", v: value)
  end
end
@doc ~S"""
`opts_normalise_map!/1` call `opts_normalise_map/1` and if the result is `{:ok, map}` returns `map`.
## Examples
iex> [] |> opts_normalise_map!
%{}
iex> [a: 1, b: 2, c: 3] |> opts_normalise_map!
%{a: 1, b: 2, c: 3}
iex> %{a: 1, b: 2, c: 3} |> opts_normalise_map!
%{a: 1, b: 2, c: 3}
iex> %{"a" => 1, :b => 2, :c => 3} |> opts_normalise_map!
** (KeyError) key ["a"] not found in: %{:b => 2, :c => 3, "a" => 1}
iex> 42 |> opts_normalise_map!
** (ArgumentError) expected valid derivable opts; got: 42
"""
@spec opts_normalise_map!(any) :: opts | no_return
def opts_normalise_map!(opts) do
  case opts_normalise_map(opts) do
    {:ok, map} ->
      map

    {:error, %KeyError{} = error} ->
      # Re-tag the KeyError with the original argument as its term.
      raise struct!(error, term: opts)

    {:error, error} ->
      raise error
  end
end
@doc ~S"""
`opts_validate/1` returns `{:ok, opts}` if the argument is an *opts*.
Any other argument causes `{:error, error}` to be returned.
## Examples
iex> [] |> opts_validate
{:ok, []}
iex> %{a: 1, b: 2, c: 3} |> opts_validate
{:error, %ArgumentError{message: "validate opts failed; got: %{a: 1, b: 2, c: 3}"}}
iex> %{"a" => 1, :b => 2, :c => 3} |> opts_validate
{:error, %ArgumentError{message: "validate opts failed; got: %{:b => 2, :c => 3, \"a\" => 1}"}}
iex> 42 |> opts_validate
{:error, %ArgumentError{message: "validate opts failed; got: 42"}}
iex> [a: nil, b: [:b1], c: [:c1, :c2, :c3]] |> opts_validate
{:ok, [a: nil, b: [:b1], c: [:c1, :c2, :c3]]}
"""
@spec opts_validate(any) :: {:ok, opts} | {:error, error}
def opts_validate(value) do
  # Unlike opts_normalise/1, maps are NOT derived here - only a true Keyword
  # passes.
  if Keyword.keyword?(value) do
    {:ok, value}
  else
    new_error_result(m: "validate opts failed", v: value)
  end
end
@doc ~S"""
`opts_validate!/1` calls `opts_validate/1` and, if the result is `{:ok, opts}`, returns `opts`.
## Examples
iex> [] |> opts_validate!
[]
iex> %{a: 1, b: 2, c: 3} |> opts_validate!
** (ArgumentError) validate opts failed; got: %{a: 1, b: 2, c: 3}
iex> %{"a" => 1, :b => 2, :c => 3} |> opts_validate!
** (ArgumentError) validate opts failed; got: %{:b => 2, :c => 3, "a" => 1}
iex> 42 |> opts_validate!
** (ArgumentError) validate opts failed; got: 42
iex> [a: nil, b: [:b1], c: [:c1, :c2, :c3]] |> opts_validate!
[a: nil, b: [:b1], c: [:c1, :c2, :c3]]
"""
@spec opts_validate!(opts) :: opts | no_return
def opts_validate!(opts) do
  with {:ok, validated} <- opts_validate(opts) do
    validated
  else
    {:error, error} -> raise error
  end
end
@doc ~S"""
`opts_create_aliases_tuples/1` takes an *opts* where the keys are the canonical key names, and their values are zero (nil), one or more aliases for the canonical key.
A `Keyword` is returned where each key is an alias and its value the canonical key.
The canonical key also has an entry for itself with the same value.
## Examples
iex> [a: nil, b: [:b1], c: [:c1, :c2, :c3]] |> opts_create_aliases_tuples
[a: :a, b: :b, b1: :b, c: :c, c1: :c, c2: :c, c3: :c]
"""
@spec opts_create_aliases_tuples(aliases_kvs) :: aliases_tuples
def opts_create_aliases_tuples(aliases) do
  Enum.flat_map(aliases, fn
    # No aliases: the canonical key maps only to itself.
    {canon, nil} ->
      [{canon, canon}]

    # One or more aliases: each alias (plus the canonical key) maps to the
    # canonical key; duplicates are dropped.
    {canon, alias_keys} ->
      [canon | List.wrap(alias_keys)]
      |> Enum.uniq()
      |> Enum.map(&{&1, canon})
  end)
end
@doc ~S"""
`opts_create_aliases_dict/1` does the same job as `opts_create_aliases_tuples/1` but returns a *key alias dict*.
## Examples
iex> [a: nil, b: [:b1], c: [:c1, :c2, :c3]] |> opts_create_aliases_dict
%{a: :a, b: :b, b1: :b, c: :c, c1: :c, c2: :c, c3: :c}
"""
@spec opts_create_aliases_dict(aliases_kvs) :: aliases_dict
def opts_create_aliases_dict(aliases) do
  aliases |> opts_create_aliases_tuples() |> Map.new()
end
@doc ~S"""
`opts_create_defstruct/2` takes an *opts*, together with a defaults map, and returns an *opts* where each value if the value of the key in the defaults map (with default `nil`).
`opts_create_defstruct/2` creates an argument suitable for use with `Kernel.defstruct/1`
The defaults map must contain *only* keys that are also in the opts list; any unknown keys will raise a `KeyError.`
## Examples
iex> [a: 1, b: :two, c: "tre", d: nil] |> opts_create_defstruct(%{a: 42, b: "two"})
[a: 42, b: "two", c: nil, d: nil]
iex> [a: 1, b: :two, c: "tre", d: nil] |> opts_create_defstruct(%{a: 42, b: "two", x: 1})
** (KeyError) key [:x] not found in: [a: 1, b: :two, c: "tre", d: nil]
"""
@spec opts_create_defstruct(opts, defaults_map) :: opts
def opts_create_defstruct(struct_kvs, defaults_map \\ %{})
# Fast path: no defaults - every key gets nil.
def opts_create_defstruct(struct_kvs, defaults_map)
    when is_map(defaults_map) and map_size(defaults_map) == 0 do
  for {key, _value} <- struct_kvs, do: {key, nil}
end
def opts_create_defstruct(struct_kvs, defaults_map) when is_map(defaults_map) do
  # Defaults for keys absent from struct_kvs are too dangerous (hard to spot
  # e.g. a default with a misspelt name), so they raise.
  known = Map.new(struct_kvs, fn {k, _v} -> {k, nil} end)

  case Enum.reject(Map.keys(defaults_map), &Map.has_key?(known, &1)) do
    [] -> :ok
    unknown_keys -> raise KeyError, key: unknown_keys, term: struct_kvs
  end

  for {key, _value} <- struct_kvs, do: {key, Map.get(defaults_map, key)}
end
@doc ~S"""
`opts_crue_defstruct/2` takes a *derivable opts*, together with a defaults map, and returns `{:ok, opts}` where each value is the value of the key in the defaults map (with default `nil`).
`opts_crue_defstruct/2` creates an argument suitable for use with `Kernel.defstruct/1`
The defaults map must contain *only* keys that are also in the opts list; any unknown keys will cause `{:error, error}`, where `error` is a `KeyError`, to be returned.
## Examples
iex> [a: 1, b: :two, c: "tre", d: nil] |> opts_crue_defstruct(%{a: 42, b: "two"})
{:ok, [a: 42, b: "two", c: nil, d: nil]}
iex> [a: 1, b: :two, c: "tre", d: nil] |> opts_crue_defstruct(%{a: 42, b: "two", x: 1})
{:error, %KeyError{key: :x, term: [a: 1, b: :two, c: "tre", d: nil]}}
"""
@spec opts_crue_defstruct(opts, defaults_map) :: {:ok, opts} | {:error, error}
def opts_crue_defstruct(struct_kvs, defaults_map \\ %{})
# Fast path: no defaults - every key gets nil.
def opts_crue_defstruct(struct_kvs, defaults_map)
    when is_map(defaults_map) and map_size(defaults_map) == 0 do
  {:ok, Enum.map(struct_kvs, fn {k, _v} -> {k, nil} end)}
end
def opts_crue_defstruct(struct_kvs, defaults_map) when is_map(defaults_map) do
  with {:ok, struct_kvs} <- opts_normalise(struct_kvs) do
    # Defaults for keys absent from struct_kvs are too dangerous (hard to
    # spot e.g. a default with a misspelt name), so they are an error.
    known = Map.new(struct_kvs, fn {k, _v} -> {k, nil} end)

    defaults_map
    |> Map.keys()
    |> Enum.reject(&Map.has_key?(known, &1))
    |> case do
      [] ->
        {:ok, Enum.map(struct_kvs, fn {k, _v} -> {k, Map.get(defaults_map, k)} end)}

      unknown_keys ->
        new_key_error_result(unknown_keys, struct_kvs)
    end
  end
end
@doc ~S"""
`opts_crue_defstruct!/2` calls `opts_crue_defstruct/2` and if the result is `{:ok, opts}` returns `opts`.
## Examples
iex> [a: 1, b: :two, c: "tre", d: nil] |> opts_crue_defstruct!(%{a: 42, b: "two"})
[a: 42, b: "two", c: nil, d: nil]
iex> [a: 1, b: :two, c: "tre", d: nil] |> opts_crue_defstruct!(%{a: 42, b: "two", x: 1})
** (KeyError) key :x not found in: [a: 1, b: :two, c: "tre", d: nil]
"""
@spec opts_crue_defstruct!(opts, defaults_map) :: opts | no_return
def opts_crue_defstruct!(struct_kvs, defaults_map \\ %{})
def opts_crue_defstruct!(opts, defaults_map) do
  with {:ok, result} <- opts_crue_defstruct(opts, defaults_map) do
    result
  else
    {:error, error} -> raise error
  end
end
@doc ~S"""
`opts_avoir_keys/2` takes an *opts* and a *key spec*.
If all of the keys are present in the `opts`, its returns `{:ok, opts}`.
If there are any missing keys, `{:error, error}`, where `error` is a `KeyError`, will be returned.
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_avoir_keys([:a, :b, :c])
{:ok, [a: 1, b: 2, c: 3]}
iex> [a: 1, b: 2, c: 3] |> opts_avoir_keys(%{a: 1, b: 2, c: 3})
{:ok, [a: 1, b: 2, c: 3]}
iex> [a: 1, b: 2, c: 3] |> opts_avoir_keys([:a, :b, :d, :a])
{:error, %KeyError{key: :d, term: [a: 1, b: 2, c: 3]}}
iex> [a: 1, b: 2, c: 3] |> opts_avoir_keys(%{x: nil, y: nil, z: nil})
{:error, %KeyError{key: [:x, :y, :z], term: [a: 1, b: 2, c: 3]}}
"""
@spec opts_avoir_keys(any, any) :: {:ok, opts} | {:error, error}
def opts_avoir_keys(opts, keys)
def opts_avoir_keys(opts, keys) do
  with {:ok, opts_keys} <- normalise_key_spec(opts),
       {:ok, wanted_keys} <- normalise_key_spec(keys) do
    # Any wanted key not present in the opts is a KeyError.
    case wanted_keys -- opts_keys do
      [] -> {:ok, opts}
      missing_keys -> new_key_error_result(missing_keys, opts)
    end
  end
end
@doc ~S"""
`opts_avoir_keys?/2` calls `opts_avoir_keys/2` and if the result is `{:ok, _}`, returns `true`, else `false`.
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_avoir_keys?([:a, :b, :c])
true
iex> [a: 1, b: 2, c: 3] |> opts_avoir_keys?(%{a: 1, b: 2, c: 3})
true
iex> [a: 1, b: 2, c: 3] |> opts_avoir_keys?([:a, :b, :d, :a])
false
iex> [a: 1, b: 2, c: 3] |> opts_avoir_keys?(%{x: nil, y: nil, z: nil})
false
"""
@spec opts_avoir_keys?(any, any) :: true | false
def opts_avoir_keys?(opts, keys) do
  match?({:ok, _}, opts_avoir_keys(opts, keys))
end
@doc ~S"""
`opts_avoir_keys!/2` calls `opts_avoir_keys/2` and if the result is `{:ok, opts}`, returns `opts`.
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_avoir_keys!([:a, :b, :c])
[a: 1, b: 2, c: 3]
iex> [a: 1, b: 2, c: 3] |> opts_avoir_keys!(%{a: 1, b: 2, c: 3})
[a: 1, b: 2, c: 3]
iex> [a: 1, b: 2, c: 3] |> opts_avoir_keys!([:a, :b, :d, :a])
** (KeyError) key :d not found in: [a: 1, b: 2, c: 3]
iex> [a: 1, b: 2, c: 3] |> opts_avoir_keys!(%{x: nil, y: nil, z: nil})
** (KeyError) key [:x, :y, :z] not found in: [a: 1, b: 2, c: 3]
"""
@spec opts_avoir_keys!(any, any) :: opts | no_return
def opts_avoir_keys!(opts, keys) do
  with {:ok, validated} <- opts_avoir_keys(opts, keys) do
    validated
  else
    {:error, error} -> raise error
  end
end
@doc ~S"""
`opts_has_keys/2` takes an *opts*, together with a list or dictionary (map) of wanted `keys`.
If all of the `keys` are present in the `opts`, its returns `{:ok, opts}`.
If there are any missing keys, `{:error, {present_opts, missing_keys}}` is returned, where the
`present_opts` include *only* the tuples for the wanted keys (i.e. result of `Keyword.take/2` for the wanted keys).
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_has_keys([:a, :b, :c])
{:ok, [a: 1, b: 2, c: 3]}
iex> [a: 1, b: 2, c: 3] |> opts_has_keys(%{a: 1, b: 2, c: 3})
{:ok, [a: 1, b: 2, c: 3]}
iex> [a: 1, b: 2, c: 3] |> opts_has_keys([:a, :b, :d, :a])
{:error, {[a: 1, b: 2], [:d]}}
iex> [a: 1, b: 2, c: 3] |> opts_has_keys(%{x: nil, y: nil, z: nil})
{:error, {[], [:x, :y, :z]}}
"""
@spec opts_has_keys(opts, keys) :: {:ok, opts} | {:error, {opts,opts}}
@spec opts_has_keys(opts, dict) :: {:ok, opts} | {:error, {opts,opts}}
def opts_has_keys(opts, keys_or_dict)
# A map's keys become the wanted key list.
def opts_has_keys(opts, dict) when is_map(dict) do
  opts_has_keys(opts, Map.keys(dict))
end
def opts_has_keys(opts, keys) when is_list(keys) do
  opts_keys = opts |> Keyword.keys() |> Enum.uniq()
  wanted_keys = Enum.uniq(keys)

  case wanted_keys -- opts_keys do
    [] ->
      {:ok, opts}

    missing_keys ->
      # Return only the tuples for the wanted keys alongside the misses.
      {:error, {opts_take_keys(opts, wanted_keys), missing_keys}}
  end
end
@doc ~S"""
`opts_has_keys?/2` calls `opts_has_keys/2` and if the result is `{:ok, _}`, returns `true`, else `false`.
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_has_keys?([:a, :b, :c])
true
iex> [a: 1, b: 2, c: 3] |> opts_has_keys?(%{a: 1, b: 2, c: 3})
true
iex> [a: 1, b: 2, c: 3] |> opts_has_keys?([:a, :b, :d, :a])
false
iex> [a: 1, b: 2, c: 3] |> opts_has_keys?(%{x: nil, y: nil, z: nil})
false
"""
@spec opts_has_keys?(opts, keys) :: true | false
@spec opts_has_keys?(opts, dict) :: true | false
def opts_has_keys?(opts, keys) do
  match?({:ok, _}, opts_has_keys(opts, keys))
end
@doc ~S"""
`opts_has_keys!/2` calls `opts_has_keys/2` and if the result is `{:ok, opts}`, returns `opts`.
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_has_keys!([:a, :b, :c])
[a: 1, b: 2, c: 3]
iex> [a: 1, b: 2, c: 3] |> opts_has_keys!(%{a: 1, b: 2, c: 3})
[a: 1, b: 2, c: 3]
iex> [a: 1, b: 2, c: 3] |> opts_has_keys!([:a, :b, :d, :a])
** (KeyError) key [:d] not found in: [a: 1, b: 2, c: 3]
iex> [a: 1, b: 2, c: 3] |> opts_has_keys!(%{x: nil, y: nil, z: nil})
** (KeyError) key [:x, :y, :z] not found in: [a: 1, b: 2, c: 3]
"""
@spec opts_has_keys!(opts, keys) :: opts | no_return
@spec opts_has_keys!(opts, dict) :: opts | no_return
def opts_has_keys!(opts, keys) do
  case opts_has_keys(opts, keys) do
    {:ok, validated} ->
      validated

    {:error, {_present_tuples, missing_keys}} ->
      # Error tuple is converted to a KeyError naming the missing keys.
      raise KeyError, key: missing_keys, term: opts
  end
end
@doc ~S"""
`opts_canon_keys!/2` takes an *opts*, together with a lookup dictionary and replaces each key with its canonical value from the dictionary. Unknown keys raise a `KeyError`.
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_canon_keys!(%{a: :x, b: :y, c: :z})
[x: 1, y: 2, z: 3]
iex> [x: 1, y: 3, z: 3] |> opts_canon_keys!(%{a: 1, b: 2, c: 3})
** (KeyError) key :x not found in: %{a: 1, b: 2, c: 3}
"""
@spec opts_canon_keys!(opts, dict) :: opts | no_return
def opts_canon_keys!(opts, dict) when is_map(dict) do
  # Map.fetch!/2 raises KeyError for any key absent from the dictionary.
  for {key, value} <- opts, do: {Map.fetch!(dict, key), value}
end
@doc ~S"""
`opts_canon_keys/2` takes an *opts*, together with either a dictionary (map) or (keyword) list of aliases.
If a dictionary is provided, each key in the `opts` is replaced with its (canonical) value from the dictionary, returning `{:ok, transformed_opts}`.
If a (keyword) list of aliases is provided, the aliases are first converted into a dictionary by `opts_create_aliases_dict/1` and the dictionary used as described above.
If there are any unknown keys, `{:error, {known_opts, unknown_opts}}` is returned.
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_canon_keys(%{a: :x, b: :y, c: :z})
{:ok, [x: 1, y: 2, z: 3]}
iex> [a: 11, p: 1, b: 22, q: 2, c: 33, r: 3] |> opts_canon_keys(%{a: :x, b: :y, c: :z})
{:error, {[x: 11, y: 22, z: 33], [p: 1, q: 2, r: 3]}}
iex> [a: 1, b: 2, c: 3] |> opts_canon_keys([a_canon: :a, b_canon: [:b], c_canon: [:c, :cc]])
{:ok, [a_canon: 1, b_canon: 2, c_canon: 3]}
iex> [a: 1, b: 2, c: 3] |> opts_canon_keys([a_canon: :a, b_canon: nil, c_canon: [:c, :cc]])
{:error, {[a_canon: 1, c_canon: 3], [b: 2]}}
"""
@spec opts_canon_keys(opts, dict) :: {:ok, opts} | {:error, {opts, opts}}
@spec opts_canon_keys(opts, opts) :: {:ok, opts} | {:error, {opts, opts}}
def opts_canon_keys(opts, dict) when is_map(dict) do
  # Partition the opts by whether their key is known to the dictionary.
  case Enum.split_with(opts, fn {k, _v} -> Map.has_key?(dict, k) end) do
    {known_tuples, []} ->
      {:ok, opts_canon_keys!(known_tuples, dict)}

    {known_tuples, unknown_tuples} ->
      {:error, {opts_canon_keys!(known_tuples, dict), unknown_tuples}}
  end
end
def opts_canon_keys(opts, aliases) when is_list(aliases) do
  opts_canon_keys(opts, opts_create_aliases_dict(aliases))
end
@doc ~S"""
`opts_canonical_keys/2` takes a *derivable opts*, together with a *key alias dict*.
Each key in the `opts` is replaced with its (canonical) value from the dictionary, returning `{:ok, canon_opts}`.
If there are any unknown keys, `{:error, error}`, where `error` is a `KeyError`, will be returned.
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_canonical_keys(%{a: :x, b: :y, c: :z})
{:ok, [x: 1, y: 2, z: 3]}
iex> [a: 1, b: 2, c: 3] |> opts_canonical_keys([a: :x, b: :y, c: :z])
{:ok, [x: 1, y: 2, z: 3]}
iex> [a: 11, p: 1, b: 22, q: 2, c: 33, r: 3] |> opts_canonical_keys(%{a: :x, b: :y, c: :z})
{:error, %KeyError{key: [:p, :q, :r], term: %{a: :x, b: :y, c: :z}}}
iex> [a: 1, b: 2, c: 3] |> opts_canonical_keys([a_canon: :a, b_canon: [:b], c_canon: [:c, :cc]])
{:error, %ArgumentError{message: "expected valid key alias dictionary; got: %{a_canon: :a, b_canon: [:b], c_canon: [:c, :cc]}"}}
"""
@spec opts_canonical_keys(any, any) :: {:ok, opts} | {:error, error}
def opts_canonical_keys(opts, dict) do
  with {:ok, opts} <- opts |> opts_normalise,
       {:ok, dict} <- dict |> normalise_key_alias_dict do
    case Enum.reject(opts, fn {k, _v} -> Map.has_key?(dict, k) end) do
      [] ->
        {:ok, Enum.map(opts, fn {k, v} -> {Map.get(dict, k), v} end)}

      unknown_tuples ->
        # BUG FIX: pass the unknown *keys* (not the `{key, value}` tuples)
        # to `new_key_error_result/2`, matching the documented error shape
        # `%KeyError{key: [:p, :q, :r], ...}` and every other call site.
        unknown_tuples |> Keyword.keys() |> new_key_error_result(dict)
    end
  else
    {:error, _} = result -> result
  end
end
@doc ~S"""
`opts_canonical_keys!/2` calls `opts_canonical_keys/2` and if the result is `{:ok, opts}` returns `opts`.
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_canonical_keys!(%{a: :x, b: :y, c: :z})
[x: 1, y: 2, z: 3]
iex> [a: 1, b: 2, c: 3] |> opts_canonical_keys!([a: :x, b: :y, c: :z])
[x: 1, y: 2, z: 3]
iex> [x: 1, y: 3, z: 3] |> opts_canonical_keys!(%{a: 1, b: 2, c: 3})
** (ArgumentError) expected valid key alias dictionary; got: %{a: 1, b: 2, c: 3}
"""
@spec opts_canonical_keys!(any, any) :: opts | no_return
def opts_canonical_keys!(opts, dict) do
with {:ok, opts} <- opts |> opts_canonical_keys(dict) do
opts
else
{:error, error} -> raise error
end
end
@doc ~S"""
`opts_sort_keys/` takes a *derivable opts*, together with a list of sort keys, and returns the opts sorted in the sort keys order. Duplicate keys follow one after another.
Any keys found but not given in the sort keys follow the sorted keys in the returned opts.
Any key in the sort list not found in the opts is ignored.
## Examples
iex> [a: 1, b: 2, c: 3, d: 4] |> opts_sort_keys
[a: 1, b: 2, c: 3, d: 4]
iex> [a: 1, b: 2, c: 3, d: 4] |> opts_sort_keys([:c, :a])
[c: 3, a: 1, b: 2, d: 4]
iex> [] |> opts_sort_keys([:c, :a])
[]
iex> [a: 11, b: 2, c: 3, a: 12, d: 4] |> opts_sort_keys([:c, :a])
[c: 3, a: 11, a: 12, b: 2, d: 4]
iex> [a: 11, b: 21, c: 3, a: 12, d: 4, b: 22] |> opts_sort_keys([:d, :x, :b, :z])
[d: 4, b: 21, b: 22, a: 11, c: 3, a: 12]
"""
@spec opts_sort_keys(any, any) :: {:ok, opts} | {:error, error}
def opts_sort_keys(opts, keys \\ [])
def opts_sort_keys([], _keys) do
[]
end
def opts_sort_keys(opts, keys) do
sort_keys = keys |> Enum.uniq
sort_dict = sort_keys |> Map.new(fn k -> {k,nil} end)
# partition the opts into sort and other keys
{sorted_tuples, remain_tuples} = opts
|> Enum.split_with(fn {k,_v} -> Map.has_key?(sort_dict, k) end)
# collect the sorted_tuples for same key
sort_keys
|> Enum.flat_map(fn k ->
sorted_tuples
|> Keyword.get_values(k)
|> Enum.map(fn v -> {k,v} end)
end)
|> Kernel.++(remain_tuples)
end
  @doc ~S"""
  `opts_take_keys!/2` takes an *opts*, together with a *key list* and returns the *opts* with just the supplied keys.
  If any of the keys are not found, raises a `KeyError` citing the missing keys.
  ## Examples
      iex> [a: 1, b: 2, c: 3] |> opts_take_keys!([:c, :a])
      [a: 1, c: 3]
      iex> [a: 1, b: 2, c: 3] |> opts_take_keys!([:d, :a])
      ** (KeyError) key [:d] not found in: [a: 1, b: 2, c: 3]
  """
  @spec opts_take_keys!(opts, keys) :: opts
  def opts_take_keys!(opts, keys \\ [])
  # taking anything from empty opts yields empty opts
  def opts_take_keys!([], _keys) do
    []
  end
  # taking no keys yields empty opts (mirrors Keyword.take(opts, []))
  def opts_take_keys!(opts, []) when is_list(opts) do
    []
  end
  def opts_take_keys!(opts, keys) when is_list(keys) do
    opts
    |> opts_take_keys(keys)
    # check all keys present
    |> opts_has_keys(keys)
    |> case do
      {:ok, new_opts} -> new_opts
      {:error, {_present_opts, missing_keys}} ->
        raise KeyError, key: missing_keys, term: opts
    end
  end
@doc ~S"""
`opts_drop_keys!/1` takes an *opts*, together with a *key list* and returns the *opts* without the supplied keys.
It any of the keys are not found, raises a `KeyError` citing the missing keys.
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_drop_keys!([:b])
[a: 1, c: 3]
iex> [a: 11, b: 21, c: 3, b: 22, a: 12] |> opts_drop_keys!([:b])
[a: 11, c: 3, a: 12]
iex> [a: 1, b: 2, c: 3] |> opts_drop_keys!([:d, :a])
** (KeyError) key [:d] not found in: [a: 1, b: 2, c: 3]
"""
@spec opts_drop_keys!(opts, keys) :: opts
def opts_drop_keys!(opts, keys \\ [])
def opts_drop_keys!([], _keys) do
[]
end
def opts_drop_keys!(opts, []) when is_list(opts) do
[]
end
def opts_drop_keys!(opts, keys) when is_list(keys) do
opts
|> opts_has_keys(keys)
|> case do
{:ok, _} -> opts |> opts_drop_keys(keys)
{:error, {_present_opts, missing_keys}} ->
raise KeyError, key: missing_keys, term: opts
end
end
@doc ~S"""
`opts_filter/2` takes a *derivable opts*, together with a *tuple predicate* and returns `{:ok, opts}` where `opts` has all the 2tuples the *tuple predicate* return `true` for.
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_filter(&(&1))
{:ok, [a: 1, b: 2, c: 3]}
iex> [a: 1, b: 2, c: 3] |> opts_filter(
...> fn
...> {:a,_} -> true
...> _ -> false
...> end)
{:ok, [a: 1]}
iex> %{a: 1, b: 2, c: 3} |> opts_filter(
...> fn
...> {:a,_} -> false
...> _ -> true
...> end)
{:ok, [b: 2, c: 3]}
iex> {:error, error} = [a: 1, b: 2, c: 3] |> opts_filter(:not_a_function)
...> match?(%ArgumentError{message: "expected valid tuple predicate; got: :not_a_function"}, error)
true
iex> {:error, error} = :not_opts |> opts_filter(&(&1))
...> match?(%ArgumentError{message: "expected valid derivable opts; got: :not_opts"}, error)
true
"""
@spec opts_filter(any, any) :: {:ok, opts} | {:error, error}
def opts_filter(opts, fun_pred) when is_function(fun_pred,1) do
with {:ok, opts} <- opts |> opts_normalise do
opts = opts
|> Enum.filter(fun_pred)
{:ok, opts}
else
{:error, _} = result -> result
end
end
def opts_filter(_opts, fun_pred) do
new_error_result(m: "expected valid tuple predicate", v: fun_pred)
end
@doc ~S"""
`opts_filter!/2` calls `opts_filter/2` and if the result is `{:ok, opts}` returns `opts`.
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_filter!(&(&1))
[a: 1, b: 2, c: 3]
iex> [a: 1, b: 2, c: 3] |> opts_filter!(
...> fn
...> {:a,_} -> true
...> _ -> false
...> end)
[a: 1]
iex> %{a: 1, b: 2, c: 3} |> opts_filter!(
...> fn
...> {:a,_} -> false
...> _ -> true
...> end)
[b: 2, c: 3]
iex> [a: 1, b: 2, c: 3] |> opts_filter!(:not_a_function)
** (ArgumentError) expected valid tuple predicate; got: :not_a_function
iex> :not_opts |> opts_filter!(&(&1))
** (ArgumentError) expected valid derivable opts; got: :not_opts
"""
@spec opts_filter!(any, any) :: opts | no_return
def opts_filter!(opts, dict) do
with {:ok, opts} <- opts |> opts_filter(dict) do
opts
else
{:error, error} -> raise error
end
end
@doc ~S"""
`opts_reject/2` takes a *derivable opts*, together with a *tuple predicate* and returns `{:ok, opts}` where `opts` has all the 2tuples the *tuple predicate* returns `false` for.
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_reject(&(&1))
{:ok, []}
iex> [a: 1, b: 2, c: 3] |> opts_reject(
...> fn
...> {:a,_} -> true
...> _ -> false
...> end)
{:ok, [b: 2, c: 3]}
iex> %{a: 1, b: 2, c: 3} |> opts_reject(
...> fn
...> {:a,_} -> false
...> _ -> true
...> end)
{:ok, [a: 1]}
iex> {:error, error} = [a: 1, b: 2, c: 3] |> opts_reject(:not_a_function)
...> match?(%ArgumentError{message: "expected valid tuple predicate; got: :not_a_function"}, error)
true
iex> {:error, error} = :not_opts |> opts_reject(&(&1))
...> match?(%ArgumentError{message: "expected valid derivable opts; got: :not_opts"}, error)
true
"""
@spec opts_reject(any, any) :: {:ok, opts} | {:error, error}
def opts_reject(opts, fun_pred) when is_function(fun_pred,1) do
with {:ok, opts} <- opts |> opts_normalise do
opts = opts
|> Enum.reject(fun_pred)
{:ok, opts}
else
{:error, _} = result -> result
end
end
def opts_reject(_opts, fun_pred) do
new_error_result(m: "expected valid tuple predicate", v: fun_pred)
end
@doc ~S"""
`opts_reject!/2` calls `opts_reject/2` and if the result is `{:ok, opts}` returns `opts`.
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_reject!(&(&1))
[]
iex> [a: 1, b: 2, c: 3] |> opts_reject!(
...> fn
...> {:a,_} -> true
...> _ -> false
...> end)
[b: 2, c: 3]
iex> %{a: 1, b: 2, c: 3} |> opts_reject!(
...> fn
...> {:a,_} -> false
...> _ -> true
...> end)
[a: 1]
iex> [a: 1, b: 2, c: 3] |> opts_reject!(:not_a_function)
** (ArgumentError) expected valid tuple predicate; got: :not_a_function
iex> :not_opts |> opts_reject!(&(&1))
** (ArgumentError) expected valid derivable opts; got: :not_opts
"""
@spec opts_reject!(any, any) :: opts | no_return
def opts_reject!(opts, dict) do
with {:ok, opts} <- opts |> opts_reject(dict) do
opts
else
{:error, error} -> raise error
end
end
@doc ~S"""
`opts_predicate/2` takes a *derivable opts*, together with a *tuple predicate* and returns `{:ok, opts}` if all the 2tuples pass the *tuple predicate*.
If any of the *opts* fail the *tuple predicate*, {:error, error} is returned where `error` will be a `KeyError` whose `key` field contains all the keys that failed.
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_predicate(&(&1))
{:ok, [a: 1, b: 2, c: 3]}
iex> [a: 1, b: 2, c: 3] |> opts_predicate(fn {_k,v} -> v |> is_integer end)
{:ok, [a: 1, b: 2, c: 3]}
iex> [a: 1, b: 2, c: 3] |> opts_predicate(
...> fn
...> {:a,_} -> true
...> _ -> false
...> end)
{:error, %KeyError{key: [:b, :c], term: [a: 1, b: 2, c: 3]}}
iex> {:error, error} = [a: 1, b: 2, c: 3] |> opts_predicate(:not_a_function)
...> match?(%ArgumentError{message: "expected valid tuple predicate; got: :not_a_function"}, error)
true
iex> {:error, error} = :not_opts |> opts_predicate(&(&1))
...> match?(%ArgumentError{message: "expected valid derivable opts; got: :not_opts"}, error)
true
"""
@spec opts_predicate(any, any) :: {:ok, opts} | {:error, error}
def opts_predicate(opts, fun_pred) when is_function(fun_pred,1) do
with {:ok, norm_opts} <- opts |> opts_normalise do
norm_opts
|> Enum.split_with(fun_pred)
|> case do
{pass_opts, []} -> {:ok, pass_opts}
{_pass_opts, fail_opts} ->
# build a KeyError with failing keys
fail_keys = fail_opts |> Keyword.keys |> Enum.uniq
{:error, %KeyError{key: fail_keys, term: opts}}
end
else
{:error, _} = result -> result
end
end
def opts_predicate(_opts, fun_pred) do
new_error_result(m: "expected valid tuple predicate", v: fun_pred)
end
  @doc ~S"""
  `opts_predicate!/2` calls `opts_predicate/2` and if the result is `{:ok, opts}` returns `opts`; otherwise the error is raised.
  ## Examples
      iex> [a: 1, b: 2, c: 3] |> opts_predicate!(&(&1))
      [a: 1, b: 2, c: 3]
      iex> [a: 1, b: 2, c: 3] |> opts_predicate!(fn {_k,v} -> v |> is_integer end)
      [a: 1, b: 2, c: 3]
      iex> [a: 1, b: 2, c: 3] |> opts_predicate!(
      ...>  fn
      ...>  {:a,_} -> true
      ...>  _ -> false
      ...>  end)
      ** (KeyError) key [:b, :c] not found in: [a: 1, b: 2, c: 3]
      iex> [a: 1, b: 2, c: 3] |> opts_predicate!(:not_a_function)
      ** (ArgumentError) expected valid tuple predicate; got: :not_a_function
      iex> :not_opts |> opts_predicate!(&(&1))
      ** (ArgumentError) expected valid derivable opts; got: :not_opts
  """
  @spec opts_predicate!(any, any) :: opts | no_return
  def opts_predicate!(opts, dict) do
    with {:ok, opts} <- opts |> opts_predicate(dict) do
      opts
    else
      {:error, error} -> raise error
    end
  end
@doc ~S"""
`opts_filter_keys/2` takes a *derivable opts* and a *key spec* and returns `{:ok, opts}` where `opts` has all keys from the original *derivable opts* that appear in the *key spec*.
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_filter_keys([:a, :b, :c])
{:ok, [a: 1, b: 2, c: 3]}
iex> [a: 1, b: 2, c: 3] |> opts_filter_keys([b: :want_b, c: :and_c])
{:ok, [b: 2, c: 3]}
iex> [a: 1, b: 2, c: 3] |> opts_filter_keys(%{a: 42, b: nil})
{:ok, [a: 1, b: 2]}
iex> [a: 1, b: 2, c: 3] |> opts_filter_keys(:not_a_key_spec)
{:error, %ArgumentError{message: "expected enum; got: :not_a_key_spec"}}
iex> :not_opts |> opts_filter_keys([:a, :b, :c])
{:error, %ArgumentError{message: "expected valid derivable opts; got: :not_opts"}}
"""
@spec opts_filter_keys(any, any) :: {:ok, opts} | {:error, error}
def opts_filter_keys(opts, keys)
def opts_filter_keys(opts, keys) do
with {:ok, filter_keys} <- keys |> normalise_key_spec do
filter_map = filter_keys |> Map.new(fn k -> {k, nil} end)
filter_pred = fn {k,_v} -> filter_map |> Map.has_key?(k) end
with {:ok, _filter_opts} = result <- opts |> opts_filter(filter_pred) do
result
else
{:error, _} = result -> result
end
else
{:error, _} = result -> result
end
end
@doc ~S"""
`opts_filter_keys!/2` calls `opts_filter_keys/2` and if the result is `{:ok, opts}`, returns `opts`.
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_filter_keys!([:a, :b, :c])
[a: 1, b: 2, c: 3]
iex> [a: 1, b: 2, c: 3] |> opts_filter_keys!([b: :want_b, c: :and_c])
[b: 2, c: 3]
iex> [a: 1, b: 2, c: 3] |> opts_filter_keys!(%{a: 42, b: nil})
[a: 1, b: 2]
iex> [a: 1, b: 2, c: 3] |> opts_filter_keys!(:not_a_key_spec)
** (ArgumentError) expected enum; got: :not_a_key_spec
iex> :not_opts |> opts_filter_keys!([:a, :b, :c])
** (ArgumentError) expected valid derivable opts; got: :not_opts
"""
@spec opts_filter_keys!(any, any) :: opts | no_return
def opts_filter_keys!(opts, keys) do
case opts_filter_keys(opts, keys) do
{:ok, opts} -> opts
{:error, error} -> raise error
end
end
@doc ~S"""
`opts_reject_keys/2` takes a *derivable opts* and a *key spec* and returns `{:ok, opts}` where `opts` has all keys from the original *derivable opts* that **do not** appear in the *key spec*.
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_reject_keys([:a, :b, :c])
{:ok, []}
iex> [a: 1, b: 2, c: 3] |> opts_reject_keys([b: "b value", c: :dont_want_this_key])
{:ok, [a: 1]}
iex> [a: 1, b: 2, c: 3] |> opts_reject_keys(%{a: nil, b: nil})
{:ok, [c: 3]}
iex> [a: 1, b: 2, c: 3] |> opts_reject_keys(:not_a_key_spec)
{:error, %ArgumentError{message: "expected enum; got: :not_a_key_spec"}}
iex> :not_opts |> opts_reject_keys([:a, :b, :c])
{:error, %ArgumentError{message: "expected valid derivable opts; got: :not_opts"}}
"""
@spec opts_reject_keys(any, any) :: {:ok, opts} | {:error, error}
def opts_reject_keys(opts, keys)
def opts_reject_keys(opts, keys) do
with {:ok, reject_keys} <- keys |> normalise_key_spec do
reject_map = reject_keys |> Map.new(fn k -> {k, nil} end)
reject_pred = fn {k,_v} -> reject_map |> Map.has_key?(k) end
with {:ok, _reject_opts} = result <- opts |> opts_reject(reject_pred) do
result
else
{:error, _} = result -> result
end
else
{:error, _} = result -> result
end
end
@doc ~S"""
`opts_reject_keys!/2` calls `opts_reject_keys/2` and if the result is `{:ok, opts}`, returns `opts`.
## Examples
iex> [a: 1, b: 2, c: 3] |> opts_reject_keys!([:a, :b, :c])
[]
iex> [a: 1, b: 2, c: 3] |> opts_reject_keys!([b: "b value", c: :dont_want_this_key])
[a: 1]
iex> [a: 1, b: 2, c: 3] |> opts_reject_keys!(%{a: nil, b: nil})
[c: 3]
iex> [a: 1, b: 2, c: 3] |> opts_reject_keys!(:not_a_key_spec)
** (ArgumentError) expected enum; got: :not_a_key_spec
iex> :not_opts |> opts_reject_keys!([:a, :b, :c])
** (ArgumentError) expected valid derivable opts; got: :not_opts
"""
@spec opts_reject_keys!(any, any) :: opts | no_return
def opts_reject_keys!(opts, keys) do
case opts_reject_keys(opts, keys) do
{:ok, opts} -> opts
{:error, error} -> raise error
end
end
@doc ~S"""
`opts_fetch_key_values/2` takes a *derivable opts* and a *key* and returns the values of the *key* as `{:ok, values}` where `values` will be a list.
One of more indices can be provided to select the values at specific indices; the default is to return all values from `Keyword.get_values/2`.
Note indices *must* be relative to the result of `Keyword.get_values/2` **not** the indices of the original *opts*
Values are returned in the same order as the indices. Indices may be repeated. Indices are validated; asking for an unknown/impossible index will cause an error.
## Examples
The default is to return *all* the values for a *key* i.e same as `Keyword.get_values/2`:
iex> [a: 1, b: 2, c: 3] |> opts_fetch_key_values(:a)
{:ok, [1]}
This examples show multiple values being returned:
iex> [a: 11, b: 2, a: 12, c: 3, a: 13] |> opts_fetch_key_values(:a)
{:ok, [11, 12, 13]}
Here the last value for the key is returned:
iex> [a: 11, b: 2, a: 12, c: 3, a: 13] |> opts_fetch_key_values(:a, -1)
{:ok, [13]}
Here only the first value is wanted:
iex> [a: 11, b: 2, a: 12, c: 3, a: 13] |> opts_fetch_key_values(:a, 0)
{:ok, [11]}
Values at different indices:
iex> [a: 11, b: 2, a: 12, c: 3, a: 13] |> opts_fetch_key_values(:a, [1,-1])
{:ok, [12, 13]}
Note order of the values is same order as the indices:
iex> [a: 11, b: 2, a: 12, c: 3, a: 13] |> opts_fetch_key_values(:a, [-2,0,-1])
{:ok, [12, 11, 13]}
The same index/indices can be repeated:
iex> [a: 11, b: 2, a: 12, c: 3, a: 13] |> opts_fetch_key_values(:a, [-2,0,-2,-1,0])
{:ok, [12, 11, 12, 13, 11]}
Indices are validated:
iex> [a: 11, b: 2, a: 12, c: 3, a: 13] |> opts_fetch_key_values(:a, 99)
{:error, %ArgumentError{message: "index invalid; got: 99"}}
iex> [a: 11, b: 2, a: 12, c: 3, a: 13] |> opts_fetch_key_values(:a, :not_an_index)
{:error, %ArgumentError{message: "index invalid; got: :not_an_index"}}
"""
@spec opts_fetch_key_values(any, any) :: {:ok, opts} | {:error, error}
def opts_fetch_key_values(opts, key, indicies \\ nil)
def opts_fetch_key_values(opts, key, nil) do
with {:ok, norm_opts} <- opts |> opts_normalise,
true <- norm_opts |> Keyword.has_key?(key) do
{:ok, norm_opts |> Keyword.get_values(key)}
else
false -> new_key_error_result(key, opts)
{:error, _} = result -> result
end
end
def opts_fetch_key_values(opts, key, indices) do
with {:ok, norm_opts} <- opts |> opts_normalise,
true <- norm_opts |> Keyword.has_key?(key) do
key_values = norm_opts |> Keyword.get_values(key)
with {:ok, indices} <- key_values |> opts_indices_validate(indices) do
indices
|> List.wrap
|> Enum.reduce({[],[]},
fn index, {known_values,missing_indices} ->
with {:ok, value} <- key_values |> Enum.fetch(index) do
{[value | known_values], missing_indices}
else
:error -> {known_values, [index | missing_indices]}
end
end)
|> case do
{known_values, []} -> {:ok, known_values |> Enum.reverse}
{_known_values, missing_indices} ->
new_key_error_result(missing_indices, key_values)
end
else
{:error, _} = result -> result
end
else
false -> new_key_error_result(key, opts)
{:error, _} = result -> result
end
end
@doc ~S"""
`opts_fetch_key_values!/2` takes a *derivable opts* and a *key* and returns the values of the *key* as `{:ok, values}`.
One of more indices can be provided to select the values at specific indices; the default is to return all values from `Keyword.get_values/2`.
Note indices *must* be relative to the result of `Keyword.get_values/2` **not** the indices of the original *opts*
Values are returned in the order they are given in the indices. Indices may be repeated.
## Examples
The default is to return *all* the values for a *key*:
iex> [a: 1, b: 2, c: 3] |> opts_fetch_key_values!(:a)
[1]
This examples show multiple values being returned:
iex> [a: 11, b: 2, a: 12, c: 3, a: 13] |> opts_fetch_key_values!(:a)
[11, 12, 13]
Here the last value for the key is returned:
iex> [a: 11, b: 2, a: 12, c: 3, a: 13] |> opts_fetch_key_values!(:a, -1)
[13]
Here the first value is wanted:
iex> [a: 11, b: 2, a: 12, c: 3, a: 13] |> opts_fetch_key_values!(:a, 0)
[11]
Values at different indices:
iex> [a: 11, b: 2, a: 12, c: 3, a: 13] |> opts_fetch_key_values!(:a, [1,-1])
[12, 13]
Note order of the values is same as order of the indices
iex> [a: 11, b: 2, a: 12, c: 3, a: 13] |> opts_fetch_key_values!(:a, [-2,0,-1])
[12, 11, 13]
The same index/indices can be requested more than once:
iex> [a: 11, b: 2, a: 12, c: 3, a: 13] |> opts_fetch_key_values!(:a, [-2,0,-2,-1,0])
[12, 11, 12, 13, 11]
Indices are validated:
iex> [a: 11, b: 2, a: 12, c: 3, a: 13] |> opts_fetch_key_values!(:a, :not_an_index)
** (ArgumentError) index invalid; got: :not_an_index
"""
@spec opts_fetch_key_values!(any, any, any) :: list | no_return
def opts_fetch_key_values!(opts, key, indices \\ nil)
def opts_fetch_key_values!(opts, key, indices) do
case opts_fetch_key_values(opts, key, indices) do
{:ok, values} -> values
{:error, error} -> raise error
end
end
@doc ~S"""
`canon_keys!/2` takes a *key list* together with a lookup dictionary and replaces each key with its canonical value from the dictionary. Unknown keys raise a `KeyError`.
## Examples
iex> [:a, :b, :c] |> canon_keys!(%{a: 1, b: 2, c: 3})
[1,2,3]
iex> [:x] |> canon_keys!(%{a: 1, b: 2, c: 3})
** (KeyError) key :x not found in: %{a: 1, b: 2, c: 3}
"""
@spec canon_keys!(alias_keys, dict) :: alias_keys | no_return
def canon_keys!(keys, dict) when is_map(dict) do
keys |> Enum.map(fn k -> dict |> Map.fetch!(k) end)
end
@doc ~S"""
`canon_keys/2` takes a *key list* together with a lookup dictionary and replaces each key with its canonical value from the dictionary, returning `{:ok, canon_keys}`.
If there are any unknown keys, `{:error, {canon_known_keys, unknown_keys}}` will be returned.
## Examples
iex> [:a, :b, :c] |> canon_keys(%{a: 1, b: 2, c: 3})
{:ok, [1,2,3]}
iex> [:a, :x, :b, :y, :c, :z] |> canon_keys(%{a: 1, b: 2, c: 3})
{:error, {[1, 2, 3], [:x, :y, :z]}}
"""
@spec canon_keys(alias_keys, dict) :: {:ok, alias_keys} | {:error, error}
def canon_keys(keys, dict) when is_map(dict) do
keys
# split into known and unknown keys
|> Enum.split_with(fn k -> Map.has_key?(dict, k) end)
|> case do
# no unknown keys
{known_keys, []} ->
{:ok, known_keys |> canon_keys!(dict)}
{known_keys, unknown_keys} ->
{:error, {known_keys |> canon_keys!(dict), unknown_keys}}
end
end
@doc ~S"""
`canonical_keys/2` takes a *key list* and *key alias dict* and replaces each key with its canonical value from the dictionary, returning `{:ok, canonical_keys}`.
If there are any unknown keys `{:error, error}`, where `error` is a `KeyError`, will be returned.
## Examples
iex> [:a, :b, :c] |> canonical_keys(%{a: :p, b: :q, c: :r})
{:ok, [:p,:q,:r]}
iex> [:a, :b, :c] |> canonical_keys(%{a: 1, b: 2, c: 3})
{:ok, [1,2,3]}
iex> [:a, :x, :b, :y, :c, :z] |> canonical_keys(%{a: 1, b: 2, c: 3})
{:error, %KeyError{key: [:x, :y, :z], term: %{a: 1, b: 2, c: 3}}}
"""
@spec canonical_keys(alias_keys, any) :: {:ok, alias_keys} | {:error, error}
def canonical_keys(keys, dict) do
with {:ok, keys} <- keys |> normalise_key_list,
{:ok, dict} <- dict |> normalise_key_dict do
keys
|> Enum.reject(fn k -> Map.has_key?(dict, k) end)
|> case do
# no unknown keys
[] ->
canon_keys = keys |> Enum.map(fn k -> dict |> Map.get(k) end)
{:ok, canon_keys}
unknown_keys ->
unknown_keys |> new_key_error_result(dict)
end
else
{:error, _} = result -> result
end
end
@doc ~S"""
`canonical_keys!/2` calls `canonical_keys/2` and if the result is `{:ok, canonical_keys}` returns `canonical_keys`.
## Examples
iex> [:a, :b, :c] |> canonical_keys!(%{a: :p, b: :q, c: :r})
[:p,:q,:r]
iex> [:a, :b, :c] |> canonical_keys!(%{a: 1, b: 2, c: 3})
[1,2,3]
iex> [:a, :x, :b, :y, :c, :z] |> canonical_keys!(%{a: 1, b: 2, c: 3})
** (KeyError) key [:x, :y, :z] not found in: %{a: 1, b: 2, c: 3}
"""
@spec canonical_keys!(alias_keys, dict) :: alias_keys | no_return
def canonical_keys!(keys, dict) do
with {:ok, keys} <- keys |> canonical_keys(dict) do
keys
else
{:error, error} -> raise error
end
end
@doc ~S"""
`canonical_key/2` takes a key together with a *key dict* and replaces the key with its canonical value from the dictionary, returning `{:ok, canonical_key}`.
If the key is unknown, `{:error, error}`, `error` is a `KeyError`, will be returned.
## Examples
iex> :b |> canonical_key(%{a: :p, b: :q, c: :r})
{:ok, :q}
iex> :a |> canonical_key(%{a: 1, b: 2, c: 3})
{:ok, 1}
iex> :x |> canonical_key(%{a: 1, b: 2, c: 3})
{:error, %KeyError{key: :x, term: %{a: 1, b: 2, c: 3}}}
"""
@spec canonical_key(alias_key, any) :: {:ok, alias_key} | {:error, error}
def canonical_key(key, dict) do
with {:ok, dict} <- dict |> normalise_key_dict,
{:ok, keys} <- [key] |> canonical_keys(dict) do
{:ok, keys |> hd}
else
{:error, %KeyError{} = error} -> {:error, error |> struct!(key: key)}
{:error, _} = result -> result
end
end
@doc ~S"""
`canonical_key!/2` calls `canonical_key/2` and if the result is `{:ok, canonical_key}` returns `canonical_key`.
## Examples
iex> :a |> canonical_key!(%{a: 1, b: 2, c: 3})
1
iex> :b |> canonical_key!(%{a: :p, b: :q, c: :r})
:q
iex> :x |> canonical_key!(%{a: 1, b: 2, c: 3})
** (KeyError) key :x not found in: %{a: 1, b: 2, c: 3}
"""
@spec canonical_key!(alias_key, dict) :: alias_key | no_return
def canonical_key!(key, dict) do
with {:ok, key} <- key |> canonical_key(dict) do
key
else
{:error, error} -> raise error
end
end
@doc ~S"""
`maybe_canon_keys/2` takes a *key list* together with a lookup dictionary and, if the key is in the dictionary, replaces it with its value. Unknown keys are passed through unchanged.
## Examples
iex> [:a, :b, :c] |> maybe_canon_keys(%{a: 1, b: 2, c: 3})
[1, 2, 3]
iex> [:x, :a] |> maybe_canon_keys(%{a: 1, b: 2, c: 3})
[:x, 1]
"""
@spec maybe_canon_keys(alias_keys, dict) :: alias_keys
def maybe_canon_keys(keys, dict) when is_map(dict) do
keys
|> Enum.map(fn k ->
case dict |> Map.has_key?(k) do
true -> dict |> Map.fetch!(k)
_ -> k
end
end)
end
@doc ~S"""
`list_wrap_flat_just/1` wraps a value (if not already a list), flattens and removes `nils` at the *first / top* level.
## Examples
iex> [{:a, 1}, nil, [{:b1, 12}, nil, {:b2, [nil, 22, nil]}], nil, {:c, 3}] |> list_wrap_flat_just
[a: 1, b1: 12, b2: [nil, 22, nil], c: 3]
iex> [[[nil, 42, nil]]] |> list_wrap_flat_just
[42]
"""
@spec list_wrap_flat_just(any) :: [any]
def list_wrap_flat_just(value) do
value
|> List.wrap
|> List.flatten
|> Enum.reject(&is_nil/1)
end
@doc ~S"""
`list_wrap_flat_just_uniq/1` wraps a value (if not already a list), flattens, removes `nils` at
the *first / top* level, and deletes duplicates (using `Enum.uniq/1`)
## Examples
iex> [{:a, 1}, nil, [{:b1, 12}, nil, {:b2, [nil, 22, nil]}], nil, {:c, 3}, {:a, 1}, {:b1, 12}] |> list_wrap_flat_just_uniq
[a: 1, b1: 12, b2: [nil, 22, nil], c: 3]
iex> [nil, [42, [42, 42, nil]], 42] |> list_wrap_flat_just_uniq
[42]
"""
@spec list_wrap_flat_just_uniq(any) :: [any]
def list_wrap_flat_just_uniq(value) do
value
|> List.wrap
|> List.flatten
|> Enum.reject(&is_nil/1)
|> Enum.uniq
end
defp normalise_error_message(value)
defp normalise_error_message(value) when is_binary(value) do
value
end
defp normalise_error_message(value) when is_atom(value) do
value |> to_string
end
defp normalise_error_message(value) do
value |> inspect
end
defp normalise_error_value(value)
defp normalise_error_value(value) do
"got: #{value |> inspect}"
end
defp new_error_result(opts)
defp new_error_result(opts) do
message = [
m: &normalise_error_message/1,
v: &normalise_error_value/1,
]
|> Enum.reduce([],fn {k,fun}, texts ->
opts
|> Keyword.has_key?(k)
|> case do
true ->
text = opts |> Keyword.get(k) |> fun.()
[text | texts]
_ -> texts
end
end)
|> Enum.reverse
|> Enum.join("; ")
{:error, %ArgumentError{message: message}}
end
defp new_key_error(values, term) do
cond do
Keyword.keyword?(values) -> values |> Keyword.keys
is_list(values) -> values
true -> raise ArgumentError, message: "expected opts or keys; got: #{inspect values}"
end
|> Enum.uniq
|> case do
[key] -> %KeyError{key: key, term: term}
keys -> %KeyError{key: keys, term: term}
end
end
defp new_key_error_result(values, term) do
{:error, new_key_error(values, term)}
end
end
|
lib/option/utility.ex
| 0.867401 | 0.683736 |
utility.ex
|
starcoder
|
defmodule ExRtmidi.Input do
  @moduledoc """
  Contains methods that initialize and interface with input ports.
  Things of note:
  - init/1 should not be called frequently. Ideally, it should be called once at app boot (see comments)
  - If you add a listener, do so before opening the port (per RtMidi C++ docs)
  - Listeners work really well with GenServers - see main README for an example
  """
  alias ExRtmidi.Nifs.Input, as: InputNif

  # TODO: while most other methods take 10-20 usec, this takes about 10 ms
  # The max recommended execution time for a NIF fn is 1ms, but this may be workable
  # due to the infrequency that it would likely be called.
  # Normal use case would be to call this one time but send messages frequently
  @doc """
  Creates an RtMidi input instance under the specified name.
  The name you pass will be used to reference this instance going forward.
  """
  @spec init(atom()) :: {:error, any()} | {:ok, atom()}
  def init(instance_name) when is_atom(instance_name) do
    result =
      instance_name
      |> Atom.to_charlist()
      |> InputNif.init()

    # normalise the NIF's bare :ok into {:ok, instance_name} so callers
    # can thread the instance name through pipelines
    case result do
      :ok -> {:ok, instance_name}
      {:error, msg} -> {:error, msg}
    end
  end

  @doc """
  Returns the count of available input ports for a given RtMidi instance
  """
  @spec get_port_count(atom()) :: {:error, any()} | {:ok, integer()}
  def get_port_count(instance_name) when is_atom(instance_name) do
    instance_name
    |> Atom.to_charlist()
    |> InputNif.get_port_count()
  end

  @doc """
  Returns the name of a port for a given instance and port index
  """
  @spec get_port_name(atom(), integer()) :: {:error, any()} | {:ok, String.t()}
  def get_port_name(instance_name, port_idx)
      when is_atom(instance_name) and is_integer(port_idx) do
    formatted_instance_name = Atom.to_charlist(instance_name)

    # the NIF returns a charlist; convert to a binary for callers
    case InputNif.get_port_name(formatted_instance_name, port_idx) do
      {:error, msg} -> {:error, msg}
      {:ok, port_name} -> {:ok, List.to_string(port_name)}
    end
  end

  @doc """
  Does the same as get_port_name/2 but gets a lot more upset if the specified index doesn't exist
  """
  @spec get_port_name!(atom(), integer()) :: String.t()
  def get_port_name!(instance_name, port_idx)
      when is_atom(instance_name) and is_integer(port_idx) do
    {:ok, port_name} = get_port_name(instance_name, port_idx)
    port_name
  end

  @doc """
  Returns a list of port names for a given instance
  """
  @spec get_ports(atom()) :: {:error, any()} | {:ok, list()}
  def get_ports(instance_name) when is_atom(instance_name) do
    case get_port_count(instance_name) do
      {:ok, 0} ->
        # BUGFIX: with zero ports, 0..(0 - 1) is the decreasing range 0..-1,
        # which enumerates [0, -1] and would call get_port_name!/2 on
        # non-existent ports; no ports simply means no names.
        {:ok, []}

      {:ok, port_count} ->
        # bang method should be safe here because we've ensured that `n` ports exist
        port_names =
          Enum.map(0..(port_count - 1), fn idx -> get_port_name!(instance_name, idx) end)

        {:ok, port_names}

      {:error, msg} ->
        {:error, msg}
    end
  end

  @doc """
  Opens a given port by index or name on the given instance
  Maximum of one open port per instance
  """
  @spec open_port(atom(), integer()) :: {:error, any()} | :ok
  def open_port(instance_name, port_idx) when is_atom(instance_name) and is_integer(port_idx) do
    instance_name
    |> Atom.to_charlist()
    |> InputNif.open_port(port_idx)
  end

  def open_port(instance_name, port_name) when is_atom(instance_name) and is_binary(port_name) do
    # resolve the name to an index, then delegate to the index clause
    case get_ports(instance_name) do
      {:ok, port_names} ->
        port_index = Enum.find_index(port_names, fn port -> port == port_name end)

        case port_index do
          nil -> {:error, :port_not_found}
          _ -> open_port(instance_name, port_index)
        end

      {:error, msg} ->
        {:error, msg}
    end
  end

  @doc """
  Closes the open port on the given instance, if it exists. Safe to call even if no port is open
  """
  @spec close_port(atom()) :: {:error, any()} | :ok
  def close_port(instance_name) when is_atom(instance_name) do
    instance_name
    |> Atom.to_charlist()
    |> InputNif.close_port()
  end

  @doc """
  Takes a PID of a process that can listen to input events for a given instance
  Upon receipt of an incoming message the data is passed async to the provided process
  """
  @spec attach_listener(atom(), pid()) :: {:error, any()} | :ok
  def attach_listener(instance_name, pid) when is_atom(instance_name) do
    instance_name
    |> Atom.to_charlist()
    |> InputNif.attach_listener(pid)
  end

  @doc """
  Removes the listener for a given instance
  Prints a warning to console if no listener exists (from RtMidi itself), but doesn't blow up. This may change in future
  """
  @spec detach_listener(atom()) :: {:error, any()} | :ok
  def detach_listener(instance_name) when is_atom(instance_name) do
    instance_name
    |> Atom.to_charlist()
    |> InputNif.detach_listener()
  end
end
|
lib/ex_rtmidi/input.ex
| 0.532911 | 0.42173 |
input.ex
|
starcoder
|
defmodule Cadet.Assessments.Query do
@moduledoc """
Generate queries related to the Assessments context
"""
import Ecto.Query
alias Cadet.Assessments.{Answer, Assessment, Question, Submission}
  @doc """
  Returns a query with the following bindings:
    [submissions_with_xp_and_grade, answers]
  """
  @spec all_submissions_with_xp_and_grade :: Ecto.Query.t()
  def all_submissions_with_xp_and_grade do
    # Join each submission to its per-submission answer aggregate and copy
    # the summed xp/grade fields (and their adjustments) onto the struct.
    Submission
    |> join(:inner, [s], q in subquery(submissions_xp_and_grade()), s.id == q.submission_id)
    |> select([s, q], %Submission{
      s
      | xp: q.xp,
        xp_adjustment: q.xp_adjustment,
        grade: q.grade,
        adjustment: q.adjustment
    })
  end
  @doc """
  Returns a query with the following bindings:
    [submissions_with_xp, answers]
  """
  @spec all_submissions_with_xp :: Ecto.Query.t()
  def all_submissions_with_xp do
    # Same shape as all_submissions_with_xp_and_grade/0, xp fields only.
    Submission
    |> join(:inner, [s], q in subquery(submissions_xp()), s.id == q.submission_id)
    |> select([s, q], %Submission{s | xp: q.xp, xp_adjustment: q.xp_adjustment})
  end
@doc """
Returns a query with the following bindings:
[submissions_with_grade, answers]
"""
@spec all_submissions_with_grade :: Ecto.Query.t()
def all_submissions_with_grade do
Submission
|> join(:inner, [s], q in subquery(submissions_grade()), s.id == q.submission_id)
|> select([s, q], %Submission{s | grade: q.grade, adjustment: q.adjustment})
end
@doc """
Returns a query with the following bindings:
[assessments_with_xp_and_grade, questions]
"""
@spec all_assessments_with_max_xp_and_grade :: Ecto.Query.t()
def all_assessments_with_max_xp_and_grade do
Assessment
|> join(:inner, [a], q in subquery(assessments_max_xp_and_grade()), a.id == q.assessment_id)
|> select([a, q], %Assessment{a | max_grade: q.max_grade, max_xp: q.max_xp})
end
@doc """
Returns a query with the following bindings:
[assessments_with_grade, questions]
"""
@spec all_assessments_with_max_grade :: Ecto.Query.t()
def all_assessments_with_max_grade do
Assessment
|> join(:inner, [a], q in subquery(assessments_max_grade()), a.id == q.assessment_id)
|> select([a, q], %Assessment{a | max_grade: q.max_grade})
end
@spec submissions_xp_and_grade :: Ecto.Query.t()
def submissions_xp_and_grade do
Answer
|> group_by(:submission_id)
|> select([a], %{
submission_id: a.submission_id,
grade: sum(a.grade),
adjustment: sum(a.adjustment),
xp: sum(a.xp),
xp_adjustment: sum(a.xp_adjustment)
})
end
@spec submissions_grade :: Ecto.Query.t()
def submissions_grade do
Answer
|> group_by(:submission_id)
|> select([a], %{
submission_id: a.submission_id,
grade: sum(a.grade),
adjustment: sum(a.adjustment)
})
end
@spec submissions_xp :: Ecto.Query.t()
def submissions_xp do
Answer
|> group_by(:submission_id)
|> select([a], %{
submission_id: a.submission_id,
xp: sum(a.xp),
xp_adjustment: sum(a.xp_adjustment)
})
end
@spec assessments_max_grade :: Ecto.Query.t()
def assessments_max_grade do
Question
|> group_by(:assessment_id)
|> select([q], %{assessment_id: q.assessment_id, max_grade: sum(q.max_grade)})
end
@spec assessments_max_xp_and_grade :: Ecto.Query.t()
def assessments_max_xp_and_grade do
Question
|> group_by(:assessment_id)
|> select([q], %{
assessment_id: q.assessment_id,
max_grade: sum(q.max_grade),
max_xp: sum(q.max_xp)
})
end
end
|
lib/cadet/assessments/query.ex
| 0.833697 | 0.466724 |
query.ex
|
starcoder
|
defmodule Livebook.Notebook.Explore.Utils do
  @moduledoc false

  @doc """
  Defines a module attribute `attr` with notebook info.
  """
  defmacro defnotebook(attr, props) do
    # Expands in the caller at compile time: loads and parses the notebook,
    # registers the .livemd file as an external resource (so the caller is
    # recompiled when the file changes) and stores the info under `attr`.
    quote bind_quoted: [attr: attr, props: props] do
      {path, notebook_info} = Livebook.Notebook.Explore.Utils.fetch_notebook!(attr, props)
      @external_resource path
      Module.put_attribute(__MODULE__, attr, notebook_info)
    end
  end

  @doc """
  Reads and parses the notebook file named after `attr`, returning its path and
  an info map (slug, raw livemd source, title, description, image URL, and any
  preloaded image binaries listed under `:image_names` in `props`).

  Raises if the file cannot be read or `props` lacks `:description`/`:image_url`.
  """
  def fetch_notebook!(attr, props) do
    name = Atom.to_string(attr)
    path = Path.join([__DIR__, "explore", name <> ".livemd"])
    markdown = File.read!(path)
    # Parse the file to ensure no warnings and read the title.
    # However, in the info we keep just the file contents to save on memory.
    {notebook, []} = Livebook.LiveMarkdown.Import.notebook_from_markdown(markdown)

    # Preload referenced images as binaries, keyed by file name.
    images =
      props
      |> Keyword.get(:image_names, [])
      |> Map.new(fn image_name ->
        path = Path.join([__DIR__, "explore", "images", image_name])
        content = File.read!(path)
        {image_name, content}
      end)

    notebook_info = %{
      # URL-friendly identifier derived from the file name.
      slug: String.replace(name, "_", "-"),
      livemd: markdown,
      title: notebook.name,
      description: Keyword.fetch!(props, :description),
      image_url: Keyword.fetch!(props, :image_url),
      images: images
    }

    {path, notebook_info}
  end
end
defmodule Livebook.Notebook.Explore do
  @moduledoc false

  defmodule NotFoundError do
    @moduledoc false

    # Raised when no explore notebook matches the requested slug;
    # `plug_status: 404` lets Plug render it as Not Found.
    defexception [:slug, plug_status: 404]

    def message(%{slug: slug}) do
      "could not find an example notebook matching #{inspect(slug)}"
    end
  end

  import Livebook.Notebook.Explore.Utils

  # Each `defnotebook` reads the matching .livemd file at compile time and
  # stores its metadata in a module attribute of the same name.
  defnotebook(:intro_to_livebook,
    description: "Get to know Livebook, see how it works and explore its features.",
    image_url: "/images/logo.png"
  )

  defnotebook(:distributed_portals_with_elixir,
    description:
      "A fast-paced introduction to the Elixir language by building distributed data-transfer portals.",
    image_url: "/images/portals.png"
  )

  defnotebook(:elixir_and_livebook,
    description: "Learn how to use some of Elixir and Livebook's unique features together.",
    image_url: "/images/elixir.png"
  )

  defnotebook(:intro_to_nx,
    description:
      "Enter numerical Elixir, experience the power of multi-dimensional arrays of numbers.",
    image_url: "/images/nx.png"
  )

  defnotebook(:intro_to_axon,
    description: "Build Neural Networks in Elixir using a high-level, composable API.",
    image_url: "/images/axon.png"
  )

  defnotebook(:intro_to_vega_lite,
    description: "Learn how to quickly create numerous plots for your data.",
    image_url: "/images/vega_lite.png"
  )

  @type notebook_info :: %{
          slug: String.t(),
          livemd: String.t(),
          title: String.t(),
          description: String.t(),
          image_url: String.t(),
          images: images()
        }

  # Image file name => raw image binary.
  @type images :: %{String.t() => binary()}

  @doc """
  Returns a list of example notebooks with metadata.
  """
  @spec notebook_infos() :: list(notebook_info())
  def notebook_infos() do
    # Nx/Axon notebooks are defined above but deliberately excluded from the
    # published list (see the commented entries).
    [
      @intro_to_livebook,
      @distributed_portals_with_elixir,
      @elixir_and_livebook,
      @intro_to_vega_lite
      # @intro_to_nx, @intro_to_axon,
    ]
  end

  @doc """
  Finds explore notebook by slug and returns the parsed data structure.

  Returns the notebook along with the images it uses as preloaded binaries.
  Raises `NotFoundError` if no notebook matches the slug.
  """
  @spec notebook_by_slug!(String.t()) :: {Livebook.Notebook.t(), images()}
  def notebook_by_slug!(slug) do
    notebook_infos()
    |> Enum.find(&(&1.slug == slug))
    |> case do
      nil ->
        raise NotFoundError, slug: slug

      notebook_info ->
        # Parse lazily — only the raw livemd source is kept in memory.
        {notebook, []} = Livebook.LiveMarkdown.Import.notebook_from_markdown(notebook_info.livemd)
        {notebook, notebook_info.images}
    end
  end
end
|
lib/livebook/notebook/explore.ex
| 0.860296 | 0.610279 |
explore.ex
|
starcoder
|
defmodule Geometrics.OpenTelemetry.Logger do
  @moduledoc """
  Capture crashes and exits with open spans. This helps to capture problems where a
  system is overloaded or under-optimized, and timeouts occur. Timeout errors (in Ecto,
  or when `call`ing into GenServers) typically trigger `exit` instead of raising exceptions.

  Frameworks such as `Phoenix` and `Phoenix.LiveView` use `:telemetry`, with wrappers that
  rescue and reraise exceptions, but which do not catch exits. For this reason, exits and
  other timeout errors can interrupt application tracing, since spans opened in processes
  may otherwise never be closed, and therefore never be exported.

  This module requires that when a span is opened, it be registered in the opening
  process's `Logger` metadata via `track_span_ctx/1` (and removed with `pop_span_ctx/0`
  when the span ends), so this handler can emit a close event if the process crashes.

  References:

  * https://github.com/elixir-lang/elixir/blob/v1.11.3/lib/logger/lib/logger/backends/console.ex
  """

  @behaviour :gen_event

  # Handler state: minimum log level, read from config in `init/1`.
  defstruct level: nil

  @doc """
  Given an otel span context, ensure that it is saved in the Logger metadata for the current process.

  If the process crashes or exits, the custom logger defined by this file will receive an error event,
  and can send telemetry to indicate that the span should be closed. Returns `span_ctx` unchanged.
  """
  def track_span_ctx(span_ctx) do
    spans =
      Logger.metadata()
      |> Keyword.get(:ot_spans, [])
      |> List.insert_at(0, span_ctx)

    Logger.metadata(ot_spans: spans)
    span_ctx
  end

  @doc """
  When ending a span, it no longer needs to be tracked by the Logger.

  Pops the most recently tracked span context and returns the remaining stack.
  Safe to call when no spans are tracked.
  """
  def pop_span_ctx() do
    spans =
      Logger.metadata()
      |> Keyword.get(:ot_spans, [])
      |> case do
        [_span | rest] -> rest
        [] -> []
      end

    Logger.metadata(ot_spans: spans)
    spans
  end

  ## :gen_event callbacks

  def init(__MODULE__), do: init({__MODULE__, :geometrics_logger})

  def init({__MODULE__, config_name}) do
    config = Application.get_env(:logger, config_name, [])
    {:ok, new(config, %__MODULE__{})}
  end

  def handle_call({:configure, _options}, state) do
    {:ok, :ok, state}
  end

  # Info-level events never carry a crash reason; ignore them cheaply.
  def handle_event({:info, _, _}, state), do: {:ok, state}

  # An error report: if the crashing process had tracked spans, emit a
  # telemetry event so subscribers can close/export those spans.
  #
  # Cleanup: the previous nested `case` computed `{:ok, state}` in every branch
  # and then discarded that value, returning `{:ok, state}` again. The `with`
  # keeps only the telemetry side effect; metadata without the expected
  # `{reason, stacktrace}` / span-list shapes is now ignored rather than
  # crashing the handler.
  def handle_event({:error, _pid, {Logger, _msg, _timestamp, metadata}}, state) do
    with {crash_reason, stacktrace} <- Keyword.get(metadata, :crash_reason),
         spans when is_list(spans) <- Keyword.get(metadata, :ot_spans) do
      :telemetry.execute([:geometrics, :open_telemetry, :exit], %{}, %{
        spans: spans,
        reason: crash_reason,
        stacktrace: stacktrace
      })
    end

    {:ok, state}
  end

  def handle_event({level, _gl, _thing} = _event, state) do
    %{level: log_level} = state

    # NOTE(review): both branches are currently identical — level-based
    # filtering is a placeholder. Kept so the configured level is exercised.
    if meet_level?(level, log_level) do
      {:ok, state}
    else
      {:ok, state}
    end
  end

  def handle_event(:flush, state) do
    {:ok, flush(state)}
  end

  def handle_event(_msg, state) do
    {:ok, state}
  end

  def handle_info({Logger.Config, :update_counter}, state), do: {:ok, state}
  def handle_info({:io_reply, _ref, _msg}, state), do: {:ok, state}
  def handle_info({:EXIT, _pid, _reason}, state), do: {:ok, state}

  # Nothing is buffered, so flushing is a no-op.
  defp flush(state), do: state

  # `nil` minimum means "accept everything".
  defp meet_level?(_lvl, nil), do: true
  defp meet_level?(lvl, min), do: Logger.compare_levels(lvl, min) != :lt

  # Builds the initial handler state from keyword config (default level: :error).
  defp new(config, state) do
    level = Keyword.get(config, :level, :error)

    %{state | level: level}
  end
end
|
lib/geometrics/open_telemetry/logger.ex
| 0.847005 | 0.487612 |
logger.ex
|
starcoder
|
defmodule DBConnection.Sojourn do
  @moduledoc """
  A `DBConnection.Pool` using sbroker.

  ### Options

    * `:pool_size` - The number of connections (default: `10`)
    * `:broker` - The sbroker callback module (see `:sbroker`,
      default: `DBConnection.Sojourn.Timeout`)
    * `:broker_start_opts` - Start options for the broker (see
      `:sbroker`, default: `[]`)
    * `:max_restarts` - the maximum amount of connection restarts allowed in a
      time frame (default `3`)
    * `:max_seconds` - the time frame in which `:max_restarts` applies (default
      `5`)
    * `:shutdown` - the shutdown strategy for connections (default `5_000`)

  All options are passed as the argument to the sbroker callback module.
  """

  @behaviour DBConnection.Pool

  @broker DBConnection.Sojourn.Timeout
  @time_unit :micro_seconds

  import Supervisor.Spec

  @doc false
  def start_link(mod, opts) do
    apply(:sbroker, :start_link, broker_args(mod, opts))
  end

  @doc false
  def child_spec(mod, opts, child_opts \\ []) do
    worker(:sbroker, broker_args(mod, opts), child_opts)
  end

  @doc false
  def checkout(broker, opts) do
    case ask(broker, opts) do
      {:go, ref, {pid, mod, state}, _, _} ->
        {:ok, {pid, ref}, mod, state}

      {drop, _} when drop in [:drop, :retry] ->
        {:error, DBConnection.Error.exception("connection not available")}
    end
  end

  @doc false
  defdelegate checkin(ref, state, opts), to: DBConnection.Connection

  @doc false
  defdelegate disconnect(ref, err, state, opts), to: DBConnection.Connection

  @doc false
  defdelegate stop(ref, reason, state, opts), to: DBConnection.Connection

  ## Helpers

  # Builds the argument list for `:sbroker.start_link`: broker wrapper module,
  # its init argument and start options, optionally prefixed with a
  # registration name when `:name` is given.
  defp broker_args(mod, opts) do
    broker = Keyword.get(opts, :broker, @broker)

    # Bug fix: the moduledoc documents this option as `:broker_start_opts`,
    # but the code previously read `:broker_start_opt`, so the documented key
    # was silently ignored. Read the documented key first, falling back to the
    # old misspelled key for backward compatibility.
    start_opts =
      Keyword.get_lazy(opts, :broker_start_opts, fn ->
        Keyword.get(opts, :broker_start_opt, [time_unit: @time_unit])
      end)

    args = [__MODULE__.Broker, {broker, mod, opts}, start_opts]

    case Keyword.get(opts, :name) do
      nil -> args
      name when is_atom(name) -> [{:local, name} | args]
      name -> [name | args]
    end
  end

  # Issues a (possibly non-queuing, when `queue: false`) checkout request.
  defp ask(broker, opts) do
    timeout = Keyword.get(opts, :timeout, 5_000)
    info = {self(), timeout}

    case Keyword.get(opts, :queue, true) do
      true -> :sbroker.ask(broker, info)
      false -> :sbroker.nb_ask(broker, info)
    end
  end
end
|
throwaway/hello/deps/db_connection/lib/db_connection/sojourn.ex
| 0.838647 | 0.444565 |
sojourn.ex
|
starcoder
|
defmodule Quark.Compose do
  @moduledoc ~S"""
  Function composition is taking two functions, and joining them together to
  create a new function. For example:

  ## Examples

      iex> sum_plus_one = compose([&(&1 + 1), &Enum.sum/1])
      ...> sum_plus_one.([1,2,3])
      7

  In this case, we have joined `Enum.sum` with a function that adds one,
  to create a new function that takes a list, sums it, and adds one.

  Note that composition normally applies _from right to left_, though `Quark`
  provides the opposite in the form of `*_forward` functions.
  """

  import Quark.SKI
  import Quark.Curry

  @doc ~S"""
  Function composition

  ## Examples

      iex> sum_plus_one = compose(&(&1 + 1), &Enum.sum/1)
      ...> [1, 2, 3] |> sum_plus_one.()
      7

  """
  # Bug fix: the previous spec declared a return of `any`, but compose/2
  # always returns a one-argument function.
  @spec compose(fun, fun) :: fun
  def compose(g, f) do
    # Curry both functions so they can be applied one argument at a time,
    # then apply `f` first and `g` to its result (right-to-left composition).
    fn x ->
      x
      |> curry(f).()
      |> curry(g).()
    end
  end

  @doc ~S"""
  Function composition, from the tail of the list to the head

  ## Examples

      iex> sum_plus_one = compose([&(&1 + 1), &Enum.sum/1])
      ...> [1,2,3] |> sum_plus_one.()
      7

  """
  # Fold from the right with the identity function as the accumulator, so the
  # last function in the list is applied first.
  @spec compose([fun]) :: fun
  def compose(funcs) when is_list(funcs), do: funcs |> List.foldr(&id/1, &compose/2)

  @doc ~S"""
  Infix compositon operator

  ## Examples

      iex> sum_plus_one = fn x -> x + 1 end <|> &Enum.sum/1
      ...> sum_plus_one.([1,2,3])
      7

      iex> add_one = &(&1 + 1)
      ...> piped = [1, 2, 3] |> Enum.sum() |> add_one.()
      ...> composed = [1, 2, 3] |> ((add_one <|> &Enum.sum/1)).()
      ...> piped == composed
      true

  """
  @spec fun <|> fun :: fun
  def g <|> f, do: compose(g, f)

  @doc ~S"""
  Function composition, from the head to tail (left-to-right)

  ## Examples

      iex> sum_plus_one = compose_forward(&Enum.sum/1, &(&1 + 1))
      ...> [1, 2, 3] |> sum_plus_one.()
      7

  """
  @spec compose_forward(fun, fun) :: fun
  def compose_forward(f, g) do
    compose(g, f)
  end

  @doc ~S"""
  Infix "forward" compositon operator

  ## Examples

      iex> sum_plus_one = (&Enum.sum/1) <~> fn x -> x + 1 end
      ...> sum_plus_one.([1, 2, 3])
      7

      iex> x200 = (&(&1 * 2)) <~> (&(&1 * 10)) <~> (&(&1 * 10))
      ...> x200.(5)
      1000

      iex> add_one = &(&1 + 1)
      ...> piped = [1, 2, 3] |> Enum.sum() |> add_one.()
      ...> composed = [1, 2, 3] |> ((&Enum.sum/1) <~> add_one).()
      ...> piped == composed
      true

  """
  @spec fun <~> fun :: fun
  def f <~> g, do: compose_forward(f, g)

  @doc ~S"""
  Compose functions, from the head of the list of functions.

  ## Examples

      iex> sum_plus_one = compose_forward([&Enum.sum/1, &(&1 + 1)])
      ...> sum_plus_one.([1, 2, 3])
      7

  """
  # Fold from the left: each list element is composed *after* the accumulated
  # function, so the head of the list is applied first.
  @spec compose_forward([fun]) :: fun
  def compose_forward(funcs) when is_list(funcs), do: funcs |> List.foldl(&id/1, &compose/2)
end
|
lib/quark/compose.ex
| 0.871174 | 0.685555 |
compose.ex
|
starcoder
|
defmodule BSV.VarInt do
  @moduledoc """
  A VarInt is an integer encoded as a variable length binary value. It is a
  format used throughout Bitcoin to represent the length of binary data in a
  compact form.
  """
  alias BSV.Serializable

  # Largest value representable by the 64-bit (0xFF-prefixed) form.
  @max_int64 18_446_744_073_709_551_615

  @typedoc "VarInt binary"
  @type t() :: binary()

  @doc """
  Decodes the given VarInt binary into an integer.

  Returns the result in an `:ok` / `:error` tuple pair.

  ## Examples

      iex> BSV.VarInt.decode(<<253, 4, 1>>)
      {:ok, 260}

      iex> BSV.VarInt.decode(<<254, 0, 225, 245, 5>>)
      {:ok, 100_000_000}
  """
  @spec decode(binary()) :: {:ok, integer()} | {:error, term()}
  def decode(data) when is_binary(data) do
    with {:ok, int, _rest} <- parse_int(data), do: {:ok, int}
  end

  @doc """
  Decodes the given VarInt binary into an integer.

  As `decode/1` but returns the result or raises an exception.
  """
  @spec decode!(binary()) :: integer()
  def decode!(data) when is_binary(data) do
    case decode(data) do
      {:ok, int} ->
        int

      {:error, error} ->
        raise BSV.DecodeError, error
    end
  end

  @doc """
  Returns a binary of the length specified by the VarInt in the first bytes of
  the binary. Any remaining bytes are ignored.

  Returns the result in an `:ok` / `:error` tuple pair.

  ## Examples

      iex> BSV.VarInt.decode_binary(<<5, 104, 101, 108, 108, 111, 99, 99>>)
      {:ok, "hello"}
  """
  @spec decode_binary(binary()) :: {:ok, binary()} | {:error, term()}
  def decode_binary(data) when is_binary(data) do
    with {:ok, data, _rest} <- parse_data(data), do: {:ok, data}
  end

  @doc """
  Returns a binary of the length specified by the VarInt in the first bytes of
  the binary.

  As `decode_binary/1` but returns the result or raises an exception.
  """
  @spec decode_binary!(binary()) :: binary()
  def decode_binary!(data) when is_binary(data) do
    case decode_binary(data) do
      {:ok, data} ->
        data

      {:error, error} ->
        raise BSV.DecodeError, error
    end
  end

  @doc """
  Encodes the given integer into a VarInt binary.

  ## Examples

      iex> BSV.VarInt.encode(260)
      <<253, 4, 1>>

      iex> BSV.VarInt.encode(100_000_000)
      <<254, 0, 225, 245, 5>>
  """
  @spec encode(integer()) :: binary()
  def encode(int)
      when is_integer(int)
      and int >= 0 and int <= @max_int64
  do
    # Bitcoin's CompactSize encoding: values below 0xFD (253) are a single raw
    # byte; the 0xFD/0xFE/0xFF markers prefix 16/32/64-bit little-endian ints.
    #
    # Bug fix: the previous guard was `int < 254`, which encoded 253 as the
    # bare byte <<0xFD>> — colliding with the 2-byte length marker, so streams
    # produced by `encode_binary/1` could be misparsed by `parse_data/1`.
    case int do
      int when int < 0xFD ->
        <<int::integer>>

      int when int < 0x10000 ->
        <<0xFD, int::little-16>>

      int when int < 0x100000000 ->
        <<0xFE, int::little-32>>

      int ->
        <<0xFF, int::little-64>>
    end
  end

  @doc """
  Prepends the given binary with a VarInt representing the length of the binary.

  ## Examples

      iex> BSV.VarInt.encode_binary("hello")
      <<5, 104, 101, 108, 108, 111>>
  """
  @spec encode_binary(binary()) :: binary()
  def encode_binary(data)
      when is_binary(data)
      and byte_size(data) <= @max_int64
  do
    size = byte_size(data) |> encode()
    size <> data
  end

  @doc """
  Parses the given binary, returning a tuple with a binary of the length
  specified by the VarInt in the first bytes of the binary, and a binary of any
  remaining bytes.

  ## Examples

      iex> BSV.VarInt.parse_data(<<5, 104, 101, 108, 108, 111, 1, 2, 3>>)
      {:ok, "hello", <<1, 2, 3>>}
  """
  @spec parse_data(binary()) :: {:ok, binary(), binary()} | {:error, term()}
  def parse_data(<<253, int::little-16, data::bytes-size(int), rest::binary>>),
    do: {:ok, data, rest}

  def parse_data(<<254, int::little-32, data::bytes-size(int), rest::binary>>),
    do: {:ok, data, rest}

  def parse_data(<<255, int::little-64, data::bytes-size(int), rest::binary>>),
    do: {:ok, data, rest}

  def parse_data(<<int::integer, data::bytes-size(int), rest::binary>>),
    do: {:ok, data, rest}

  def parse_data(<<_data::binary>>),
    do: {:error, :invalid_varint}

  @doc """
  Parses the given binary, returning a tuple with an integer decoded from the
  VarInt in the first bytes of the binary, and a binary of any remaining bytes.

  ## Examples

      iex> BSV.VarInt.parse_int(<<5, 104, 101, 108, 108, 111, 1, 2, 3>>)
      {:ok, 5, <<104, 101, 108, 108, 111, 1, 2, 3>>}
  """
  @spec parse_int(binary()) :: {:ok, integer(), binary()} | {:error, term()}
  def parse_int(<<253, int::little-16, rest::binary>>), do: {:ok, int, rest}
  def parse_int(<<254, int::little-32, rest::binary>>), do: {:ok, int, rest}
  def parse_int(<<255, int::little-64, rest::binary>>), do: {:ok, int, rest}
  def parse_int(<<int::integer, rest::binary>>), do: {:ok, int, rest}

  def parse_int(<<_data::binary>>),
    do: {:error, :invalid_varint}

  @doc """
  Parses the given binary into a list of the length specified by the VarInt in
  the first bytes of the binary. Each item is parsed according to the specified
  `Serializable.t/0`.
  """
  @spec parse_items(binary(), Serializable.t()) ::
          {:ok, list(Serializable.t()), binary()} |
          {:error, term()}
  def parse_items(data, mod) when is_binary(data) and is_atom(mod) do
    with {:ok, int, data} <- parse_int(data) do
      parse_items(data, int, mod)
    end
  end

  # Parses items from the data binary until the correct number have been parsed
  defp parse_items(data, num, mod, result \\ [])

  defp parse_items(data, num, _mod, result) when length(result) == num,
    do: {:ok, Enum.reverse(result), data}

  defp parse_items(data, num, mod, result) do
    with {:ok, item, data} <- Serializable.parse(struct(mod), data) do
      parse_items(data, num, mod, [item | result])
    end
  end
end
|
lib/bsv/var_int.ex
| 0.894942 | 0.600452 |
var_int.ex
|
starcoder
|
defmodule Ecto.Schema do
@moduledoc ~S"""
Defines a schema for a model.
A schema is a struct with associated metadata that is persisted to a
repository. Every schema model is also a struct, that means that you work
with models just like you would work with structs.
## Example
defmodule User do
use Ecto.Schema
schema "users" do
field :name, :string
field :age, :integer, default: 0
has_many :posts, Post
end
end
By default, a schema will generate a primary key named `id`
of type `:integer` and `belongs_to` associations in the schema will generate
foreign keys of type `:integer`. Those setting can be configured
below.
## Schema attributes
The schema supports some attributes to be set before hand,
configuring the defined schema.
Those attributes are:
* `@primary_key` - configures the schema primary key. It expects
a tuple with the primary key name, type (:id or :binary_id) and options. Defaults
to `{:id, :id, autogenerate: true}`. When set to
false, does not define a primary key in the model;
* `@foreign_key_type` - configures the default foreign key type
used by `belongs_to` associations. Defaults to `:integer`;
* `@timestamps_opts` - configures the default timestamps type
used by `timestamps`. Defaults to `[type: Ecto.DateTime, usec: false]`;
* `@derive` - the same as `@derive` available in `Kernel.defstruct/1`
as the schema defines a struct behind the scenes;
The advantage of configuring the schema via those attributes is
that they can be set with a macro to configure application wide
defaults.
For example, if your database does not support autoincrementing
primary keys and requires something like UUID or a RecordID, you
configure and use`:binary_id` as your primary key type as follows:
# Define a module to be used as base
defmodule MyApp.Model do
defmacro __using__(_) do
quote do
use Ecto.Model
@primary_key {:id, :binary_id, autogenerate: true}
@foreign_key_type :binary_id
end
end
end
# Now use MyApp.Model to define new models
defmodule MyApp.Comment do
use MyApp.Model
schema "comments" do
belongs_to :post, MyApp.Post
end
end
Any models using `MyApp.Model` will get the `:id` field with type
`:binary_id` as primary key. We explain what the `:binary_id` type
entails in the next section.
The `belongs_to` association on `MyApp.Comment` will also define
a `:post_id` field with `:binary_id` type that references the `:id`
field of the `MyApp.Post` model.
## Primary keys
Ecto supports two ID types, called `:id` and `:binary_id` which are
often used as the type for primary keys and associations.
The `:id` type is used when the primary key is an integer while the
`:binary_id` is used when the primary key is in binary format, which
may be `Ecto.UUID` for databases like PostgreSQL and MySQL, or some
specific ObjectID or RecordID often imposed by NoSQL databases.
In both cases, both types have their semantics specified by the
underlying adapter/database. For example, if you use the `:id`
type with `:autogenerate`, it means the database will be responsible
for auto-generation the id if it supports it.
Similarly, the `:binary_id` type may be generated in the adapter
for cases like UUID but it may also be handled by the database if
required. In any case, both scenarios are handled transparently by
Ecto.
Besides `:id` and `:binary_id`, which are often used by primary
and foreign keys, Ecto provides a huge variety of types to be used
by the remaining columns.
## Types and casting
When defining the schema, types need to be given. Types are split
in two categories, primitive types and custom types.
### Primitive types
The primitive types are:
Ecto type | Elixir type | Literal syntax in query
:---------------------- | :---------------------- | :---------------------
`:id` | `integer` | 1, 2, 3
`:binary_id` | `binary` | `<<int, int, int, ...>>`
`:integer` | `integer` | 1, 2, 3
`:float` | `float` | 1.0, 2.0, 3.0
`:boolean` | `boolean` | true, false
`:string` | UTF-8 encoded `string` | "hello"
`:binary` | `binary` | `<<int, int, int, ...>>`
`{:array, inner_type}` | `list` | `[value, value, value, ...]`
`:decimal` | [`Decimal`](https://github.com/ericmj/decimal)
`:datetime` | `{{year, month, day}, {hour, min, sec}}`
`:date` | `{year, month, day}`
`:time` | `{hour, min, sec}`
**Note:** Although Ecto provides `:date`, `:time` and `:datetime`, you
likely want to use `Ecto.Date`, `Ecto.Time` and `Ecto.DateTime` respectively.
See the Custom types sections below about types that enhance the primitive
ones.
### Custom types
Sometimes the primitive types in Ecto are too primitive. For example,
`:datetime` relies on the underling tuple representation instead of
representing itself as something nicer like a map/struct. That's where
`Ecto.DateTime` comes in.
`Ecto.DateTime` is a custom type. A custom type is a module that
implements the `Ecto.Type` behaviour. By default, Ecto provides the
following custom types:
Custom type | Database type | Elixir type
:---------------------- | :---------------------- | :---------------------
`Ecto.DateTime` | `:datetime` | `%Ecto.DateTime{}`
`Ecto.Date` | `:date` | `%Ecto.Date{}`
`Ecto.Time` | `:time` | `%Ecto.Time{}`
`Ecto.UUID` | `:uuid` | "uuid-string"
Ecto allow developers to provide their own types too. Read the
`Ecto.Type` documentation for more information.
### Casting
When directly manipulating the struct, it is the responsibility of
the developer to ensure the field values have the proper type. For
example, you can create a weather struct with an invalid value
for `temp_lo`:
iex> weather = %Weather{temp_lo: "0"}
iex> weather.temp_lo
"0"
However, if you attempt to persist the struct above, an error will
be raised since Ecto validates the types when sending them to the
adapter/database.
Therefore, when working and manipulating external data, it is
recommended the usage of `Ecto.Changeset`'s that are able to filter
and properly cast external data. In fact, `Ecto.Changeset` and custom
types provide a powerful combination to extend Ecto types and queries.
Finally, models can also have virtual fields by passing the
`virtual: true` option. These fields are not persisted to the database
and can optionally not be type checked by declaring type `:any`.
## Reflection
Any schema module will generate the `__schema__` function that can be
used for runtime introspection of the schema:
* `__schema__(:source)` - Returns the source as given to `schema/2`;
* `__schema__(:primary_key)` - Returns a list of the field that is the primary
key or [] if there is none;
* `__schema__(:fields)` - Returns a list of all non-virtual field names;
* `__schema__(:field, field)` - Returns the type of the given non-virtual field;
* `__schema__(:associations)` - Returns a list of all association field names;
* `__schema__(:association, assoc)` - Returns the association reflection of the given assoc;
* `__schema__(:read_after_writes)` - Non-virtual fields that must be read back
from the database after every write (insert or update);
* `__schema__(:autogenerate)` - Non-virtual fields that are auto generated on insert;
* `__schema__(:autogenerate_id)` - Primary key that is auto generated on insert;
* `__schema__(:load, source, idx, values, id_types)` - Loads a new model from a tuple of non-virtual
field values starting at the given index. Typically used by adapters;
Furthermore, both `__struct__` and `__changeset__` functions are
defined so structs and changeset functionalities are available.
"""
defmodule Metadata do
@moduledoc """
Stores metadata of a struct.
The fields are:
* `state` - the state in a struct's lifetime, e.g. :built, :loaded, :deleted
* `source` - the database source of a model, which is the source specified
in schema by default or custom source when building a assoc with the custom source.
"""
defstruct [:state, :source]
end
@doc false
defmacro __using__(_) do
quote do
import Ecto.Schema, only: [schema: 2]
@primary_key {:id, :id, autogenerate: true}
@timestamps_opts []
@foreign_key_type :id
end
end
@doc """
Defines a schema with a source name and field definitions.
"""
defmacro schema(source, [do: block]) do
quote do
source = unquote(source)
unless is_binary(source) do
raise ArgumentError, "schema source must be a string, got: #{inspect source}"
end
Module.register_attribute(__MODULE__, :changeset_fields, accumulate: true)
Module.register_attribute(__MODULE__, :struct_fields, accumulate: true)
Module.register_attribute(__MODULE__, :ecto_fields, accumulate: true)
Module.register_attribute(__MODULE__, :ecto_assocs, accumulate: true)
Module.register_attribute(__MODULE__, :ecto_raw, accumulate: true)
Module.register_attribute(__MODULE__, :ecto_autogenerate, accumulate: true)
Module.put_attribute(__MODULE__, :ecto_autogenerate_id, nil)
Module.put_attribute(__MODULE__, :struct_fields,
{:__meta__, %Metadata{state: :built, source: source}})
primary_key_fields =
case @primary_key do
false ->
[]
{name, type, opts} ->
Ecto.Schema.__field__(__MODULE__, name, type, true, opts)
[name]
other ->
raise ArgumentError, "@primary_key must be false or {name, type, opts}"
end
try do
import Ecto.Schema
unquote(block)
after
:ok
end
fields = @ecto_fields |> Enum.reverse
assocs = @ecto_assocs |> Enum.reverse
Module.eval_quoted __ENV__, [
Ecto.Schema.__struct__(@struct_fields),
Ecto.Schema.__changeset__(@changeset_fields),
Ecto.Schema.__source__(source),
Ecto.Schema.__fields__(fields),
Ecto.Schema.__assocs__(assocs),
Ecto.Schema.__primary_key__(primary_key_fields),
Ecto.Schema.__load__(fields),
Ecto.Schema.__read_after_writes__(@ecto_raw),
Ecto.Schema.__autogenerate__(@ecto_autogenerate, @ecto_autogenerate_id)]
end
end
## API
@doc """
Defines a field on the model schema with given name and type.
## Options
* `:default` - Sets the default value on the schema and the struct.
The default value is calculated at compilation time, so don't use
expressions like `Ecto.DateTime.local` or `Ecto.UUID.generate` as
they would then be the same for all records
* `:autogenerate` - Annotates the field to be autogenerated before
insertion if not value is set.
* `:read_after_writes` - When true, the field only sent on insert
if not nil and always read back from the repository during inserts
and updates.
For relational databases, this means the RETURNING option of those
statements are used. For this reason, MySQL does not support this
option and will raise an error if a model is inserted/updated with
read after writes fields.
* `:virtual` - When true, the field is not persisted to the database.
Notice virtual fields do not support `:autogenerate` nor
`:read_after_writes`.
"""
defmacro field(name, type \\ :string, opts \\ []) do
quote do
Ecto.Schema.__field__(__MODULE__, unquote(name), unquote(type), false, unquote(opts))
end
end
@doc """
Generates `:inserted_at` and `:updated_at` timestamp fields.
When using `Ecto.Model`, the fields generated by this macro
will automatically be set to the current time when inserting
and updating values in a repository.
## Options
* `:type` - the timestamps type, defaults to `Ecto.DateTime`.
* `:usec` - boolean, sets whether microseconds are used in timestamps.
Microseconds will be 0 if false. Defaults to false.
* `:inserted_at` - the name of the column for insertion times or `false`
* `:updated_at` - the name of the column for update times or `false`
All options can be pre-configured by setting `@timestamps_opts`.
"""
defmacro timestamps(opts \\ []) do
quote bind_quoted: binding do
timestamps =
[inserted_at: :inserted_at, updated_at: :updated_at,
type: Ecto.DateTime, usec: false]
|> Keyword.merge(@timestamps_opts)
|> Keyword.merge(opts)
if inserted_at = Keyword.fetch!(timestamps, :inserted_at) do
Ecto.Schema.field(inserted_at, Keyword.fetch!(timestamps, :type), [])
end
if updated_at = Keyword.fetch!(timestamps, :updated_at) do
Ecto.Schema.field(updated_at, Keyword.fetch!(timestamps, :type), [])
end
@ecto_timestamps timestamps
end
end
@doc """
Defines an association.
This macro is used by `belongs_to/3`, `has_one/3` and `has_many/3` to
define associations. However, custom association mechanisms can be provided
by developers and hooked in via this macro.
Read more about custom associations in `Ecto.Association`.
"""
defmacro association(cardinality, name, association, opts \\ []) do
quote do
Ecto.Schema.__association__(__MODULE__, unquote(cardinality), unquote(name),
unquote(association), unquote(opts))
end
end
@doc ~S"""
Indicates a one-to-many association with another model.
The current model has zero or more records of the other model. The other
model often has a `belongs_to` field with the reverse association.
## Options
* `:foreign_key` - Sets the foreign key, this should map to a field on the
other model, defaults to the underscored name of the current model
suffixed by `_id`
* `:references` - Sets the key on the current model to be used for the
association, defaults to the primary key on the model
* `:through` - If this association must be defined in terms of existing
associations. Read below for more information
## Examples
defmodule Post do
use Ecto.Model
schema "posts" do
has_many :comments, Comment
end
end
# Get all comments for a given post
post = Repo.get(Post, 42)
comments = Repo.all assoc(post, :comments)
# The comments can come preloaded on the post struct
[post] = Repo.all(from(p in Post, where: p.id == 42, preload: :comments))
post.comments #=> [%Comment{...}, ...]
## has_many/has_one :through
Ecto also supports defining associations in terms of other associations
via the `:through` option. Let's see an example:
defmodule Post do
use Ecto.Model
schema "posts" do
has_many :comments, Comment
has_one :permalink, Permalink
has_many :comments_authors, through: [:comments, :author]
# Specify the association with custom source
has_many :tags, {"posts_tags", Tag}
end
end
defmodule Comment do
use Ecto.Model
schema "comments" do
belongs_to :author, Author
belongs_to :post, Post
has_one :post_permalink, through: [:post, :permalink]
end
end
In the example above, we have defined a `has_many :through` association
named `:comments_authors`. A `:through` association always expects a list
and the first element of the list must be a previously defined association
in the current module. For example, `:comments_authors` first points to
`:comments` in the same module (Post), which then points to `:author` in
the next model `Comment`.
This `:through` association will return all authors for all comments
that belong to that post:
# Get all comments for a given post
post = Repo.get(Post, 42)
authors = Repo.all assoc(post, :comments_authors)
`:through` associations are read-only as they are useful to avoid repetition
allowing the developer to easily retrieve data that is often seen together
but stored across different tables.
`:through` associations can also be preloaded. In such cases, not only
the `:through` association is preloaded but all intermediate steps are
preloaded too:
[post] = Repo.all(from(p in Post, where: p.id == 42, preload: :comments_authors))
post.comments_authors #=> [%Author{...}, ...]
# The comments for each post will be preloaded too
post.comments #=> [%Comment{...}, ...]
# And the author for each comment too
hd(post.comments).authors #=> [%Author{...}, ...]
Finally, `:through` can be used with multiple associations (not only 2)
and with associations of any kind, including `belongs_to` and other
`:through` associations. When the `:through` association is expected to
return one or no item, `has_one :through` should be used instead, as in
the example at the beginning of this section:
# How we defined the association above
has_one :post_permalink, through: [:post, :permalink]
# Get a preloaded comment
[comment] = Repo.all(Comment) |> Repo.preload(:post_permalink)
comment.post_permalink #=> %Permalink{...}
"""
  defmacro has_many(name, queryable, opts \\ []) do
    quote bind_quoted: binding() do
      # A keyword list with `:through` selects the indirect association;
      # anything else is treated as the queryable (a module or a
      # `{source, module}` tuple).
      if is_list(queryable) and Keyword.has_key?(queryable, :through) do
        association(:many, name, Ecto.Association.HasThrough, queryable)
      else
        association(:many, name, Ecto.Association.Has, [queryable: queryable] ++ opts)
      end
    end
  end
@doc ~S"""
Indicates a one-to-one association with another model.
The current model has zero or one records of the other model. The other
model often has a `belongs_to` field with the reverse association.
## Options
* `:foreign_key` - Sets the foreign key, this should map to a field on the
other model, defaults to the underscored name of the current model
suffixed by `_id`
* `:references` - Sets the key on the current model to be used for the
association, defaults to the primary key on the model
* `:through` - If this association must be defined in terms of existing
associations. Read the section in `has_many/3` for more information
## Examples
defmodule Post do
use Ecto.Model
schema "posts" do
has_one :permalink, Permalink
# Specify the association with custom source
has_one :category, {"posts_categories", Category}
end
end
# The permalink can come preloaded on the post struct
[post] = Repo.all(from(p in Post, where: p.id == 42, preload: :permalink))
post.permalink #=> %Permalink{...}
"""
  defmacro has_one(name, queryable, opts \\ []) do
    quote bind_quoted: binding() do
      # Mirrors has_many/3 but with cardinality :one; `:through` keyword
      # lists pick the indirect association.
      if is_list(queryable) and Keyword.has_key?(queryable, :through) do
        association(:one, name, Ecto.Association.HasThrough, queryable)
      else
        association(:one, name, Ecto.Association.Has, [queryable: queryable] ++ opts)
      end
    end
  end
@doc ~S"""
Indicates a one-to-one association with another model.
The current model belongs to zero or one records of the other model. The other
model often has a `has_one` or a `has_many` field with the reverse association.
You should use `belongs_to` in the table that contains the foreign key. Imagine
a company <-> manager relationship. If the company contains the `manager_id` in
the underlying database table, we say the company belongs to manager.
In fact, when you invoke this macro, a field with the name of foreign key is
automatically defined in the schema for you.
## Options
* `:foreign_key` - Sets the foreign key field name, defaults to the name
of the association suffixed by `_id`. For example, `belongs_to :company`
will define foreign key of `:company_id`
* `:references` - Sets the key on the other model to be used for the
association, defaults to: `:id`
* `:define_field` - When false, does not automatically define a `:foreign_key`
field, implying the user is defining the field manually elsewhere
* `:type` - Sets the type of automatically defined `:foreign_key`.
Defaults to: `:integer` and be set per schema via `@foreign_key_type`
All other options are forwarded to the underlying foreign key definition
and therefore accept the same options as `field/3`.
## Examples
defmodule Comment do
use Ecto.Model
schema "comments" do
belongs_to :post, Post
end
end
# The post can come preloaded on the comment record
[comment] = Repo.all(from(c in Comment, where: c.id == 42, preload: :post))
comment.post #=> %Post{...}
## Polymorphic associations
One common use case for belongs to associations is to handle
polymorphism. For example, imagine you have defined a Comment
model and you wish to use it for commenting on tasks and posts.
Because Ecto does not tie a model to a given table, we can
achieve this by specifying the table on the association
definition. Let's start over and define a new Comment model:
defmodule Comment do
use Ecto.Model
schema "abstract table: comments" do
# This will be used by associations on each "concrete" table
field :assoc_id, :integer
end
end
Notice we have changed the table name to "abstract table: comment".
You can choose whatever name you want, the point here is that this
particular table will never exist.
Now in your Post and Task models:
defmodule Post do
use Ecto.Model
schema "posts" do
has_many :comments, {"posts_comments", Comment}, foreign_key: :assoc_id
end
end
defmodule Task do
use Ecto.Model
schema "tasks" do
has_many :comments, {"tasks_comments", Comment}, foreign_key: :assoc_id
end
end
Now each association uses its own specific table, "posts_comments"
and "tasks_comments", which must be created on migrations. The
advantage of this approach is that we never store unrelated data
together, ensuring we keep databases references fast and correct.
When using this technique, the only limitation is that you cannot
build comments directly. For example, the command below
Repo.insert(%Comment{})
will attempt to use the abstract table. Instead, one should
Repo.insert(build(post, :comments))
where `build/2` is defined in `Ecto.Model`. You can also
use `assoc/2` in both `Ecto.Model` and in the query syntax
to easily retrieve associated comments to a given post or
task:
# Fetch all comments associated to the given task
Repo.all(assoc(task, :comments))
Finally, if for some reason you wish to query one of comments
table directly, you can also specify the tuple source in
the query syntax:
Repo.all from(c in {"posts_comments", Comment}), ...)
"""
  defmacro belongs_to(name, queryable, opts \\ []) do
    quote bind_quoted: binding() do
      # The foreign key defaults to `<association>_id` unless given explicitly.
      opts = Keyword.put_new(opts, :foreign_key, :"#{name}_id")
      # An explicit `:type` option wins over the schema-wide `@foreign_key_type`.
      foreign_key_type = opts[:type] || @foreign_key_type
      # `define_field: false` lets the user declare the column themselves.
      if Keyword.get(opts, :define_field, true) do
        field(opts[:foreign_key], foreign_key_type, opts)
      end
      association(:one, name, Ecto.Association.BelongsTo, [queryable: queryable] ++ opts)
    end
  end
  ## Callbacks
  @doc false
  # Registers a field on the schema being compiled: validates the type and
  # default, records it for changesets and the struct and, for persisted
  # (non-virtual) fields, tracks read-after-writes / autogenerate metadata.
  def __field__(mod, name, type, pk?, opts) do
    check_type!(name, type, opts[:virtual])
    check_default!(name, type, opts[:default])
    Module.put_attribute(mod, :changeset_fields, {name, type})
    put_struct_field(mod, name, opts[:default])
    # Virtual fields live only on the struct; nothing below applies to them.
    unless opts[:virtual] do
      # NOTE: `raw`/`gen` are bound in the `if` conditions and read again
      # below - relies on condition bindings leaking to the enclosing scope.
      if raw = opts[:read_after_writes] do
        IO.puts :stderr, "[warning] :read_after_writes is deprecated. It was declared for " <>
                         "field #{inspect name} in model #{inspect mod}"
        Module.put_attribute(mod, :ecto_raw, name)
      end
      if gen = opts[:autogenerate] do
        store_autogenerate!(mod, name, type, pk?)
      end
      # The two options are mutually exclusive.
      if raw && gen do
        raise ArgumentError, "cannot mark the same field as autogenerate and read_after_writes"
      end
      Module.put_attribute(mod, :ecto_fields, {name, type})
    end
  end
@doc false
def __association__(mod, cardinality, name, association, opts) do
not_loaded = %Ecto.Association.NotLoaded{__owner__: mod,
__field__: name, __cardinality__: cardinality}
put_struct_field(mod, name, not_loaded)
opts = [cardinality: cardinality] ++ opts
Module.put_attribute(mod, :ecto_assocs, {name, association.struct(mod, name, opts)})
end
@doc false
def __load__(struct, source, fields, idx, values, id_types) do
loaded = do_load(struct, fields, idx, values, id_types)
loaded = Map.put(loaded, :__meta__, %Metadata{state: :loaded, source: source})
Ecto.Model.Callbacks.__apply__(struct.__struct__, :after_load, loaded)
end
defp do_load(struct, fields, idx, values, id_types) when is_integer(idx) and is_tuple(values) do
Enum.reduce(fields, {struct, idx}, fn
{field, type}, {acc, idx} ->
type = Ecto.Type.normalize(type, id_types)
value = Ecto.Type.load!(type, elem(values, idx))
{Map.put(acc, field, value), idx + 1}
end) |> elem(0)
end
  ## Quoted callbacks
  @doc false
  # Quotes the `__changeset__/0` reflection: a map of field name => type.
  def __changeset__(changeset_fields) do
    map = changeset_fields |> Enum.into(%{}) |> Macro.escape()
    quote do
      def __changeset__, do: unquote(map)
    end
  end
  @doc false
  # Quotes the schema struct definition from the accumulated fields.
  def __struct__(struct_fields) do
    quote do
      defstruct unquote(Macro.escape(struct_fields))
    end
  end
  @doc false
  # Quotes the `__schema__(:source)` reflection (the table/source name).
  def __source__(source) do
    quote do
      def __schema__(:source), do: unquote(Macro.escape(source))
    end
  end
  @doc false
  # Quotes one `__schema__(:field, name)` clause per field (returning its
  # type), a catch-all clause returning nil, and `__schema__(:fields)`.
  def __fields__(fields) do
    quoted = Enum.map(fields, fn {name, type} ->
      quote do
        def __schema__(:field, unquote(name)), do: unquote(type)
      end
    end)
    field_names = Enum.map(fields, &elem(&1, 0))
    # The catch-all must come last so the per-field clauses match first.
    quoted ++ [quote do
      def __schema__(:field, _), do: nil
      def __schema__(:fields), do: unquote(field_names)
    end]
  end
  @doc false
  # Quotes `__schema__(:association, name)` clauses returning the escaped
  # reflection for each association, plus `__schema__(:associations)` and a
  # nil catch-all (placed after the specific clauses).
  def __assocs__(assocs) do
    quoted =
      Enum.map(assocs, fn {name, refl} ->
        quote do
          def __schema__(:association, unquote(name)) do
            unquote(Macro.escape(refl))
          end
        end
      end)
    assoc_names = Enum.map(assocs, &elem(&1, 0))
    quote do
      def __schema__(:associations), do: unquote(assoc_names)
      unquote(quoted)
      def __schema__(:association, _), do: nil
    end
  end
  @doc false
  # Quotes the `__schema__(:primary_key)` reflection.
  def __primary_key__(primary_key) do
    quote do
      def __schema__(:primary_key), do: unquote(primary_key)
    end
  end
  @doc false
  # Quotes the row-loading entry point used by adapters; delegates to the
  # runtime `Ecto.Schema.__load__/6`.
  def __load__(fields) do
    # TODO: Move this to SQL adapter itself.
    quote do
      def __schema__(:load, source, idx, values, id_types) do
        Ecto.Schema.__load__(__struct__(), source, unquote(fields), idx, values, id_types)
      end
    end
  end
  @doc false
  # Quotes `__schema__(:read_after_writes)`; fields were accumulated in
  # reverse, hence the `Enum.reverse/1`.
  def __read_after_writes__(fields) do
    quote do
      def __schema__(:read_after_writes), do: unquote(Enum.reverse(fields))
    end
  end
  @doc false
  # Quotes the autogenerate reflections: a map of field => type, and the
  # single autogenerated id (`{name, :id | :binary_id}` or nil).
  def __autogenerate__(fields, id) do
    map = fields |> Enum.into(%{}) |> Macro.escape()
    quote do
      def __schema__(:autogenerate), do: unquote(map)
      def __schema__(:autogenerate_id), do: unquote(id)
    end
  end
## Private
defp put_struct_field(mod, name, assoc) do
fields = Module.get_attribute(mod, :struct_fields)
if List.keyfind(fields, name, 0) do
raise ArgumentError, "field/association #{inspect name} is already set on schema"
end
Module.put_attribute(mod, :struct_fields, {name, assoc})
end
defp check_type!(name, type, virtual?) do
cond do
type == :any and not virtual? ->
raise ArgumentError, "only virtual fields can have type :any, " <>
"invalid type for field #{inspect name}"
Ecto.Type.primitive?(type) ->
true
is_atom(type) ->
if Code.ensure_compiled?(type) and function_exported?(type, :type, 0) do
type
else
raise ArgumentError, "invalid or unknown type #{inspect type} for field #{inspect name}"
end
true ->
raise ArgumentError, "invalid type #{inspect type} for field #{inspect name}"
end
end
defp check_default!(name, type, default) do
case Ecto.Type.dump(type, default) do
{:ok, _} ->
:ok
:error ->
raise ArgumentError, "invalid default argument `#{inspect default}` for " <>
"field #{inspect name} of type #{inspect type}"
end
end
defp store_autogenerate!(mod, name, type, true) do
if id = autogenerate_id(type) do
if Module.get_attribute(mod, :ecto_autogenerate_id) do
raise ArgumentError, "only one primary key with ID type may be marked as autogenerated"
end
Module.put_attribute(mod, :ecto_autogenerate_id, {name, id})
else
store_autogenerate!(mod, name, type, false)
end
end
defp store_autogenerate!(mod, name, type, false) do
cond do
_ = autogenerate_id(type) ->
raise ArgumentError, "only primary keys allow :autogenerate for type #{inspect type}, " <>
"field #{inspect name} is not a primary key"
Ecto.Type.primitive?(type) ->
raise ArgumentError, "field #{inspect name} does not support :autogenerate because it uses a " <>
"primitive type #{inspect type}"
# Note the custom type has already been loaded in check_type!/3
not function_exported?(type, :generate, 0) ->
raise ArgumentError, "field #{inspect name} does not support :autogenerate because it uses a " <>
"custom type #{inspect type} that does not define generate/0"
true ->
Module.put_attribute(mod, :ecto_autogenerate, {name, type})
end
end
defp autogenerate_id(type) do
id = if Ecto.Type.primitive?(type), do: type, else: type.type
if id in [:id, :binary_id], do: id, else: nil
end
end
|
lib/ecto/schema.ex
| 0.907967 | 0.666388 |
schema.ex
|
starcoder
|
defmodule GuardianJwks.SecretFetcher do
  @moduledoc """
  An implementation of `Guardian.Token.Jwt.SecretFetcher` for reading public JWKS URLs.

  This secret fetcher is intended to be used when you are _verifying_ a token is signed with
  a well known public key. It only implements the `before_verify/2` callback providing a
  `JOSE.JWK` for the given token. It is important to notice this is not meant for
  use when **GENERATING** a token. So, using this hook with the `Guardian.encode_and_sign`
  function **WILL NOT WORK!!!**

  To use it, pass this hook to Joken either with the `add_hook/2` macro or directly
  to each `Joken` function. Example:

      defmodule MyToken do
        use Joken.Config

        add_hook(GuardianJwks, strategy: MyFetchingStrategy)

        # rest of your token config
      end

  Or:

      Joken.verify_and_validate(config, token, nil, context, [{Joken.Jwks, strategy: MyStrategy}])

  ## Fetching strategy

  Very rarely, your authentication server might rotate or block its keys. Key rotation is the
  process of issuing a new key that in time will replace the older key. This is security hygiene
  and should/might be a regular process.

  Sometimes it is important to block keys because they got leaked or for any other reason.

  Other times you simply don't control the authentication server and can't ensure the keys won't
  change. This is the most common scenario for this hook.

  In these cases (and some others) it is important to have a cache invalidation strategy: all your
  cached keys should be refreshed. Since the best strategy might differ for each use case, there
  is a behaviour that can be customized as the "fetching strategy", that is: when to fetch and re-fetch
  keys. `GuardianJwks` has a default strategy that tries to be smart and cover most use cases by default.
  It combines a time based state machine to avoid overflowing the system with re-fetching keys. If that
  is not a good option for your use case, it can still be configured. Please, see
  `GuardianJwks.SignerMatchStrategy` or `GuardianJwks.DefaultStrategyTemplate` docs for more information.
  """
  @behaviour Guardian.Token.Jwt.SecretFetcher

  # Signing is intentionally unsupported: this fetcher only verifies tokens
  # against public JWKS keys, so we log a warning and return an error.
  @impl true
  def fetch_signing_secret(mod, opts) do
    # The log level may come from the call options or the Guardian module config.
    log_level = opts[:log_level] || apply(mod, :config, [:jwks_log_level])
    GuardianJwks.log(
      :warn,
      log_level,
      "#{inspect(__MODULE__)} does not implement fetch_signing_secret/2."
    )
    {:error, :secret_not_found}
  end
  @doc """
  Fetches a `JOSE.JWK` for the given `token_headers`.

  The JWK returned is based on the value of the `kid` header, which is
  required for JWKS.
  """
  @impl true
  def fetch_verifying_secret(mod, token_headers, opts) do
    log_level = opts[:log_level] || apply(mod, :config, [:jwks_log_level])
    # The key server caches/fetches JWKS keys; it may be overridden per call.
    server = opts[:key_server] || apply(mod, :config, [:jwks_key_server])
    with {:ok, kid} <- GuardianJwks.SecretFetcher.get_kid_from_headers(token_headers),
         {:ok, key} <- server.find_key_by_kid(kid, mod, opts) do
      {:ok, key}
    else
      {:error, reason} ->
        # Any failure (missing kid or unknown key) is logged and collapsed
        # into the generic :secret_not_found contract error.
        GuardianJwks.log(
          :error,
          log_level,
          "#{inspect(mod)} failed fetching verifying secret, reason: #{inspect(reason)}, server: #{inspect(server)}"
        )
        {:error, :secret_not_found}
    end
  end
  @spec get_kid_from_headers(nil | keyword() | map()) ::
          {:error, :no_kid_in_token_headers} | {:ok, any()}
  # Extracts the "kid" (key id) header. `headers["kid"]` returns nil for a
  # missing key (and for nil headers), which maps to the error tuple.
  def get_kid_from_headers(headers) do
    case headers["kid"] do
      kid when not is_nil(kid) -> {:ok, kid}
      _ -> {:error, :no_kid_in_token_headers}
    end
  end
end
|
lib/guardian_jwks/secret_fetcher.ex
| 0.836955 | 0.494202 |
secret_fetcher.ex
|
starcoder
|
defmodule Path do
@moduledoc """
This module provides conveniences for manipulating or
retrieving file system paths.
The functions in this module may receive a char list or
a binary as argument and will return a value of the same
type.
The majority of the functions in this module do not
interact with the file system, except for a few functions
that require it (like `Path.wildcard` and `Path.expand`).
"""
alias :filename, as: FN
@type t :: char_list | atom | binary
@type r :: char_list | binary
  @doc """
  Converts the given path to an absolute one. Differently from
  `Path.expand/1`, no attempt is made to resolve `..`, `.` or `~`.

  ## Unix examples

      Path.absname("foo")
      #=> "/usr/local/foo"

      Path.absname("../x")
      #=> "/usr/local/../x"

  ## Windows examples

      Path.absname("foo")
      #=> "D:/usr/local/foo"

      Path.absname("../x")
      #=> "D:/usr/local/../x"

  """
  def absname(path) do
    # Anchor on the cwd, converted by get_cwd/1 to the same representation
    # (binary or char list) as `path` so the return type matches the input.
    FN.absname(path, get_cwd(path))
  end
  @doc """
  Builds a path from `relative_to` to `path`. If `path` is already
  an absolute path, `relative_to` is ignored. See also `Path.relative/2`.
  Differently from `Path.expand/2`, no attempt is made to
  resolve `..`, `.` or `~`.

  ## Examples

      iex> Path.absname("foo", "bar")
      "bar/foo"

      iex> Path.absname("../x", "bar")
      "bar/../x"

  """
  def absname(path, relative_to) do
    FN.absname(path, relative_to)
  end
  @doc """
  Converts the path to an absolute one and expands
  any `.` and `..` characters and a leading `~`.

  ## Examples

      iex> Path.expand("/foo/bar/../bar")
      "/foo/bar"

  """
  def expand(path) do
    # Expand `~` first, make absolute against the cwd, then collapse
    # `.`/`..` components via normalize/1.
    normalize FN.absname(expand_home(path), get_cwd(path))
  end
  @doc """
  Expands the path relative to the path given as the second argument
  expanding any `.` and `..` characters. If the path is already an
  absolute path, `relative_to` is ignored.

  Note, that this function treats `path` with leading `~` as
  an absolute one.

  The second argument is first expanded to an absolute path.

  ## Examples

      # Assuming that the absolute path to baz is /quux/baz
      Path.expand("foo/bar/../bar", "baz")
      #=> "/quux/baz/foo/bar"

      iex> Path.expand("foo/bar/../bar", "/baz")
      "/baz/foo/bar"

      iex> Path.expand("/foo/bar/../bar", "/baz")
      "/foo/bar"

  """
  def expand(path, relative_to) do
    # Both arguments get `~` expansion; the inner absname anchors `path`
    # on `relative_to`, the outer one anchors the result on the cwd.
    normalize FN.absname(FN.absname(expand_home(path), expand_home(relative_to)), get_cwd(path))
  end
  @doc """
  Returns the path type.

  ## Unix examples

      Path.type("/usr/local/bin")   #=> :absolute
      Path.type("usr/local/bin")    #=> :relative
      Path.type("../usr/local/bin") #=> :relative
      Path.type("~/file")           #=> :relative

  ## Windows examples

      Path.type("D:/usr/local/bin") #=> :absolute
      Path.type("usr/local/bin")    #=> :relative
      Path.type("D:bar.ex")         #=> :volumerelative
      Path.type("/bar/foo.ex")      #=> :volumerelative

  """
  def type(name) when is_list(name) or is_binary(name) do
    # The pathtype helpers return {type, relative_part}; keep the type only.
    case :os.type() do
      { :win32, _ } -> win32_pathtype(name)
      _ -> unix_pathtype(name)
    end |> elem(0)
  end
  @doc """
  Forces the path to be a relative path.

  ## Unix examples

      Path.relative("/usr/local/bin")   #=> "usr/local/bin"
      Path.relative("usr/local/bin")    #=> "usr/local/bin"
      Path.relative("../usr/local/bin") #=> "../usr/local/bin"

  ## Windows examples

      Path.relative("D:/usr/local/bin") #=> "usr/local/bin"
      Path.relative("usr/local/bin")    #=> "usr/local/bin"
      Path.relative("D:bar.ex")         #=> "bar.ex"
      Path.relative("/bar/foo.ex")      #=> "bar/foo.ex"

  """
  def relative(name) do
    # Same dispatch as type/1 but keeping the relative remainder instead.
    case :os.type() do
      { :win32, _ } -> win32_pathtype(name)
      _ -> unix_pathtype(name)
    end |> elem(1)
  end
  # Unix rules: a leading slash (binary or char-list form) means absolute;
  # nested lists/atoms inside char lists are flattened first; everything
  # else is relative.
  defp unix_pathtype(<<?/, relative :: binary>>), do:
    { :absolute, relative }
  defp unix_pathtype([?/|relative]), do:
    { :absolute, relative }
  defp unix_pathtype([list|rest]) when is_list(list), do:
    unix_pathtype(list ++ rest)
  defp unix_pathtype([atom|rest]) when is_atom(atom), do:
    unix_pathtype(atom_to_list(atom) ++ rest)
  defp unix_pathtype(relative), do:
    { :relative, relative }

  # Both separators are accepted on Windows.
  @slash [?/, ?\\]

  # Windows rules, for both binary and char-list inputs: a double slash is
  # absolute (UNC style), a single leading slash is volume-relative (rooted
  # on the current drive), "X:/..." is absolute and "X:..." is
  # volume-relative (relative on drive X). Nested lists/atoms are flattened.
  defp win32_pathtype([list|rest]) when is_list(list), do:
    win32_pathtype(list++rest)
  defp win32_pathtype([atom|rest]) when is_atom(atom), do:
    win32_pathtype(atom_to_list(atom)++rest)
  defp win32_pathtype([char, list|rest]) when is_list(list), do:
    win32_pathtype([char|list++rest])
  defp win32_pathtype(<<c1, c2, relative :: binary>>) when c1 in @slash and c2 in @slash, do:
    { :absolute, relative }
  defp win32_pathtype(<<c, relative :: binary>>) when c in @slash, do:
    { :volumerelative, relative }
  defp win32_pathtype(<<_letter, ?:, c, relative :: binary>>) when c in @slash, do:
    { :absolute, relative }
  defp win32_pathtype(<<_letter, ?:, relative :: binary>>), do:
    { :volumerelative, relative }
  defp win32_pathtype([c1, c2 | relative]) when c1 in @slash and c2 in @slash, do:
    { :absolute, relative }
  defp win32_pathtype([c | relative]) when c in @slash, do:
    { :volumerelative, relative }
  defp win32_pathtype([c1, c2, list|rest]) when is_list(list), do:
    win32_pathtype([c1, c2|list++rest])
  defp win32_pathtype([_letter, ?:, c | relative]) when c in @slash, do:
    { :absolute, relative }
  defp win32_pathtype([_letter, ?: | relative]), do:
    { :volumerelative, relative }
  defp win32_pathtype(relative), do:
    { :relative, relative }
  @doc """
  Returns the given `path` relative to the given `from` path.
  In other words, it tries to strip the `from` prefix from `path`.

  This function does not query the file system, so it assumes
  no symlinks in between the paths.

  In case a direct relative path cannot be found, it returns
  the original path.

  ## Examples

      iex> Path.relative_to("/usr/local/foo", "/usr/local")
      "foo"

      iex> Path.relative_to("/usr/local/foo", "/")
      "usr/local/foo"

      iex> Path.relative_to("/usr/local/foo", "/etc")
      "/usr/local/foo"

  """
  # The mixed binary/char-list heads convert both arguments to the same
  # representation before splitting into components.
  def relative_to(path, from) when is_list(path) and is_binary(from) do
    path = filename_string_to_binary(path)
    relative_to(FN.split(path), FN.split(from), path)
  end
  def relative_to(path, from) when is_binary(path) and is_list(from) do
    relative_to(FN.split(path), FN.split(filename_string_to_binary(from)), path)
  end
  def relative_to(path, from) do
    relative_to(FN.split(path), FN.split(from), path)
  end
  # Equal leading components are consumed pairwise.
  defp relative_to([h|t1], [h|t2], original) do
    relative_to(t1, t2, original)
  end
  # `from` fully consumed: what remains of `path` is the relative result.
  defp relative_to([_|_] = l1, [], _original) do
    FN.join(l1)
  end
  # Paths diverged (or `path` ran out first): return the original path.
  defp relative_to(_, _, original) do
    original
  end
  @doc """
  Returns the last component of the path or the path
  itself if it does not contain any directory separators.

  ## Examples

      iex> Path.basename("foo")
      "foo"

      iex> Path.basename("foo/bar")
      "bar"

      iex> Path.basename("/")
      ""

  """
  def basename(path) do
    FN.basename(path)
  end
  @doc """
  Returns the last component of `path` with the `extension`
  stripped. This function should be used to remove a specific
  extension which may, or may not, be there.

  ## Examples

      iex> Path.basename("~/foo/bar.ex", ".ex")
      "bar"

      iex> Path.basename("~/foo/bar.exs", ".ex")
      "bar.exs"

      iex> Path.basename("~/foo/bar.old.ex", ".ex")
      "bar.old"

  """
  def basename(path, extension) do
    FN.basename(path, extension)
  end
  @doc """
  Returns the directory component of `path`.

  ## Examples

      Path.dirname("/foo/bar.ex")
      #=> "/foo"

      Path.dirname("/foo/bar/baz.ex")
      #=> "/foo/bar"

  """
  def dirname(path) do
    FN.dirname(path)
  end
  @doc """
  Returns the extension of the last component of `path`.

  ## Examples

      iex> Path.extname("foo.erl")
      ".erl"

      iex> Path.extname("~/foo/bar")
      ""

  """
  def extname(path) do
    # Note the Erlang name differs: :filename.extension/1.
    FN.extension(path)
  end
  @doc """
  Returns the `path` with the `extension` stripped.

  ## Examples

      iex> Path.rootname("/foo/bar")
      "/foo/bar"

      iex> Path.rootname("/foo/bar.ex")
      "/foo/bar"

  """
  def rootname(path) do
    FN.rootname(path)
  end
  @doc """
  Returns the `path` with the `extension` stripped. This function should be used to
  remove a specific extension which might, or might not, be there.

  ## Examples

      iex> Path.rootname("/foo/bar.erl", ".erl")
      "/foo/bar"

      iex> Path.rootname("/foo/bar.erl", ".ex")
      "/foo/bar.erl"

  """
  def rootname(path, extension) do
    FN.rootname(path, extension)
  end
  @doc """
  Returns a string with one or more path components joined by the path separator.
  This function should be used to convert a list of strings to a path.

  ## Examples

      iex> Path.join(["~", "foo"])
      "~/foo"

      iex> Path.join(["foo"])
      "foo"

      iex> Path.join(["/", "foo", "bar"])
      "/foo/bar"

  """
  # Folds the list pairwise via join/2; a single char-list element is
  # round-tripped through a binary so do_join/4 only handles binaries.
  def join([name1, name2|rest]), do:
    join([join(name1, name2)|rest])
  def join([name]) when is_list(name), do:
    binary_to_filename_string(do_join(filename_string_to_binary(name), <<>>, [], major_os_type()))
  def join([name]) when is_binary(name), do:
    do_join(name, <<>>, [], major_os_type())
  @doc """
  Joins two paths.

  ## Examples

      iex> Path.join("foo", "bar")
      "foo/bar"

  """
  # The right side is forced relative so absolute `right` does not swallow
  # `left`; mixed binary/char-list/atom arguments are normalized to binaries.
  def join(left, right) when is_binary(left) and is_binary(right), do:
    do_join(left, Path.relative(right), [], major_os_type())
  def join(left, right) when is_binary(left) and is_list(right), do:
    join(left, filename_string_to_binary(right))
  def join(left, right) when is_list(left) and is_binary(right), do:
    join(filename_string_to_binary(left), right)
  def join(left, right) when is_list(left) and is_list(right), do:
    binary_to_filename_string join(filename_string_to_binary(left), filename_string_to_binary(right))
  def join(left, right) when is_atom(left), do:
    join(atom_to_binary(left), right)
  def join(left, right) when is_atom(right), do:
    join(left, atom_to_binary(right))
  defp major_os_type do
    :os.type |> elem(0)
  end
  # Character-by-character join: consumes `left` first, then `relativename`,
  # accumulating the result REVERSED in `result` (a char list).
  # Win32 drive letters are lower-cased; backslashes become slashes;
  # duplicate and "./" separators are collapsed.
  defp do_join(<<uc_letter, ?:, rest :: binary>>, relativename, [], :win32) when uc_letter in ?A..?Z, do:
    do_join(rest, relativename, [?:, uc_letter+?a-?A], :win32)
  defp do_join(<<?\\,rest :: binary>>, relativename, result, :win32), do:
    do_join(<<?/,rest :: binary>>, relativename, result, :win32)
  defp do_join(<<?/,rest :: binary>>, relativename, [?., ?/|result], os_type), do:
    do_join(rest, relativename, [?/|result], os_type)
  defp do_join(<<?/,rest :: binary>>, relativename, [?/|result], os_type), do:
    do_join(rest, relativename, [?/|result], os_type)
  defp do_join(<<>>, <<>>, result, os_type), do:
    list_to_binary(maybe_remove_dirsep(result, os_type))
  # `left` exhausted: insert a separator (unless one is pending) and start
  # consuming the relative part.
  defp do_join(<<>>, relativename, [?:|rest], :win32), do:
    do_join(relativename, <<>>, [?:|rest], :win32)
  defp do_join(<<>>, relativename, [?/|result], os_type), do:
    do_join(relativename, <<>>, [?/|result], os_type)
  defp do_join(<<>>, relativename, result, os_type), do:
    do_join(relativename, <<>>, [?/|result], os_type)
  defp do_join(<<char,rest :: binary>>, relativename, result, os_type) when is_integer(char), do:
    do_join(rest, relativename, [char|result], os_type)
  # Drops a trailing separator (the accumulator is reversed here), keeping
  # it for the root ("/") and for "X:/" on win32.
  defp maybe_remove_dirsep([?/, ?:, letter], :win32), do:
    [letter, ?:, ?/]
  defp maybe_remove_dirsep([?/], _), do:
    [?/]
  defp maybe_remove_dirsep([?/|name], _), do:
    :lists.reverse(name)
  defp maybe_remove_dirsep(name, _), do:
    :lists.reverse(name)
  @doc """
  Returns a list with the path split by the path separator.
  If an empty string is given, returns the root path.

  ## Examples

      iex> Path.split("")
      []

      iex> Path.split("foo")
      ["foo"]

      iex> Path.split("/foo/bar")
      ["/", "foo", "bar"]

  """
  # Work around a bug in Erlang on UNIX
  def split(""), do: []
  def split(path) do
    FN.split(path)
  end
  @doc """
  Traverses paths according to the given `glob` expression.

  The wildcard looks like an ordinary path, except that certain
  "wildcard characters" are interpreted in a special way. The
  following characters are special:

  * `?` - Matches one character.
  * `*` - Matches any number of characters up to the end of
          the filename, the next dot, or the next slash.
  * `**` - Two adjacent `*`'s used as a single pattern will
           match all files and zero or more directories and subdirectories.
  * `[char1,char2,...]` - Matches any of the characters listed. Two characters
                          separated by a hyphen will match a range of characters.
  * `{item1,item2,...}` - Matches one of the alternatives.

  Other characters represent themselves. Only paths that have
  exactly the same character in the same position will match. Note
  that matching is case-sensitive; i.e. "a" will not match "A".

  ## Examples

  Imagine you have a directory called `projects` with three Elixir projects
  inside of it: `elixir`, `ex_doc` and `dynamo`. You can find all `.beam` files
  inside the ebin directory of each project as follows:

      Path.wildcard("projects/*/ebin/**/*.beam")

  If you want to search for both `.beam` and `.app` files, you could do:

      Path.wildcard("projects/*/ebin/**/*.{beam,app}")

  """
  # :filelib.wildcard/1 works on char lists, so binaries are converted in
  # and the matches converted back out. `f(&1)` is pre-1.0 Elixir partial
  # application syntax (today's `&f/1`).
  def wildcard(glob) when is_binary(glob) do
    paths = :filelib.wildcard :unicode.characters_to_list(glob)
    Enum.map paths, :unicode.characters_to_binary(&1)
  end
  def wildcard(glob) when is_list(glob) do
    :filelib.wildcard glob
  end
  ## Helpers

  # Returns the cwd in the same representation as `path`:
  # char list in, char list out; otherwise a binary.
  defp get_cwd(path) when is_list(path), do: System.cwd! |> binary_to_filename_string
  defp get_cwd(_), do: System.cwd!
  # Binary -> char list, raising :badarg on invalid unicode.
  defp binary_to_filename_string(binary) do
    case :unicode.characters_to_list(binary) do
      { :error, _, _ } ->
        :erlang.error(:badarg)
      list when is_list(list) ->
        list
    end
  end
  # Char list (possibly nested) -> binary in the file system's native
  # encoding, raising :badarg on invalid data.
  defp filename_string_to_binary(list) do
    case :unicode.characters_to_binary(:filename.flatten(list), :unicode, :file.native_name_encoding()) do
      { :error, _, _ } ->
        :erlang.error(:badarg)
      bin when is_binary(bin) ->
        bin
    end
  end
  # Replaces a leading "~" with the user's home directory; any other path
  # is returned unchanged. Used by expand/1 and expand/2.
  defp expand_home(<<?~, rest :: binary>>) do
    System.user_home! <> rest
  end
  defp expand_home('~' ++ rest) do
    (System.user_home! |> binary_to_filename_string) ++ rest
  end
  defp expand_home(other), do: other
  # Collapses "." and ".." components: ".." pops the previously accepted
  # component (when there is one), "." is dropped, everything else is kept.
  defp normalize(path), do: normalize(FN.split(path), [])
  defp normalize([top|t], [_|acc]) when top in ["..", '..'] do
    normalize t, acc
  end
  defp normalize([top|t], acc) when top in [".", '.'] do
    normalize t, acc
  end
  defp normalize([h|t], acc) do
    normalize t, [h|acc]
  end
  defp normalize([], acc) do
    join Enum.reverse(acc)
  end
end
|
lib/elixir/lib/path.ex
| 0.78899 | 0.475423 |
path.ex
|
starcoder
|
if Code.ensure_loaded?(Plug) do
  defmodule Guardian.Plug.EnsureAuthenticated do
    @moduledoc """
    Ensures a valid token was provided and has been verified on the request.

    When none is found, the pipeline's error handler is invoked with an
    `:unauthenticated` error. Like every Guardian plug, this one needs a
    Guardian pipeline: an implementation module, an error handler and a key,
    configured either:

    1. Upstream on the connection with `plug Guardian.Pipeline`
    2. Upstream on the connection with `Guardian.Pipeline.{put_module, put_error_handler, put_key}`
    3. Inline with an option of `:module`, `:error_handler`, `:key`

    Options:

    * `claims` - The literal claims to check to ensure that a token is valid
    * `max_age` - If the token has an "auth_time" claim, check it is not older than the maximum age.
    * `key` - The location to find the information in the connection. Defaults to: `default`
    * `halt` - Whether to halt the connection in case of error. Defaults to `true`

    ## Example

    ```elixir
    # setup the upstream pipeline
    plug Guardian.Plug.EnsureAuthenticated, claims: %{"typ" => "access"}
    plug Guardian.Plug.EnsureAuthenticated, key: :secret
    ```
    """
    @behaviour Plug

    @impl Plug
    @spec init(Keyword.t()) :: Keyword.t()
    def init(opts), do: opts

    @impl Plug
    @spec call(conn :: Plug.Conn.t(), opts :: Keyword.t()) :: Plug.Conn.t()
    def call(conn, opts) do
      conn
      |> Guardian.Plug.current_token(opts)
      |> check_token(conn, opts)
      |> respond()
    end

    @spec check_token(Guardian.Token.token() | nil, Plug.Conn.t(), Keyword.t()) ::
            {{:ok, Guardian.Token.claims()} | {:error, any}, Plug.Conn.t(), Keyword.t()}
    # No token on the connection: unauthenticated.
    defp check_token(nil, conn, opts), do: {{:error, :unauthenticated}, conn, opts}

    # A token is present: validate the claims stored on the connection.
    defp check_token(_token, conn, opts) do
      result =
        conn
        |> Guardian.Plug.current_claims(opts)
        |> check_claims(opts)

      {result, conn, opts}
    end

    @spec respond({{:ok, Guardian.Token.claims()} | {:error, any}, Plug.Conn.t(), Keyword.t()}) :: Plug.Conn.t()
    # Valid claims: pass the connection through untouched.
    defp respond({{:ok, _claims}, conn, _opts}), do: conn

    # Any error: delegate to the pipeline's error handler and, by default,
    # halt the connection.
    defp respond({{:error, reason}, conn, opts}) do
      handler = Guardian.Plug.Pipeline.fetch_error_handler!(conn, opts)

      conn
      |> handler.auth_error({:unauthenticated, reason}, opts)
      |> Guardian.Plug.maybe_halt(opts)
    end

    @spec check_claims(Guardian.Token.claims(), Keyword.t()) :: {:ok, Guardian.Token.claims()} | {:error, any}
    # Checks the literal claims given via the :claims option (nil checks nothing).
    defp check_claims(claims, opts) do
      to_check = Keyword.get(opts, :claims)
      Guardian.Token.Verify.verify_literal_claims(claims, to_check, opts)
    end
  end
end
lib/guardian/plug/ensure_authenticated.ex
| 0.852552 | 0.821403 |
ensure_authenticated.ex
|
starcoder
|
defmodule PipeHelpers do
  @moduledoc """
  Helper for piping data
  """

  @doc """
  Wrap into ok tuple
  ## Example
      iex> socket = "socket"
      ...> socket |> ok()
      {:ok, "socket"}
  """
  def ok(val), do: {:ok, val}

  @doc """
  Wrap into noreply tuple (genserver and phoenix socket format)
  ## Example
      iex> state = "gen server state"
      ...> state |> noreply()
      {:noreply, "gen server state"}
  """
  def noreply(val), do: {:noreply, val}

  @doc """
  Wrap into reply tuple (genserver and phoenix socket format)
  ## Example
      iex> state = "gen server state"
      ...> r = "reply"
      ...> state |> reply(r)
      {:reply, "reply", "gen server state"}
  """
  def reply(state, reply), do: {:reply, reply, state}

  @doc """
  Wrap into tuple pair
  ## Example
      iex> 1 |> pair(2)
      {2, 1}
  """
  def pair(val, res), do: {res, val}

  @doc """
  Wrap into tuple rpair
  ## Example
      iex> 1 |> rpair(2)
      {1, 2}
  """
  def rpair(val, res), do: {val, res}

  @doc """
  Unwrap from a tuple pair
  ## Example
      iex> {:ok, 1} |> unpair()
      1
  """
  def unpair({_tag, value}), do: value

  @doc """
  Tap only if ok tuple
  ## Example
      iex> {:ok, "somedata"} |> tap_ok(fn -> "only executed when {:ok, ...}" end)
      {:ok, "somedata"}
      iex> {:ok, "somedata"} |> tap_ok(fn _val ->
      ...>   _ = "only executed when {:ok, ...}"
      ...>   "val is available as optional argument"
      ...> end)
      {:ok, "somedata"}
  """
  def tap_ok(result, fun) do
    # Run the side effect (if the tuple matches) and hand back the original.
    _ = then_ok(result, fun)
    result
  end

  @doc """
  Tap only if value match
  ## Example
      iex> true |> tap_on(true, fn -> "only executed when true" end)
      true
  """
  def tap_on(result, value, fun) do
    _ = then_on(result, value, fun)
    result
  end

  @doc """
  Then only if ok tuple
  ## Example
      iex> {:ok, "somedata"} |> then_ok(fn -> "only executed when {:ok, ...}" end)
      "only executed when {:ok, ...}"
      iex> {:ok, "somedata"} |> then_ok(fn val ->
      ...>   _ = "only executed when {:ok, ...}"
      ...>   _ = "val is available as optional argument"
      ...>   val
      ...> end)
      "somedata"
  """
  def then_ok({:ok, _ok_val}, fun) when is_function(fun, 0), do: fun.()
  def then_ok({:ok, ok_val}, fun) when is_function(fun, 1), do: fun.(ok_val)
  def then_ok({:ok, _ok_val}, _fun), do: raise "then_ok function arity can only be 0 or 1"
  def then_ok(result, _fun), do: result

  @doc """
  Then only if value match
  """
  # Head matches only when both arguments are equal; otherwise the value
  # passes through unchanged (last clause).
  def then_on(value, value, fun) when is_function(fun, 0), do: fun.()
  def then_on(value, value, fun) when is_function(fun, 1), do: fun.(value)
  def then_on(value, value, _fun) when not is_nil(value) or is_nil(value),
    do: raise "then_on function arity can only be 0 or 1"

  def then_on(result, _value, _fun), do: result
end
|
lib/pipe_helpers.ex
| 0.806738 | 0.406361 |
pipe_helpers.ex
|
starcoder
|
defmodule Game do
  @enforce_keys [:players, :turns, :last_player, :token_length]
  defstruct @enforce_keys

  # Board coordinates range over 0..2 in both dimensions (3x3 grid).
  @board_bound 0..2

  # Builds the initial state: one empty move-set per player token.
  # `:player` is a sentinel for "nobody has moved yet" — it can never
  # equal a real token, so player one is free to move first.
  def new_game(%Player{token: token_one} = player_one,
               %Player{token: token_two} = player_two,
               token_length) do
    %Game{
      players: %{player_one: player_one, player_two: player_two},
      turns: %{token_one => MapSet.new(), token_two => MapSet.new()},
      last_player: :player,
      token_length: token_length
    }
  end

  # Validates and applies a move. Checks run in order: turn ownership,
  # cell availability, board bounds.
  def play_turn(%Game{turns: turns, last_player: last_player} = state, player, cell) do
    cond do
      last_player == player.token ->
        {:error, :not_your_turn}

      cell_taken?(turns, cell) ->
        {:error, :cell_taken}

      not in_bounds?(cell) ->
        {:error, :out_of_bounds}

      true ->
        updated = update_in(state.turns[player.token], &MapSet.put(&1, cell))
        {:ok, %{updated | last_player: player.token}}
    end
  end

  def in_bounds?({col, row}) do
    col in @board_bound and row in @board_bound
  end

  def cell_taken?(turns, cell) do
    turns
    |> Map.values()
    |> Enum.any?(fn moves -> Enum.member?(moves, cell) end)
  end

  # The game is drawn when every cell on the board has been played.
  defp draw?(turns) do
    current_moves(turns) >= :math.pow(Enum.count(@board_bound), 2)
  end

  defp current_moves(turns) do
    turns
    |> Map.values()
    |> Enum.map(&MapSet.size/1)
    |> Enum.sum()
  end

  # A player wins when their move-set contains any full row, column or diagonal.
  defp player_won?(player_turns) do
    winning_lines()
    |> Enum.map(&MapSet.new/1)
    |> Enum.any?(fn line -> MapSet.subset?(line, player_turns) end)
  end

  defp winning_lines do
    win(@board_bound, :horizontal) ++ win(@board_bound, :vertical) ++ win(@board_bound, :diagonal)
  end

  defp win(bound, :horizontal) do
    for col <- bound, do: for(row <- bound, do: {row, col})
  end

  defp win(bound, :vertical) do
    for col <- bound, do: for(row <- bound, do: {col, row})
  end

  defp win(bound, :diagonal) do
    max = Enum.count(bound)
    [for(i <- bound, do: {i, i})] ++ [for(i <- bound, do: {i, max - i - 1})]
  end

  # Reports the game outcome: a winner, a draw, or still :underway.
  def status(%Game{players: players, turns: turns}) do
    cond do
      player_won?(turns[players.player_one.token]) ->
        {:ended, {:winner, players.player_one.name}}

      player_won?(turns[players.player_two.token]) ->
        {:ended, {:winner, players.player_two.name}}

      draw?(turns) ->
        {:ended, :draw}

      true ->
        :underway
    end
  end
end
|
lib/game.ex
| 0.625667 | 0.625138 |
game.ex
|
starcoder
|
defmodule Stripe.InvoiceItem do
  @moduledoc """
  ## Attributes
  - `id` - `String`
  - `object` - `String`, value is "invoiceitem"
  - `livemode` - `Boolean`
  - `amount` - `Integer`
  - `currency` - `String`
  - `customer` - `String`
  - `date` - `Tuple`
  - `proration` - `Boolean` - Whether or not the invoice item was created
                             automatically as a proration adjustment when the customer
                             switched plans
  - `description` - `String`
  - `invoice` - `String`
  - `metadata` - `Keyword` - A set of key/value pairs that you can
                             attach to an invoice item object. It can be useful for storing
                             additional information about the invoice item in a structured format.
  - `subscription` - `String` - The subscription that this invoice item
                                has been created for, if any.
  """

  defstruct id: nil,
            object: "invoiceitem",
            livemode: nil,
            amount: nil,
            currency: nil,
            customer: nil,
            date: nil,
            proration: nil,
            description: nil,
            invoice: nil,
            metadata: nil,
            subscription: nil

  @type id :: binary
  @type object :: binary
  @type livemode :: boolean
  @type amount :: pos_integer
  @type currency :: binary
  @type customer :: binary
  @type date :: {{1970..10000, 1..12, 1..31}, {0..23, 0..59, 0..59}}
  @type proration :: boolean
  @type description :: binary
  @type invoice :: binary
  @type metadata :: Keyword.t
  @type subscription :: binary

  @type t :: %Stripe.InvoiceItem{
          id: id,
          object: object,
          livemode: livemode,
          amount: amount,
          currency: currency,
          customer: customer,
          date: date,
          proration: proration,
          description: description,
          invoice: invoice,
          metadata: metadata,
          subscription: subscription
        }

  # Fields copied verbatim from the API payload; :date is handled
  # separately because it needs timestamp conversion.
  @verbatim_fields [:id, :object, :livemode, :amount, :currency, :customer,
                    :proration, :description, :invoice, :metadata, :subscription]

  @doc """
  Builds a `Stripe.InvoiceItem` struct from a keyword list as returned by
  the Stripe API, converting the `:date` Unix timestamp into a datetime
  tuple. Missing keys become `nil` (except `:object`, which keeps its
  struct default only if explicitly present — absent keys yield `nil`,
  matching the original Access-based lookups).
  """
  def from_keyword(data) do
    fields = Enum.map(@verbatim_fields, fn key -> {key, data[key]} end)
    date = Stripe.Util.datetime_from_timestamp(data[:date])
    struct(Stripe.InvoiceItem, [{:date, date} | fields])
  end
end
|
lib/stripe/invoice_item.ex
| 0.79546 | 0.40248 |
invoice_item.ex
|
starcoder
|
defmodule Mutiny do
  @moduledoc """
  Functions for generating database commands that enforce immutability.
  """

  alias Ecto.Migration.Table
  alias Mutiny.Adapter

  @doc """
  Injects shorthand Mutiny functions that implicitly pass the specified
  `adapter`. These functions include:
  * `protect/1` - Makes a table immutable
  * `protect/3` - Makes specific columns of a table immutable
  * `create_prevent_update_function/0` - Creates the database function Mutiny calls
  * `drop_prevent_update_function/0` - Drops the database function Mutiny calls
  When `use`ing this module, a database adapter module should be specified. The
  currently available modules are:
  * `Mutiny.Adapters.Postgres`
  Note that `Mutiny` exposes public versions of all these functions, should you
  desire to call them directly.
  ## Options
  * `adapter` - The Mutiny database adapter to use
  ## Examples
      defmodule MyApp.Repo.Migrations.CreateSnapshots do
        use Ecto.Migration
        use Mutiny, adapter: Mutiny.Adapters.Postgres
        create table("snapshots") do
          add :data, :map
        end
        protect(table("snapshots"))
      end
  """
  @spec __using__(opts :: keyword()) :: Macro.t()
  defmacro __using__(opts) do
    quote do
      @adapter unquote(Keyword.fetch!(opts, :adapter))

      # NOTE: calls below are fully qualified on purpose. The previous
      # version did `import unquote(__MODULE__)`, which conflicts with the
      # locally defined protect/2, protect/3 and the 0-arity functions and
      # made any `use Mutiny` fail to compile ("imported ... conflicts
      # with local function").
      def protect(table) do
        unquote(__MODULE__).protect(table, @adapter)
      end

      # BUG FIX: previously passed `(table, columns, opts, @adapter)`,
      # i.e. the options list in the adapter position of Mutiny.protect/4.
      def protect(table, columns, opts \\ []) do
        unquote(__MODULE__).protect(table, columns, @adapter, opts)
      end

      def create_prevent_update_function do
        unquote(__MODULE__).create_prevent_update_function(@adapter)
      end

      def drop_prevent_update_function do
        unquote(__MODULE__).drop_prevent_update_function(@adapter)
      end
    end
  end

  @doc """
  Returns a command to create a database trigger that prevents `UPDATE`s to the
  given `Ecto.Migration.Table`.
  An `adapter` module that implements `Mutiny.Adapter` should be specified in
  correspondence with your Ecto adapter.
  ## Examples
      iex> table("users")
      ...> |> protect(Mutiny.Adapters.Postgres)
      ...> |> execute()
      :ok
  """
  @spec protect(Table.t(), atom()) :: String.t()
  def protect(table, adapter) do
    adapter.protect(table)
  end

  @doc """
  Returns a command to create a database trigger that prevents `UPDATE`s to the
  given `columns` of the `Ecto.Migration.Table`.
  An `adapter` module that implements `Mutiny.Adapter` should be specified in
  correspondence with your Ecto adapter.
  Options may be specified as an `opts` list that will be passed to the given
  adapter.
  ## Examples
      iex> table("users")
      ...> |> protect([:uuid, :birthdate], Mutiny.Adapters.Postgres)
      ...> |> execute()
      :ok
      iex> table("users")
      ...> |> protect([:uuid], Mutiny.Adapters.Postgres, nullable: true)
      ...> |> execute()
      :ok
  """
  @spec protect(Table.t(), Adapter.columns(), atom(), Adapter.opts()) :: String.t()
  def protect(table, columns, adapter, opts \\ []) do
    adapter.protect(table, columns, opts)
  end

  @doc """
  Returns a function that can be executed to prevent `UPDATE`s to a database
  table. This function only needs to be executed once per database.
  ## Examples
      iex> Mutiny.Adapters.Postgres
      ...> |> create_prevent_update_function()
      ...> |> execute()
      :ok
  """
  @spec create_prevent_update_function(atom()) :: String.t()
  def create_prevent_update_function(adapter) do
    adapter.create_prevent_update_function()
  end

  @doc """
  Drops the function created by `create_prevent_update_function/0`, if it
  exists. Useful when rolling back a migration.
  ## Examples
      iex> Mutiny.Adapters.Postgres
      ...> |> drop_prevent_update_function()
      ...> |> execute()
      :ok
  """
  @spec drop_prevent_update_function(atom()) :: String.t()
  def drop_prevent_update_function(adapter) do
    adapter.drop_prevent_update_function()
  end
end
|
lib/mutiny.ex
| 0.889117 | 0.469642 |
mutiny.ex
|
starcoder
|
defmodule CobolToElixirCase do
import ExUnit.CaptureIO
alias CobolToElixir.Util
require ExUnit.Assertions
require Logger
# Test-case template: `use CobolToElixirCase` pulls in ExUnit.Case and
# imports the helpers below into the test module.
defmacro __using__([]) do
quote do
use ExUnit.Case
import CobolToElixirCase
end
end
@doc """
Compiles the cobol code and ensures there are no errors/warnings
"""
def validate_cobol_code(cobol) do
# Compile in a throwaway directory so artifacts never leak between tests.
tmp_folder = Util.generate_tmp_folder()
try do
case Util.compile_cobol(cobol, tmp_folder) do
# Empty output + exit status 0 means a clean, warning-free compile.
{"", 0} -> :ok
# Exit status 1 carries the compiler diagnostics in `output`.
{output, 1} -> raise "Error compiling cobol:\n#{output}"
end
after
# Always remove the scratch directory, even when compilation raises.
File.rm_rf!(tmp_folder)
end
end
@doc """
Compiles and then runs the given cobol code with the optional list of inputs being sent.
Input is a list of timeout/command values, e.g. [{1000, "John"}, {500, "Mike"}]
would send "John" after 1 second, then "Mike" after another half a second.
Returns map containing program output and any new files.
"""
def execute_cobol_code!(cobol, input \\ []) do
Util.execute_cobol_code!(cobol, input)
end
@doc """
Compiles and loads the given code into memory, runs the module, and unloads the module.
Accepts list of input same as `execute_cobol_code!/2`.
Returns map containing program output and any new files.
"""
def execute_elixir_code(str, module, input, tmp_folder \\ nil) do
# Capture stderr during compilation so compiler warnings do not pollute
# the test run; they are logged instead.
log =
capture_io(:stderr, fn ->
Code.compile_string(str)
{:module, ^module} = Code.ensure_loaded(module)
end)
if log != "" do
Logger.info("compiler warning compiling Elixir: #{log}")
end
# Pre-load all inputs into the test process mailbox, then run main/0 and
# capture its stdout. NOTE(review): unlike the cobol path, the timeout
# part of each input tuple is ignored here — messages are sent upfront.
io =
capture_io(fn ->
Enum.each(input, &send(self(), {:input, elem(&1, 1)}))
apply(module, :main, [])
end)
# Unload the dynamically compiled module so repeated test runs (or other
# tests compiling the same module name) start from a clean slate.
true = :code.delete(module)
:code.purge(module)
# When a scratch folder was given, report any files the program created.
files =
case tmp_folder do
nil -> nil
_ -> Util.get_new_files([], File.ls!(tmp_folder), tmp_folder)
end
%{output: io, files: files}
end
@doc """
This is the one-stop-shop for ensuring cobol text acts the same as the converted elixir version.
Given cobol text, a module name, expected output, and a list of input, it will
1) Compile and execute the cobol code, sending the specified inputs
2) Run CobolToElixir and convert the cobol code to Elixir
3) Load and run the Elixir code, sending the specified inputs
4) Assert that the output of both the cobol and Elixir programs matches the specififed output
"""
def assert_output_equal(cobol_text, module, opts \\ []) when is_list(opts) do
output = Keyword.get(opts, :output, "")
input = Keyword.get(opts, :input, [])
%{output: cobol_output, files: cobol_files} = execute_cobol_code!(cobol_text, input)
# `output: nil` skips the expected-output check and only compares
# cobol vs elixir behaviour against each other.
if !is_nil(output) do
ExUnit.Assertions.assert(cobol_output == output)
end
tmp_folder = Util.generate_tmp_folder()
{:ok, elixir_text} = CobolToElixir.convert(cobol_text, accept_via_message: true, io_dir: tmp_folder)
try do
%{output: elixir_output, files: elixir_files} = execute_elixir_code(elixir_text, module, input, tmp_folder)
ExUnit.Assertions.assert(cobol_output == elixir_output)
ExUnit.Assertions.assert(cobol_files == elixir_files)
after
File.rm_rf!(tmp_folder)
end
end
end
|
test/support/cobol_to_elixir_case.ex
| 0.532911 | 0.514217 |
cobol_to_elixir_case.ex
|
starcoder
|
defmodule RecoverableStream do
  @moduledoc """
  By extracting evaluation of the source stream into a separate process
  `RecoverableStream` provides a way to isolate upstream errors
  and recover from them.
  This module contains public API.
  """

  defmodule TasksPool do
    @moduledoc """
    A default `Supervisor` for tasks spawned by `RecoverableStream`.
    """

    @doc """
    A template `child_spec` for a custom `Task.Supervisor`.
    ## Example
        iex> {:ok, _} = Supervisor.start_child(
        ...>        RecoverableStreamEx.Supervisor,
        ...>        RecoverableStream.TasksPool.child_spec(:my_sup))
        ...> RecoverableStream.run(
        ...>     fn x -> Stream.repeatedly(fn -> x end) end,
        ...>     task_supervisor: :my_sup
        ...> ) |> Stream.take(2) |> Enum.into([])
        [nil, nil]
    """
    def child_spec(name) do
      # Map-based child spec; the previous tuple form relied on the
      # deprecated Supervisor.Spec module. Both forms are accepted by
      # Supervisor.start_child/2 and supervision trees.
      %{
        id: name,
        start: {Task.Supervisor, :start_link, [[name: name]]},
        type: :supervisor
      }
    end
  end

  defmodule RecoverableStreamCtx do
    @moduledoc false
    # Internal reducer state threaded through Stream.resource/3 callbacks.
    defstruct [
      :task,
      :supervisor,
      :reply_ref,
      :retries_left,
      :stream_fun,
      :wrapper_fun,
      last_value: nil
    ]
  end

  @type last_value_t :: nil | any()
  @type stream_arg_t :: any()
  @type stream_fun ::
          (last_value_t() -> Enumerable.t())
          | (last_value_t(), stream_arg_t() -> Enumerable.t())
  @type inner_reduce_fun :: (stream_arg_t() -> none())
  @type wrapper_fun :: (inner_reduce_fun() -> none())
  @type run_option ::
          {:retry_attempts, non_neg_integer()}
          | {:wrapper_fun, wrapper_fun()}
          | {:task_supervisor, atom() | pid()}

  @spec run(stream_fun(), [run_option()]) :: Enumerable.t()
  @doc """
  Evaluates passed `t:stream_fun/0` inside a new `Task` then runs
  produced stream, forwarding data back to the caller.
  Returns a new `Stream` that gathers data forwarded by the `Task`.
  Data is forwarded element by element. Batching is to be implemented
  explicitly. For example `Postgrex.stream/3` sends data in chunks
  by default.
  ## Stream function
  `t:stream_fun/0` must be a function that accepts one or two arguments.
  - The first argument is either `nil` or the last value received from a
    stream before recovery.
  - The second argument is an arbitrary term passed from `t:wrapper_fun/0`
  The function should return a `Stream` (although, any `Enumerable` could work).
  ## Example
      iex> gen_stream_f = fn
      ...>   nil -> Stream.iterate(1, fn x when x < 2 -> x + 1 end)
      ...>   x -> Stream.iterate(x + 1, &(&1+1))
      ...> end
      iex> RecoverableStream.run(gen_stream_f)
      ...> |> Stream.take(4)
      ...> |> Enum.into([])
      [1, 2, 3, 4]
  ## Options
  - `:retry_attempts` (defaults to `1`) the total number of times
    error recovery is performed before an error is propagated.
    Retries counter is **not** reset upon a successful recovery!
  - `:task_supervisor` either pid or a name of `Task.Supervisor`
    to supervise a stream-reducer `Task`.
    (defaults to `RecoverableStream.TaskPool`)
    See `RecoverableStream.TasksPool.child_spec/1` for details.
  - `:wrapper_fun` is a funciton that wraps a stream reducer running
    inside a `Task` (defaults to `fun f -> f.(%{}) end`).
    Useful when the `t:stream_fun/0` must be run within a certain
    context. E.g. `Postgrex.stream/3` only works inside
    `Postgrex.transaction/3`.
    See [Readme](./readme.html#a-practical-example)
    for a more elaborate example.
  """
  def run(new_stream_fun, options \\ []) do
    retries = Keyword.get(options, :retry_attempts, 1)
    wfun = Keyword.get(options, :wrapper_fun, fn f -> f.(%{}) end)
    supervisor = Keyword.get(options, :task_supervisor, TasksPool)

    Stream.resource(
      fn -> start_fun(new_stream_fun, wfun, supervisor, retries, nil) end,
      &next_fun/1,
      &after_fun/1
    )
  end

  # Spawns the stream-reducer task (unlinked, so its crash reaches next_fun/1
  # as a :DOWN message rather than killing the caller) and builds the context.
  defp start_fun(new_stream_fun, wrapper_fun, supervisor, retries, last_value)
       when (is_function(new_stream_fun, 1) or is_function(new_stream_fun, 2)) and
              is_integer(retries) and retries >= 0 do
    owner = self()
    reply_ref = make_ref()

    t =
      Task.Supervisor.async_nolink(supervisor, fn ->
        wrapper_fun.(fn stream_arg ->
          if is_function(new_stream_fun, 1) do
            new_stream_fun.(last_value)
          else
            new_stream_fun.(last_value, stream_arg)
          end
          |> stream_reducer(owner, reply_ref)
        end)
      end)

    %RecoverableStreamCtx{
      task: t,
      supervisor: supervisor,
      reply_ref: reply_ref,
      retries_left: retries,
      stream_fun: new_stream_fun,
      wrapper_fun: wrapper_fun,
      last_value: last_value
    }
  end

  # Demand-driven pull: signal readiness, then wait for one element,
  # normal completion, or a crash (which triggers recovery while retries remain).
  defp next_fun(ctx) do
    %{
      task: %Task{ref: tref, pid: tpid},
      supervisor: sup,
      reply_ref: rref,
      retries_left: retries
    } = ctx

    send(tpid, {:ready, rref})

    receive do
      {^tref, {:done, ^rref}} ->
        Process.demonitor(tref, [:flush])
        {:halt, ctx}

      # TODO add an optional retries reset
      {:data, ^rref, x} ->
        # Remember the last delivered element so a restarted stream_fun can
        # resume from it.
        {[x], %{ctx | last_value: x}}

      {:DOWN, ^tref, _, _, :normal} ->
        {:halt, ctx}

      {:DOWN, ^tref, _, _, reason} when retries < 1 ->
        exit({reason, {__MODULE__, :next_fun, ctx}})

      {:DOWN, ^tref, _, _, _reason} ->
        {[], start_fun(ctx.stream_fun, ctx.wrapper_fun, sup, retries - 1, ctx.last_value)}
    end

    # TODO consider adding a timeout
  end

  # Cleanup callback: ask the task to stop; if it does not exit within 100ms,
  # terminate it through its supervisor.
  defp after_fun(%{task: %Task{ref: tref, pid: tpid}, reply_ref: rref} = ctx) do
    send(tpid, {:done, rref})

    receive do
      {:DOWN, ^tref, _, _, :normal} ->
        :ok

      {:DOWN, ^tref, _, _, reason} ->
        exit({reason, {__MODULE__, :after_fun, ctx}})
    after
      100 ->
        Process.demonitor(tref, [:flush])
        # BUG FIX: terminate the task under the supervisor it was actually
        # started with; previously this hard-coded TasksPool, which is wrong
        # whenever a custom :task_supervisor option was given.
        Task.Supervisor.terminate_child(ctx.supervisor, tpid)
    end
  end

  # Runs inside the task: emits one element per {:ready, ref} request,
  # stops on {:done, ref} or when the owner process dies.
  defp stream_reducer(stream, owner, reply_ref) do
    mon_ref = Process.monitor(owner)

    stream
    |> Stream.each(fn x ->
      receive do
        {:done, ^reply_ref} ->
          exit(:normal)

        {:ready, ^reply_ref} ->
          send(owner, {:data, reply_ref, x})

        {:DOWN, ^mon_ref, _, ^owner, reason} ->
          exit(reason)
      end

      # TODO consider adding a timeout
    end)
    |> Stream.run()

    {:done, reply_ref}
  end
end
|
lib/recoverable_stream.ex
| 0.854551 | 0.592784 |
recoverable_stream.ex
|
starcoder
|
defmodule Composer.AST do
@moduledoc """
Converts the custom AST into elixir AST
"""
@doc """
Converts the custom AST into elixir AST
## Example
iex> Composer.AST.do_convert({ :+, [ 10, 20 ] })
{:+, [context: Composer.AST, import: Kernel], [ 10, 20 ]}
"""
# A {:block, [...]} node becomes an Elixir __block__ with each child converted.
def do_convert({ :block, args }) do
{ :__block__, [], Enum.map(args, &do_convert/1) }
end
# Variables are emitted through var!/1 so they resolve in the caller's
# context rather than being hygienically renamed.
def do_convert({ :var, var }) do
{
:var!,
[ context: Composer.AST, import: Kernel ],
[ { var, [], Elixir }]
}
end
# --- Unary operators -------------------------------------------------------
# Each clause quotes the corresponding Kernel operator/function applied to
# the converted operand. Because quoting happens inside this module, the
# resulting AST carries [context: Composer.AST, import: Kernel] metadata
# (as shown in the doctest above) — that exact metadata is part of the
# observable output.
def do_convert({ :!, [ var ] }) do
quote do
!unquote(do_convert(var))
end
end
def do_convert({ :+, [ var ] }) do
quote do
+unquote(do_convert(var))
end
end
def do_convert({ :-, [ var ] }) do
quote do
-unquote(do_convert(var))
end
end
def do_convert({ :abs, [ var ]}) do
quote do
abs(unquote(do_convert(var)))
end
end
# --- Binary operators ------------------------------------------------------
# One clause per supported operator; both operands are converted recursively.
def do_convert({ :=, [ left, right ] }) do
quote do
unquote(do_convert(left)) = unquote(do_convert(right))
end
end
def do_convert({ :!=, [ left, right ] }) do
quote do
unquote(do_convert(left)) != unquote(do_convert(right))
end
end
def do_convert({ :!==, [ left, right ] }) do
quote do
unquote(do_convert(left)) !== unquote(do_convert(right))
end
end
def do_convert({ :&&, [ left, right ] }) do
quote do
unquote(do_convert(left)) && unquote(do_convert(right))
end
end
def do_convert({ :||, [ left, right ] }) do
quote do
unquote(do_convert(left)) || unquote(do_convert(right))
end
end
def do_convert({ :*, [ left, right ] }) do
quote do
unquote(do_convert(left)) * unquote(do_convert(right))
end
end
def do_convert({ :++, [ left, right ] }) do
quote do
unquote(do_convert(left)) ++ unquote(do_convert(right))
end
end
def do_convert({ :+, [ left, right ] }) do
quote do
unquote(do_convert(left)) + unquote(do_convert(right))
end
end
def do_convert({ :--, [ left, right ] }) do
quote do
unquote(do_convert(left)) -- unquote(do_convert(right))
end
end
def do_convert({ :-, [ left, right ] }) do
quote do
unquote(do_convert(left)) - unquote(do_convert(right))
end
end
def do_convert({ :/, [ left, right ] }) do
quote do
unquote(do_convert(left)) / unquote(do_convert(right))
end
end
def do_convert({ :<, [ left, right ] }) do
quote do
unquote(do_convert(left)) < unquote(do_convert(right))
end
end
def do_convert({ :<=, [ left, right ] }) do
quote do
unquote(do_convert(left)) <= unquote(do_convert(right))
end
end
def do_convert({ :<>, [ left, right ] }) do
quote do
unquote(do_convert(left)) <> unquote(do_convert(right))
end
end
def do_convert({ :==, [ left, right ] }) do
quote do
unquote(do_convert(left)) == unquote(do_convert(right))
end
end
def do_convert({ :===, [ left, right ] }) do
quote do
unquote(do_convert(left)) === unquote(do_convert(right))
end
end
def do_convert({ :>, [ left, right ] }) do
quote do
unquote(do_convert(left)) > unquote(do_convert(right))
end
end
def do_convert({ :>=, [ left, right ] }) do
quote do
unquote(do_convert(left)) >= unquote(do_convert(right))
end
end
def do_convert({ :rem, [ left, right ] }) do
quote do
rem(unquote(do_convert(left)), unquote(do_convert(right)))
end
end
# Three-element :if node: condition, then-branch, else-branch.
def do_convert({ :if, [ conditions, first_clause, second_clause ] }) do
quote do
if(unquote(do_convert(conditions))) do
unquote(do_convert(first_clause))
else
unquote(do_convert(second_clause))
end
end
end
# A :list node converts each element; `quote do: unquote(x)` is an identity
# on already-built AST, so this is effectively a map over do_convert/1.
def do_convert({ :list, elements }) do
Enum.map(elements, fn(element) ->
quote do: unquote do_convert(element)
end)
end
# A :sum node folds the converted arguments into a right-leaning chain of
# bare {:+, [], [a, acc]} nodes (note: no import metadata here, unlike the
# quoted binary :+ clause above).
def do_convert({ :sum, arguments }) do
[ h | t ] = arguments
value = quote do: unquote do_convert(h)
Enum.reduce(t, value, fn(n, acc) ->
a = quote do: unquote do_convert(n)
{ :+, [], [ a, acc]}
end)
end
# Literals (nil, booleans, atoms, binaries, numbers) are valid Elixir AST
# already and pass through unchanged.
def do_convert(nil), do: nil
def do_convert(true), do: true
def do_convert(false), do: false
def do_convert(x) when is_atom(x), do: x
def do_convert(x) when is_binary(x), do: x
def do_convert(x) when is_number(x), do: x
end
|
apps/composer/lib/ast.ex
| 0.711331 | 0.595669 |
ast.ex
|
starcoder
|
defmodule FalconPlusApi.Api.Nodata do
  alias Maxwell.Conn
  alias FalconPlusApi.{Util, Sig, Api}

  @doc """
  * [Session](#/authentication) Required
  ### Request
  ```{
    "tags": "",
    "step": 60,
    "obj_type": "host",
    "obj": "docker-agent",
    "name": "testnodata",
    "mock": -1,
    "metric": "test.metric",
    "dstype": "GAUGE"
  }```
  ### Response
  ```Status: 200```
  ```{
    "id": 4,
    "name": "testnodata",
    "obj": "docker-agent",
    "obj_type": "host",
    "metric": "test.metric",
    "tags": "",
    "dstype": "GAUGE",
    "step": 60,
    "mock": -1,
    "creator": "root"
  }```
  """
  def create(sig, addr, opts \\ []) do
    request(~s</api/v1/nodata/>, :post, sig, addr, opts)
  end

  @doc """
  * [Session](#/authentication) Required
  * ex. /api/v1/nodata/4
  ### Response
  ```Status: 200```
  ```{"message":"mockcfg:4 is deleted"}```
  """
  def delete(nodata_id, sig, addr, opts \\ []) do
    request(~s</api/v1/nodata/#{nodata_id}>, :delete, sig, addr, opts)
  end

  @doc """
  * [Session](#/authentication) Required
  * ex. /api/v1/nodata/2
  ### Response
  ```Status: 200```
  ```{
    "id": 2,
    "name": "owl_nodate",
    "obj": "docker-agent",
    "obj_type": "host",
    "metric": "test.metric",
    "tags": "",
    "dstype": "GAUGE",
    "step": 60,
    "mock": -2,
    "creator": "root"
  }```
  """
  def info_by_id(nodata_id, sig, addr, opts \\ []) do
    request(~s</api/v1/nodata/#{nodata_id}>, :get, sig, addr, opts)
  end

  @doc """
  * [Session](#/authentication) Required
  ### Response
  ```Status: 200```
  ```[
    {
      "id": 2,
      "name": "owl_nodate",
      "obj": "docker-agent",
      "obj_type": "host",
      "metric": "test.metric",
      "tags": "",
      "dstype": "GAUGE",
      "step": 60,
      "mock": -2,
      "creator": "root"
    }
  ]```
  """
  def list(sig, addr, opts \\ []) do
    request(~s</api/v1/nodata>, :get, sig, addr, opts)
  end

  @doc """
  * [Session](#/authentication) Required
  ### Request
  ```{
    "tags": "",
    "step": 60,
    "obj_type": "host",
    "obj": "docker-agent",
    "mock": -2,
    "metric": "test.metric",
    "id": 4,
    "dstype": "GAUGE"
  }```
  ### Response
  ```Status: 200```
  ```{
    "id": 0,
    "name": "",
    "obj": "docker-agent",
    "obj_type": "host",
    "metric": "test.metric",
    "tags": "",
    "dstype": "GAUGE",
    "step": 60,
    "mock": -2,
    "creator": ""
  }```
  """
  def update(sig, addr, opts \\ []) do
    request(~s</api/v1/nodata/>, :put, sig, addr, opts)
  end

  # Shared plumbing for every endpoint: resolve the session token, build the
  # connection with the Apitoken header, issue the request with the given
  # HTTP verb (an Api function name: :get/:post/:put/:delete) and unwrap
  # the result.
  defp request(path, verb, sig, addr, opts) do
    token = Sig.get_sig(sig)

    conn =
      path
      |> Util.url(addr)
      |> Conn.new()
      |> Api.set_opts(opts)
      |> Conn.put_req_header("Apitoken", token)

    Api.get_result(apply(Api, verb, [conn]))
  end
end
|
lib/falcon_plus_api/api/nodata.ex
| 0.539711 | 0.716814 |
nodata.ex
|
starcoder
|
defmodule Bunch.Access do
  @moduledoc """
  A bunch of functions for easier manipulation on terms of types implementing `Access`
  behaviour.
  """

  import Kernel, except: [get_in: 2, put_in: 2, update_in: 3, get_and_update_in: 3, pop_in: 2]

  use Bunch

  @compile {:inline, map_keys: 1}

  @gen_common_docs fn fun_name ->
    """
    Works like `Kernel.#{fun_name}` with small differences.
    Behaviour differs in the following aspects:
    - empty lists of keys are allowed
    - single key does not have to be wrapped in a list
    """
  end

  @doc """
  Implements `Access` behaviour by delegating callbacks to `Map` module.
  All the callbacks are overridable.
  """
  defmacro __using__(_args) do
    quote do
      @behaviour Access

      @impl true
      defdelegate fetch(term, key), to: Map

      @impl true
      defdelegate get_and_update(data, key, list), to: Map

      @impl true
      defdelegate pop(data, key), to: Map

      defoverridable Access
    end
  end

  @doc """
  #{@gen_common_docs.("get_in/2")}
  ## Examples
      iex> #{inspect(__MODULE__)}.get_in(%{a: %{b: 10}}, [:a, :b])
      10
      iex> #{inspect(__MODULE__)}.get_in(%{a: 10}, :a)
      10
      iex> #{inspect(__MODULE__)}.get_in(%{a: %{b: 10}}, [])
      %{a: %{b: 10}}
  """
  @spec get_in(Access.t(), Access.key() | [Access.key()]) :: Access.value()
  def get_in(container, []), do: container
  def get_in(container, keys), do: Kernel.get_in(container, map_keys(keys))

  @doc """
  #{@gen_common_docs.("put_in/3")}
  ## Examples
      iex> #{inspect(__MODULE__)}.put_in(%{a: %{b: 10}}, [:a, :b], 20)
      %{a: %{b: 20}}
      iex> #{inspect(__MODULE__)}.put_in(%{a: 10}, :a, 20)
      %{a: 20}
      iex> #{inspect(__MODULE__)}.put_in(%{a: %{b: 10}}, [], 20)
      20
  """
  @spec put_in(Access.t(), Access.key() | [Access.key()], Access.value()) :: Access.value()
  def put_in(_map, [], v), do: v
  def put_in(container, keys, v), do: Kernel.put_in(container, map_keys(keys), v)

  @doc """
  #{@gen_common_docs.("update_in/3")}
  ## Examples
      iex> #{inspect(__MODULE__)}.update_in(%{a: %{b: 10}}, [:a, :b], & &1 * 2)
      %{a: %{b: 20}}
      iex> #{inspect(__MODULE__)}.update_in(%{a: 10}, :a, & &1 * 2)
      %{a: 20}
      iex> #{inspect(__MODULE__)}.update_in(10, [], & &1 * 2)
      20
  """
  @spec update_in(Access.t(), Access.key() | [Access.key()], (Access.value() -> Access.value())) ::
          Access.t()
  def update_in(container, [], f), do: f.(container)
  def update_in(container, keys, f), do: Kernel.update_in(container, map_keys(keys), f)

  @doc """
  #{@gen_common_docs.("get_and_update_in/3")}
  ## Examples
      iex> #{inspect(__MODULE__)}.get_and_update_in(%{a: %{b: 10}}, [:a, :b], & {&1, &1 * 2})
      {10, %{a: %{b: 20}}}
      iex> #{inspect(__MODULE__)}.get_and_update_in(%{a: 10}, :a, & {&1, &1 * 2})
      {10, %{a: 20}}
      iex> #{inspect(__MODULE__)}.get_and_update_in(10, [], & {&1, &1 * 2})
      {10, 20}
  """
  @spec get_and_update_in(Access.t(), Access.key() | [Access.key()], (a -> {b, a})) ::
          {b, Access.t()}
        when a: Access.value(), b: any
  def get_and_update_in(container, [], f), do: f.(container)

  def get_and_update_in(container, keys, f) do
    Kernel.get_and_update_in(container, map_keys(keys), f)
  end

  @doc """
  Updates value at `keys` in a nested data structure and returns new value and updated structure.
  Uses `get_and_update_in/3` under the hood.
  ## Example
      iex> %{a: %{b: 10}} |> #{inspect(__MODULE__)}.get_updated_in([:a, :b], & &1+1)
      {11, %{a: %{b: 11}}}
  """
  @spec get_updated_in(Access.t(), Access.key() | [Access.key()], (Access.value() -> a)) ::
          {a, Access.t()}
        when a: Access.value()
  def get_updated_in(container, keys, f) do
    get_and_update_in(container, keys, fn value ->
      updated = f.(value)
      {updated, updated}
    end)
  end

  @doc """
  #{@gen_common_docs.("pop_in/2")}
  ## Examples
      iex> #{inspect(__MODULE__)}.pop_in(%{a: %{b: 10}}, [:a, :b])
      {10, %{a: %{}}}
      iex> #{inspect(__MODULE__)}.pop_in(%{a: 10}, :a)
      {10, %{}}
      iex> #{inspect(__MODULE__)}.pop_in(10, [])
      {10, nil}
  """
  @spec pop_in(Access.t(), Access.key() | [Access.key()]) :: {Access.value(), Access.t()}
  def pop_in(container, []), do: {container, nil}
  def pop_in(container, keys), do: Kernel.pop_in(container, map_keys(keys))

  @doc """
  Works like `pop_in/2`, but discards returned value.
  ## Examples
      iex> #{inspect(__MODULE__)}.delete_in(%{a: %{b: 10}}, [:a, :b])
      %{a: %{}}
      iex> #{inspect(__MODULE__)}.delete_in(%{a: 10}, :a)
      %{}
      iex> #{inspect(__MODULE__)}.delete_in(10, [])
      nil
  """
  @spec delete_in(Access.t(), Access.key() | [Access.key()]) :: Access.t()
  def delete_in(container, keys) do
    {_value, remaining} = pop_in(container, keys)
    remaining
  end

  # Wraps a bare key into a single-element list; lists pass through.
  @spec map_keys(Access.key() | [Access.key()]) :: [Access.key()]
  defp map_keys(keys), do: Bunch.listify(keys)
end
|
lib/bunch/access.ex
| 0.816882 | 0.450541 |
access.ex
|
starcoder
|
defmodule AlertProcessor.ExtendedTime do
  @moduledoc """
  ExtendedTime is for saving schedule-related times while accounting for fact that a "day" of a schedule stretches into the next actual day. I.E. a trip that leaves at 11:30pm on Jan 1 and another that leaves at 12:30am on Jan 2 are both counted as part of the Jan 1 schedule, but when you are naively sorting times separated from dates the 12:30am trip will incorrectly be sorted before the 11:30pm trip. To account for this we include a "relative_day" concept where 1 represents the base day and 2 represents the next day the schedule extends into.
  """
  alias AlertProcessor.ExtendedTime

  defstruct [
    :relative_day,
    :time
  ]

  @type id :: String.t()
  @type t :: %__MODULE__{
          relative_day: 1 | 2,
          time: Time.t()
        }

  @doc """
  Builds an ExtendedTime struct by comparing a NaiveDateTime with a base date.
  ## Examples
      iex> AlertProcessor.ExtendedTime.new(~N[2018-01-02 00:30:00], ~D[2018-01-01])
      {:ok, %AlertProcessor.ExtendedTime{relative_day: 2, time: ~T[00:30:00]}}
  """
  @spec new(NaiveDateTime.t(), Date.t()) :: {:ok, t}
  def new(%NaiveDateTime{} = date_time, %Date{} = base_date) do
    # Day 1 when the timestamp falls on the base date itself; day 2 when the
    # schedule has spilled into the following calendar day.
    day =
      case Date.compare(NaiveDateTime.to_date(date_time), base_date) do
        :eq -> 1
        _ -> 2
      end

    {:ok, %ExtendedTime{relative_day: day, time: NaiveDateTime.to_time(date_time)}}
  end

  @doc """
  Compares two ExtendedTime structs.
  Returns :gt if first date is later than the second and :lt for vice versa. If the two dates are equal :eq is returned.
  ## Examples
      iex> {:ok, x} = AlertProcessor.ExtendedTime.new(~N[2018-01-01 23:00:00], ~D[2018-01-01])
      iex> {:ok, y} = AlertProcessor.ExtendedTime.new(~N[2018-01-01 23:30:00], ~D[2018-01-01])
      iex> {:ok, z} = AlertProcessor.ExtendedTime.new(~N[2018-01-02 00:30:00], ~D[2018-01-01])
      iex> AlertProcessor.ExtendedTime.compare(x, y)
      :lt
      iex> AlertProcessor.ExtendedTime.compare(y, x)
      :gt
      iex> AlertProcessor.ExtendedTime.compare(y, z)
      :lt
      iex> AlertProcessor.ExtendedTime.compare(z, y)
      :gt
      iex> AlertProcessor.ExtendedTime.compare(z, z)
      :eq
  """
  @spec compare(t, t) :: :lt | :eq | :gt
  def compare(%ExtendedTime{} = a, %ExtendedTime{} = b) do
    # The relative day dominates the ordering; time-of-day only breaks ties.
    cond do
      a.relative_day < b.relative_day -> :lt
      a.relative_day > b.relative_day -> :gt
      true -> Time.compare(a.time, b.time)
    end
  end
end
|
apps/alert_processor/lib/extended_time.ex
| 0.91985 | 0.654574 |
extended_time.ex
|
starcoder
|
defmodule Mix.Local.Installer do
  @moduledoc """
  This module implements pieces of functionality shared by the archive- and escript-related
  tasks.
  """

  @doc """
  Checks that the argument given to install is supported by the respective module.
  """
  @callback check_path_or_url(String.t) :: :ok | {:error, String.t}

  @doc """
  Returns a list of already installed version of the same archive or escript.
  """
  @callback find_previous_versions(String.t, Path.t) :: [Path.t]

  @doc """
  Custom actions to be performed before the actual installation.
  """
  @callback before_install(String.t, Path.t) :: :ok | {:error, String.t}

  @doc """
  Custom actions to be performed after the installation has succeeded.
  """
  @callback after_install(Path.t, [Path.t]) :: term

  @doc """
  Common implementation of installation for archives and escripts.

  Relies on a few callbacks provided by respective callback modules
  for customizing certain steps in the installation process.

  Returns `true` when an installation was performed and `false` when the
  user aborted; raises `Mix.Error` on invalid arguments or fetch failures.
  """
  @spec install({module, atom}, OptionParser.argv, Keyword.t) :: boolean
  def install({module, name}, argv, switches) do
    {opts, args, _} = OptionParser.parse(argv, switches: switches)

    case args do
      # An explicit path or URL was given on the command line.
      [src] ->
        with :ok <- check_argument(src), :ok <- module.check_path_or_url(src) do
          do_install({module, name}, src, opts)
        else
          {:error, message} -> Mix.raise message <> "\n" <> usage(name)
        end

      # No argument: fall back to the artifact built by the current project.
      [] ->
        src = Mix.Local.name_for(name, Mix.Project.config)
        if File.exists?(src) do
          do_install({module, name}, src, opts)
        else
          Mix.raise "Expected an #{name} to exist in the current directory " <>
                    "or an argument to be given.\n#{usage(name)}"
        end

      _ ->
        Mix.raise "Unexpected arguments.\n#{usage(name)}"
    end
  end

  # Accepts only existing local files or http(s) URLs.
  defp check_argument(arg) do
    if local_path?(arg) or file_url?(arg) do
      :ok
    else
      {:error, "Expected a local file path or a file URL."}
    end
  end

  defp local_path?(url_or_path) do
    File.regular?(url_or_path)
  end

  defp file_url?(url_or_path) do
    URI.parse(url_or_path).scheme in ["http", "https"]
  end

  defp usage(name), do: "Usage: mix #{name}.install <path or url>"

  # Fetches `src` (local path or URL) and writes it into the local Mix
  # directory for `name`, running the module's before/after hooks.
  # Returns true when installed, false when the user declined; raises on
  # fetch or hook failure.
  defp do_install({module, name}, src, opts) do
    src_basename = Path.basename(URI.parse(src).path)
    dst_file_path = Path.join(Mix.Local.path_for(name), src_basename)
    dst_dir_path = Path.dirname(dst_file_path)
    previous_files = module.find_previous_versions(src, dst_file_path)

    if opts[:force] || should_install?(name, src, previous_files) do
      case module.before_install(src, dst_file_path) do
        :ok -> :ok
        {:error, message} -> Mix.raise message
      end

      case Mix.Utils.read_path(src, opts) do
        {:ok, binary} ->
          File.mkdir_p!(dst_dir_path)
          File.write!(dst_file_path, binary)

        :badpath ->
          Mix.raise "Expected #{inspect src} to be a URL or a local file path"

        {:local, message} ->
          Mix.raise message

        {kind, message} when kind in [:remote, :checksum] ->
          Mix.raise """
          #{message}
          Could not fetch #{name} at:
          #{src}
          Please download the #{name} above manually to your current directory and run:
          mix #{name}.install ./#{src_basename}
          """
      end

      Mix.shell.info [:green, "* creating ", :reset, Path.relative_to_cwd(dst_file_path)]
      _ = module.after_install(dst_file_path, previous_files)
      true
    else
      false
    end
  end

  # Asks the user for confirmation, listing any previously installed versions.
  defp should_install?(name, src, previous_files) do
    message = case previous_files do
      [] ->
        "Are you sure you want to install #{name} #{inspect src}?"
      [file] ->
        "Found existing #{name}: #{file}.\n" <>
        "Are you sure you want to replace it with #{inspect src}?"
      files ->
        "Found existing #{name}s: #{Enum.map_join(files, ", ", &Path.basename/1)}.\n" <>
        "Are you sure you want to replace them with #{inspect src}?"
    end
    Mix.shell.yes?(message)
  end

  @doc """
  Print a list of items in a uniform way. Used for printing the list of installed archives and
  escripts.
  """
  @spec print_list(atom, [String.t]) :: :ok
  def print_list(type, []) do
    Mix.shell.info "No #{type}s currently installed."
  end

  def print_list(type, items) do
    Enum.each items, fn item -> Mix.shell.info ["* ", item] end
    item_name = String.capitalize("#{type}")
    Mix.shell.info "#{item_name}s installed at: #{Mix.Local.path_for(type)}"
  end

  @doc """
  A common implementation for uninstalling archives and scripts.

  Returns the removed path on success, or `nil` when nothing was removed
  (unknown name, or the user declined); raises when no name is given.
  """
  # FIX: the previous spec declared `boolean`, but every branch of this
  # function returns either a path or nil — the spec now matches reality.
  @spec uninstall(atom, OptionParser.argv) :: Path.t | nil
  def uninstall(type, argv) do
    {_, argv, _} = OptionParser.parse(argv)

    item_name = "#{type}"
    item_plural = "#{type}s"
    root = Mix.Local.path_for(type)

    if name = List.first(argv) do
      path = Path.join(root, name)

      cond do
        not File.regular?(path) ->
          Mix.shell.error "Could not find a local #{item_name} named #{inspect name}. " <>
                          "Existing #{item_plural} are:"
          # Running the task with the bare item name lists what is installed.
          Mix.Task.run item_name
          nil

        should_uninstall?(path, item_name) ->
          File.rm!(path)
          path

        true ->
          nil
      end
    else
      Mix.raise "No #{item_name} was given to #{item_name}.uninstall"
    end
  end

  defp should_uninstall?(path, item_name) do
    Mix.shell.yes?("Are you sure you want to uninstall #{item_name} #{path}?")
  end
end
|
lib/mix/lib/mix/local/installer.ex
| 0.730963 | 0.417687 |
installer.ex
|
starcoder
|
defmodule Astarte.Core.Mapping.ValueType do
  @moduledoc """
  `Ecto.Type` implementation translating Astarte mapping value types
  between their atom, string and database-integer representations, plus
  helpers validating that payload values conform to an expected type.
  """

  @behaviour Ecto.Type

  # Atom <-> database integer code, one entry per supported value type.
  @type_codes [
    double: 1,
    doublearray: 2,
    integer: 3,
    integerarray: 4,
    longinteger: 5,
    longintegerarray: 6,
    string: 7,
    stringarray: 8,
    boolean: 9,
    booleanarray: 10,
    binaryblob: 11,
    binaryblobarray: 12,
    datetime: 13,
    datetimearray: 14
  ]

  # Derived lookup tables, built once at compile time.
  @code_to_type Map.new(@type_codes, fn {type, code} -> {code, type} end)
  @string_to_type Map.new(@type_codes, fn {type, _code} -> {Atom.to_string(type), type} end)
  @valid_atoms Keyword.keys(@type_codes)

  # The following limits are really conservative,
  # it is always easier to increase them in future releases
  @blob_size 65536
  @list_len 1024
  @string_size 65536

  @impl true
  def type, do: :integer

  @impl true
  def cast(nil), do: {:ok, nil}

  def cast(atom) when is_atom(atom) do
    if atom in @valid_atoms, do: {:ok, atom}, else: :error
  end

  def cast(string) when is_binary(string), do: Map.fetch(@string_to_type, string)

  def cast(_), do: :error

  @impl true
  # Keyword.fetch/2 returns {:ok, code} | :error, matching the contract.
  def dump(value_type) when is_atom(value_type), do: Keyword.fetch(@type_codes, value_type)

  @doc """
  Like `dump/1`, but returns the bare integer and raises `ArgumentError`
  for unknown value types.
  """
  def to_int(value_type) when is_atom(value_type) do
    case dump(value_type) do
      {:ok, int} -> int
      :error -> raise ArgumentError, message: "#{inspect(value_type)} is not a valid value type"
    end
  end

  @impl true
  def load(value_type_int) when is_integer(value_type_int),
    do: Map.fetch(@code_to_type, value_type_int)

  @doc """
  Like `load/1`, but returns the bare atom and raises `ArgumentError`
  for unknown integer codes.
  """
  def from_int(value_type_int) when is_integer(value_type_int) do
    case load(value_type_int) do
      {:ok, value_type} ->
        value_type

      :error ->
        raise ArgumentError,
          message: "#{value_type_int} is not a valid value type int representation"
    end
  end

  @doc """
  Validates that `value` conforms to `expected_type`.

  Returns `:ok`, `{:error, :unexpected_value_type}` on a type mismatch, or
  `{:error, :value_size_exceeded}` when a string/blob/list exceeds the
  configured size limits.
  """
  def validate_value(:double, v) when is_number(v), do: :ok

  def validate_value(:integer, v) when is_integer(v) and abs(v) <= 0x7FFFFFFF, do: :ok

  def validate_value(:boolean, v) when is_boolean(v), do: :ok

  def validate_value(:longinteger, v) when is_integer(v) and abs(v) <= 0x7FFFFFFFFFFFFFFF,
    do: :ok

  def validate_value(:string, v) when is_binary(v) do
    cond do
      not String.valid?(v) -> {:error, :unexpected_value_type}
      byte_size(v) > @string_size -> {:error, :value_size_exceeded}
      true -> :ok
    end
  end

  def validate_value(:binaryblob, v) when is_binary(v) do
    if byte_size(v) > @blob_size, do: {:error, :value_size_exceeded}, else: :ok
  end

  # Blobs may also arrive as a {subtype, binary} pair.
  def validate_value(:binaryblob, {_subtype, bin}) when is_binary(bin) do
    if byte_size(bin) > @blob_size, do: {:error, :value_size_exceeded}, else: :ok
  end

  # Datetimes are accepted as DateTime structs or integer timestamps.
  def validate_value(:datetime, %DateTime{}), do: :ok

  def validate_value(:datetime, v) when is_integer(v), do: :ok

  def validate_value(:doublearray, v) when is_list(v), do: validate_array_value(:double, v)

  def validate_value(:integerarray, v) when is_list(v), do: validate_array_value(:integer, v)

  def validate_value(:booleanarray, v) when is_list(v), do: validate_array_value(:boolean, v)

  def validate_value(:longintegerarray, v) when is_list(v),
    do: validate_array_value(:longinteger, v)

  def validate_value(:stringarray, v) when is_list(v), do: validate_array_value(:string, v)

  def validate_value(:binaryblobarray, v) when is_list(v),
    do: validate_array_value(:binaryblob, v)

  def validate_value(:datetimearray, v) when is_list(v), do: validate_array_value(:datetime, v)

  def validate_value(_expected_type, _value), do: {:error, :unexpected_value_type}

  # Checks list length against @list_len, then validates every element.
  defp validate_array_value(type, values) do
    cond do
      length(values) > @list_len ->
        {:error, :value_size_exceeded}

      not Enum.all?(values, &(validate_value(type, &1) == :ok)) ->
        {:error, :unexpected_value_type}

      true ->
        :ok
    end
  end
end
|
lib/astarte_core/mapping/value_type.ex
| 0.758063 | 0.578418 |
value_type.ex
|
starcoder
|
defmodule HT16K33 do
@moduledoc """
API for working with HT16K33 14-segment display backpacks.

All functions take a `backpack_state` map (I2C bus reference plus device
address) and return it, so calls can be piped. Command bytes follow the
Holtek HT16K33 datasheet: system setup (0x2X), display setup (0x8X) and
dimming set (0xEX).
"""
use Bitwise
alias Circuits.I2C
# State threaded through all API functions: an open I2C bus ref and the
# 7-bit device address.
@type backpack_state :: %{
ref: I2C.bus(),
addr: integer
}
# Default Linux I2C bus device name.
@i2c_code "i2c-1"
# Default address for HT16K33 - up to 0x77
@addr 0x70
@doc """
Returns a state map with nil I2C ref, i2c bus code "i2c-1" and address 0x70
Generally, prefer `init`
"""
def default_state do
%{
ref: nil,
addr: @addr
}
end
@doc """
Initializes an HT16K33 at `addr` and `i2c_code` and returns state
## Parameters
- i2c_code: a string representing an i2c bus code, default "i2c-1"
- addr: a number indicating the i2c address, default 0x70
"""
@spec init(String.t(), integer) :: backpack_state
def init(i2c_code \\ @i2c_code, addr \\ @addr) do
{:ok, ref} = I2C.open(i2c_code)
# 0x21: system setup command, oscillator-on bit set (per HT16K33 datasheet).
I2C.write(ref, addr, <<0x21>>)
%{default_state() | ref: ref, addr: addr}
end
@doc """
Deinitializes the HT16K33 at `state.addr` referenced by `state.ref` and
closes `state.ref`
## Parameters
- state: an HT16K33 state
"""
@spec deinit(backpack_state) :: :ok
def deinit(state) do
# 0x20: system setup command with oscillator off (standby).
I2C.write(state[:ref], state[:addr], <<0x20>>)
I2C.close(state[:ref])
end
@doc """
Takes `on` as new display power setting
## Parameters
- state: an HT16K33 state
- on: a boolean, defaulting to true to turn power on
"""
@spec power(backpack_state, boolean) :: backpack_state
def power(state, on \\ true) do
# 0x80 | 0x01: display setup command; bit 0 is the display on/off bit.
I2C.write(state[:ref], state[:addr], <<0x80 ||| if(on, do: 0x01, else: 0x00)>>)
state
end
@doc """
Clears display (extinguishes all segments)
## Parameters
- state: an HT16K33 state
"""
@spec clear(backpack_state) :: backpack_state
def clear(state) do
# First byte 0x00 sets the display-RAM address pointer; the following
# 8 zero bytes blank all four digits (2 bytes per digit).
I2C.write(state[:ref], state[:addr], <<0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00>>)
state
end
@doc """
Fills a display (illuminates all segments)
## Parameters
- state: an HT16K33 state
"""
@spec fill(backpack_state) :: backpack_state
def fill(state) do
# Same layout as clear/1, but every segment bit set.
I2C.write(state[:ref], state[:addr], <<0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF>>)
state
end
@doc """
Sets display to blink at hz rate
## Parameters
- state: an HT16K33 state
- hz: blink rate, valid options are 0.5, 1.0, 2.0, default is no blink, which is equivalent to `power(state, true)`
"""
@spec blink(backpack_state, float) :: backpack_state
# Display setup command with blink-rate bits: 0x87 = 0.5 Hz, 0x85 = 1 Hz,
# 0x83 = 2 Hz, 0x81 = display on with no blink (the catch-all clause).
def blink(state, hz) when hz == 0.5 do
I2C.write(state[:ref], state[:addr], <<0x87>>)
state
end
def blink(state, hz) when hz == 1.0 do
I2C.write(state[:ref], state[:addr], <<0x85>>)
state
end
def blink(state, hz) when hz == 2.0 do
I2C.write(state[:ref], state[:addr], <<0x83>>)
state
end
def blink(state, _hz) do
I2C.write(state[:ref], state[:addr], <<0x81>>)
state
end
@doc """
Sets display brightness level
## Parameters
- state: an HT16K33 state
- brightness: brightness level, valid [0, 16)
"""
@spec radiate(backpack_state, non_neg_integer) :: backpack_state
def radiate(state, brightness) when 0 <= brightness and brightness < 16 do
# 0xE0 | brightness: dimming set command (16 duty-cycle levels).
I2C.write(state[:ref], state[:addr], <<0xE0 ||| brightness>>)
state
end
# Out-of-range brightness is silently ignored.
def radiate(state, _brightness), do: state
@doc """
Write a character to a position
## Parameters
- state: an HT16K33 state
- pos: on which 14-segment display to render the character, [0, 3]
- code: bitstream code to output
"""
@spec write_char_to(backpack_state, 0..3, bitstring) :: backpack_state
def write_char_to(state, pos, code) when 0 <= pos and pos < 4 do
# Each digit occupies two display-RAM bytes, so the RAM address is pos * 2.
I2C.write(state[:ref], state[:addr], <<pos * 2>> <> code)
state
end
def write_char_to(state, _pos, _code), do: state
@doc """
Write a string to a position
## Parameters
- state: an HT16K33 state
- pos: on which 14-segment display to start rendering the str, [0, 4-length(str)]
- str: a list of single-character charlists (e.g. `['H', 'I']`), looked up via `character_for/1`
"""
@spec write_string_to(backpack_state, 0..3, list(char)) :: backpack_state
def write_string_to(state, pos, str) when length(str) + pos <= 4 and 0 <= pos do
I2C.write(
state[:ref],
state[:addr],
<<pos * 2>> <> List.foldl(str, <<>>, fn x, acc -> acc <> character_for(x) end)
)
state
end
# Strings that would overflow the 4-digit display are silently ignored.
def write_string_to(state, _pos, _str), do: state
@doc """
Add a decimal point to a character bitstream value
## Parameters
- char: a bitstream character
"""
@spec with_decimal_point(bitstring) :: bitstring
def with_decimal_point(char) do
<<f, s>> = char
# Sets bit 6 of the second byte, which drives the decimal-point segment.
<<f, s ||| 0x40>>
end
@doc """
Get a bitstream value for a character `char`.

Digits 0-9, letters A-Z (upper- and lowercase render identically), '%' and
space are supported; any unsupported character renders blank.
## Parameters
- char: a single-character charlist (e.g. `'A'`), as produced by iterating a charlist
"""
# NOTE(review): the clauses match single-character charlists ('1' == [?1]),
# so the @spec of `char` (an integer codepoint) is inaccurate — callers must
# pass charlists. Left as-is to preserve the existing interface.
@spec character_for(char) :: bitstring
def character_for(char) do
case char do
'1' -> <<0b00000110, 0b00000000>>
'2' -> <<0b11011011, 0b00000000>>
'3' -> <<0b11001111, 0b00000000>>
'4' -> <<0b11100110, 0b00000000>>
'5' -> <<0b11101101, 0b00000000>>
'6' -> <<0b11111101, 0b00000000>>
'7' -> <<0b00000001, 0b00001100>>
'8' -> <<0b11111111, 0b00000000>>
'9' -> <<0b11100111, 0b00000000>>
'0' -> <<0b00111111, 0b00000000>>
'%' -> <<0b11100100, 0b00011110>>
'A' -> <<0b11110111, 0b00000000>>
'B' -> <<0b01111001, 0b00100100>>
'C' -> <<0b00111001, 0b00000000>>
'D' -> <<0b00110000, 0b00001001>>
'E' -> <<0b11111001, 0b00000000>>
'F' -> <<0b11110001, 0b00000000>>
'G' -> <<0b10111101, 0b00100000>>
'H' -> <<0b11110110, 0b00000000>>
'I' -> <<0b00001001, 0b00010010>>
'J' -> <<0b00001110, 0b00001000>>
'K' -> <<0b01110000, 0b00100100>>
'L' -> <<0b00111000, 0b00000000>>
'M' -> <<0b00110110, 0b00000101>>
'N' -> <<0b00110110, 0b00100001>>
'O' -> <<0b00111111, 0b00000000>>
'P' -> <<0b11110011, 0b00000000>>
'Q' -> <<0b00111111, 0b00100000>>
'R' -> <<0b11110011, 0b00100000>>
'S' -> <<0b00001001, 0b00100001>>
'T' -> <<0b00000001, 0b00010010>>
'U' -> <<0b00111110, 0b00000000>>
'V' -> <<0b00110000, 0b00001100>>
'W' -> <<0b00110110, 0b00101000>>
'X' -> <<0b00000000, 0b00101101>>
'Y' -> <<0b00000000, 0b00010101>>
'Z' -> <<0b00001001, 0b00001100>>
'a' -> <<0b11110111, 0b00000000>>
'b' -> <<0b01111001, 0b00100100>>
'c' -> <<0b00111001, 0b00000000>>
'd' -> <<0b00110000, 0b00001001>>
'e' -> <<0b11111001, 0b00000000>>
'f' -> <<0b11110001, 0b00000000>>
'g' -> <<0b10111101, 0b00100000>>
'h' -> <<0b11110110, 0b00000000>>
'i' -> <<0b00001001, 0b00010010>>
'j' -> <<0b00001110, 0b00001000>>
'k' -> <<0b01110000, 0b00100100>>
'l' -> <<0b00111000, 0b00000000>>
'm' -> <<0b00110110, 0b00000101>>
'n' -> <<0b00110110, 0b00100001>>
'o' -> <<0b00111111, 0b00000000>>
'p' -> <<0b11110011, 0b00000000>>
'q' -> <<0b00111111, 0b00100000>>
'r' -> <<0b11110011, 0b00100000>>
's' -> <<0b00001001, 0b00100001>>
't' -> <<0b00000001, 0b00010010>>
'u' -> <<0b00111110, 0b00000000>>
'v' -> <<0b00110000, 0b00001100>>
'w' -> <<0b00110110, 0b00101000>>
'x' -> <<0b00000000, 0b00101101>>
'y' -> <<0b00000000, 0b00010101>>
'z' -> <<0b00001001, 0b00001100>>
' ' -> <<0b00000000, 0b00000000>>
_ -> <<0b00000000, 0b00000000>>
end
end
end
|
lib/hT16K33.ex
| 0.858748 | 0.444324 |
hT16K33.ex
|
starcoder
|
defimpl Timex.Protocol, for: NaiveDateTime do
@moduledoc """
This module implements Timex functionality for NaiveDateTime
"""
alias Timex.AmbiguousDateTime
import Timex.Macros
# Gregorian seconds at the Unix epoch (1970-01-01 00:00:00); subtracted in
# to_unix/1 to convert Gregorian seconds into a Unix timestamp.
@epoch_seconds :calendar.datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}})
def now(), do: NaiveDateTime.utc_now()
# Julian day number for the calendar date; the time of day is ignored.
def to_julian(%NaiveDateTime{year: y, month: m, day: d}) do
Timex.Calendar.Julian.julian_date(y, m, d)
end
# Gregorian seconds, discarding the microsecond component.
def to_gregorian_seconds(date) do
with {s, _} <- Timex.NaiveDateTime.to_gregorian_seconds(date), do: s
end
def to_gregorian_microseconds(%NaiveDateTime{} = date) do
with {s, us} <- Timex.NaiveDateTime.to_gregorian_seconds(date) do
s * (1_000 * 1_000) + us
end
end
def to_unix(date) do
with {s, _} <- Timex.NaiveDateTime.to_gregorian_seconds(date) do
s - @epoch_seconds
end
end
def to_date(date), do: NaiveDateTime.to_date(date)
# Converting to a zoned DateTime may yield an AmbiguousDateTime (DST
# transitions) or an error tuple; both are passed through unchanged.
def to_datetime(%NaiveDateTime{} = naive, timezone) do
with %DateTime{} = datetime <- Timex.Timezone.convert(naive, timezone) do
datetime
else
%AmbiguousDateTime{} = datetime ->
datetime
{:error, _} = err ->
err
end
end
def to_naive_datetime(%NaiveDateTime{} = date), do: date
def to_erl(%NaiveDateTime{} = d), do: NaiveDateTime.to_erl(d)
def century(%NaiveDateTime{:year => year}), do: Timex.century(year)
def is_leap?(%NaiveDateTime{year: year}), do: :calendar.is_leap_year(year)
# Boundary helpers: each preserves the datetime's microsecond *precision*
# while pinning the value to the start (0) or end (999_999) of the range.
def beginning_of_day(%NaiveDateTime{:microsecond => {_, precision}} = datetime) do
%{datetime | :hour => 0, :minute => 0, :second => 0, :microsecond => {0, precision}}
end
def end_of_day(%NaiveDateTime{microsecond: {_, precision}} = datetime) do
us = Timex.DateTime.Helpers.construct_microseconds(999_999, precision)
%{datetime | :hour => 23, :minute => 59, :second => 59, :microsecond => us}
end
# NOTE(review): when weekstart is invalid, standardize_week_start presumably
# returns a non-atom ({:error, _}) which the `with` passes through — confirm.
def beginning_of_week(%NaiveDateTime{microsecond: {_, precision}} = date, weekstart) do
with ws when is_atom(ws) <- Timex.standardize_week_start(weekstart) do
date = Timex.Date.beginning_of_week(date, ws)
Timex.NaiveDateTime.new!(date.year, date.month, date.day, 0, 0, 0, {0, precision})
end
end
def end_of_week(%NaiveDateTime{microsecond: {_, precision}} = date, weekstart) do
with ws when is_atom(ws) <- Timex.standardize_week_start(weekstart) do
date = Timex.Date.end_of_week(date, ws)
us = Timex.DateTime.Helpers.construct_microseconds(999_999, precision)
Timex.NaiveDateTime.new!(date.year, date.month, date.day, 23, 59, 59, us)
end
end
def beginning_of_year(%NaiveDateTime{year: year, microsecond: {_, precision}}) do
Timex.NaiveDateTime.new!(year, 1, 1, 0, 0, 0, {0, precision})
end
def end_of_year(%NaiveDateTime{year: year, microsecond: {_, precision}}) do
us = Timex.DateTime.Helpers.construct_microseconds(999_999, precision)
Timex.NaiveDateTime.new!(year, 12, 31, 23, 59, 59, us)
end
# First month of the quarter containing `month`: 1, 4, 7 or 10.
def beginning_of_quarter(%NaiveDateTime{month: month} = date) do
month = 1 + 3 * (Timex.quarter(month) - 1)
beginning_of_month(%{date | :month => month, :day => 1})
end
# Last month of the quarter: 3, 6, 9 or 12.
def end_of_quarter(%NaiveDateTime{month: month} = date) do
month = 3 * Timex.quarter(month)
end_of_month(%{date | :month => month, :day => 1})
end
def beginning_of_month(%NaiveDateTime{year: year, month: month, microsecond: {_, precision}}),
do: Timex.NaiveDateTime.new!(year, month, 1, 0, 0, 0, {0, precision})
def end_of_month(%NaiveDateTime{year: year, month: month, microsecond: {_, precision}} = date) do
day = days_in_month(date)
us = Timex.DateTime.Helpers.construct_microseconds(999_999, precision)
Timex.NaiveDateTime.new!(year, month, day, 23, 59, 59, us)
end
def quarter(%NaiveDateTime{year: y, month: m, day: d}),
do: Calendar.ISO.quarter_of_year(y, m, d)
def days_in_month(%NaiveDateTime{year: y, month: m}), do: Timex.days_in_month(y, m)
def week_of_month(%NaiveDateTime{year: y, month: m, day: d}),
do: Timex.week_of_month(y, m, d)
def weekday(%NaiveDateTime{year: y, month: m, day: d}),
do: :calendar.day_of_the_week({y, m, d})
def weekday(%NaiveDateTime{} = date, weekstart),
do: Timex.Date.day_of_week(date, weekstart)
def day(%NaiveDateTime{} = date), do: Date.day_of_year(date)
def is_valid?(%NaiveDateTime{
:year => y,
:month => m,
:day => d,
:hour => h,
:minute => min,
:second => sec
}) do
:calendar.valid_date({y, m, d}) and Timex.is_valid_time?({h, min, sec})
end
def iso_week(%NaiveDateTime{:year => y, :month => m, :day => d}),
do: Timex.iso_week(y, m, d)
def from_iso_day(%NaiveDateTime{year: year} = date, day) when is_day_of_year(day) do
{year, month, day_of_month} = Timex.Helpers.iso_day_to_date_tuple(year, day)
%{date | :year => year, :month => month, :day => day_of_month}
end
# Applies each option in order, short-circuiting to the first error.
# With validate: true (the default) all components are normalized/clamped
# via Timex.normalize; unknown options yield {:error, {:bad_option, name}}.
def set(%NaiveDateTime{} = date, options) do
validate? = Keyword.get(options, :validate, true)
Enum.reduce(options, date, fn
_option, {:error, _} = err ->
err
option, result ->
case option do
{:validate, _} ->
result
{:datetime, {{y, m, d}, {h, min, sec}}} ->
if validate? do
%{
result
| :year => Timex.normalize(:year, y),
:month => Timex.normalize(:month, m),
:day => Timex.normalize(:day, {y, m, d}),
:hour => Timex.normalize(:hour, h),
:minute => Timex.normalize(:minute, min),
:second => Timex.normalize(:second, sec)
}
else
%{
result
| :year => y,
:month => m,
:day => d,
:hour => h,
:minute => min,
:second => sec
}
end
{:date, {y, m, d}} ->
if validate? do
{yn, mn, dn} = Timex.normalize(:date, {y, m, d})
%{result | :year => yn, :month => mn, :day => dn}
else
%{result | :year => y, :month => m, :day => d}
end
{:date, %Date{} = d} ->
Timex.set(result, date: {d.year, d.month, d.day})
{:time, {h, m, s}} ->
if validate? do
%{
result
| :hour => Timex.normalize(:hour, h),
:minute => Timex.normalize(:minute, m),
:second => Timex.normalize(:second, s)
}
else
%{result | :hour => h, :minute => m, :second => s}
end
{:time, {h, m, s, ms}} ->
if validate? do
%{
result
| :hour => Timex.normalize(:hour, h),
:minute => Timex.normalize(:minute, m),
:second => Timex.normalize(:second, s),
:microsecond => Timex.normalize(:microsecond, ms)
}
else
%{result | :hour => h, :minute => m, :second => s, :microsecond => ms}
end
{:time, %Time{} = t} ->
Timex.set(result, time: {t.hour, t.minute, t.second, t.microsecond})
{:day, d} ->
if validate? do
%{result | :day => Timex.normalize(:day, {result.year, result.month, d})}
else
%{result | :day => d}
end
{name, val} when name in [:year, :month, :hour, :minute, :second, :microsecond] ->
if validate? do
Map.put(result, name, Timex.normalize(name, val))
else
Map.put(result, name, val)
end
{name, _} when name in [:timezone] ->
result
{option_name, _} ->
{:error, {:bad_option, option_name}}
end
end)
end
# Shifting delegates to the DateTime implementation via UTC; an ambiguous
# result (DST transition) resolves to the later (:after) wall-clock time.
def shift(%NaiveDateTime{} = datetime, shifts) when is_list(shifts) do
with {:ok, dt} <- DateTime.from_naive(datetime, "Etc/UTC", Timex.Timezone.Database) do
case Timex.shift(dt, shifts) do
{:error, _} = err ->
err
%AmbiguousDateTime{after: datetime} ->
DateTime.to_naive(datetime)
%DateTime{} = datetime ->
DateTime.to_naive(datetime)
end
end
end
end
|
lib/datetime/naivedatetime.ex
| 0.773901 | 0.625838 |
naivedatetime.ex
|
starcoder
|
defmodule AWS.Polly do
  @moduledoc """
  Amazon Polly is a web service that makes it easy to synthesize speech from
  text.

  The Amazon Polly service provides API operations for synthesizing
  high-quality speech from plain text and Speech Synthesis Markup Language
  (SSML), along with managing pronunciations lexicons that enable you to get
  the best results for your application domain.
  """

  @doc """
  Deletes the specified pronunciation lexicon stored in an AWS Region. A
  lexicon which has been deleted is not available for speech synthesis, nor
  is it possible to retrieve it using either the `GetLexicon` or
  `ListLexicon` APIs.

  For more information, see [Managing
  Lexicons](http://docs.aws.amazon.com/polly/latest/dg/managing-lexicons.html).
  """
  def delete_lexicon(client, name, input, options \\ []) do
    url = "/v1/lexicons/#{URI.encode(name)}"
    headers = []
    request(client, :delete, url, headers, input, options, 200)
  end

  @doc """
  Returns the list of voices that are available for use when requesting
  speech synthesis. Each voice speaks a specified language, is either male or
  female, and is identified by an ID, which is the ASCII version of the voice
  name.

  You can optionally specify a language code to filter the available voices.
  This operation requires permissions to perform the `polly:DescribeVoices`
  action.
  """
  def describe_voices(client, options \\ []) do
    url = "/v1/voices"
    headers = []
    request(client, :get, url, headers, nil, options, 200)
  end

  @doc """
  Returns the content of the specified pronunciation lexicon stored in an AWS
  Region. For more information, see [Managing
  Lexicons](http://docs.aws.amazon.com/polly/latest/dg/managing-lexicons.html).
  """
  def get_lexicon(client, name, options \\ []) do
    url = "/v1/lexicons/#{URI.encode(name)}"
    headers = []
    request(client, :get, url, headers, nil, options, 200)
  end

  @doc """
  Returns a list of pronunciation lexicons stored in an AWS Region. For more
  information, see [Managing
  Lexicons](http://docs.aws.amazon.com/polly/latest/dg/managing-lexicons.html).
  """
  def list_lexicons(client, options \\ []) do
    url = "/v1/lexicons"
    headers = []
    request(client, :get, url, headers, nil, options, 200)
  end

  @doc """
  Stores a pronunciation lexicon in an AWS Region. If a lexicon with the same
  name already exists in the region, it is overwritten by the new lexicon.
  Lexicon operations have eventual consistency, therefore, it might take some
  time before the lexicon is available to the SynthesizeSpeech operation.

  For more information, see [Managing
  Lexicons](http://docs.aws.amazon.com/polly/latest/dg/managing-lexicons.html).
  """
  def put_lexicon(client, name, input, options \\ []) do
    url = "/v1/lexicons/#{URI.encode(name)}"
    headers = []
    request(client, :put, url, headers, input, options, 200)
  end

  @doc """
  Synthesizes UTF-8 input, plain text or SSML, to a stream of bytes. SSML
  input must be valid, well-formed SSML. Some alphabets might not be
  available with all the voices (for example, Cyrillic might not be read at
  all by English voices) unless phoneme mapping is used. For more
  information, see [How it
  Works](http://docs.aws.amazon.com/polly/latest/dg/how-text-to-speech-works.html).
  """
  def synthesize_speech(client, input, options \\ []) do
    url = "/v1/speech"
    headers = []

    case request(client, :post, url, headers, input, options, 200) do
      {:ok, body, response} ->
        # BUG FIX: previously `body = %{body | ...}` was rebound inside `if`
        # blocks, so the updated value was discarded at the end of each `if`
        # (Elixir scoping); additionally `%{map | key}` raises KeyError for
        # keys not already present, and `response.headers["..."]` does not
        # work on HTTPoison's {name, value} header list. Copy the relevant
        # response headers into the body explicitly instead.
        body =
          body
          |> put_response_header(response, "Content-Type", "ContentType")
          |> put_response_header(response, "x-amzn-RequestCharacters", "RequestCharacters")

        {:ok, body, response}

      result ->
        result
    end
  end

  # Copies the value of `header` from the HTTP response (headers are a list
  # of {name, value} tuples) into `body` under `key`. Leaves `body` unchanged
  # when the header is missing or the body is not a map.
  defp put_response_header(body, response, header, key) when is_map(body) do
    case List.keyfind(response.headers, header, 0) do
      {_name, value} -> Map.put(body, key, value)
      nil -> body
    end
  end

  defp put_response_header(body, _response, _header, _key), do: body

  # Builds the signed request and performs it against the Polly endpoint.
  defp request(client, method, url, headers, input, options, success_status_code) do
    client = %{client | service: "polly"}
    host = get_host("polly", client)
    url = get_url(host, url, client)
    headers = Enum.concat([{"Host", host},
                           {"Content-Type", "application/x-amz-json-1.1"}],
                          headers)
    payload = encode_payload(input)
    headers = AWS.Request.sign_v4(client, method, url, headers, payload)
    perform_request(method, url, payload, headers, options, success_status_code)
  end

  # When no expected status is given, accept the common 200/202/204 codes.
  defp perform_request(method, url, payload, headers, options, nil) do
    case HTTPoison.request(method, url, payload, headers, options) do
      {:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
        {:ok, response}
      {:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
        {:ok, Poison.Parser.parse!(body), response}
      {:ok, response=%HTTPoison.Response{status_code: 202, body: body}} ->
        {:ok, Poison.Parser.parse!(body), response}
      {:ok, response=%HTTPoison.Response{status_code: 204, body: body}} ->
        {:ok, Poison.Parser.parse!(body), response}
      {:ok, _response=%HTTPoison.Response{body: body}} ->
        reason = Poison.Parser.parse!(body)["message"]
        {:error, reason}
      {:error, %HTTPoison.Error{reason: reason}} ->
        {:error, %HTTPoison.Error{reason: reason}}
    end
  end

  defp perform_request(method, url, payload, headers, options, success_status_code) do
    case HTTPoison.request(method, url, payload, headers, options) do
      {:ok, response=%HTTPoison.Response{status_code: ^success_status_code, body: ""}} ->
        {:ok, nil, response}
      {:ok, response=%HTTPoison.Response{status_code: ^success_status_code, body: body}} ->
        {:ok, Poison.Parser.parse!(body), response}
      {:ok, _response=%HTTPoison.Response{body: body}} ->
        reason = Poison.Parser.parse!(body)["message"]
        {:error, reason}
      {:error, %HTTPoison.Error{reason: reason}} ->
        {:error, %HTTPoison.Error{reason: reason}}
    end
  end

  # "localhost" for local testing, otherwise "<service>.<region>.<endpoint>".
  defp get_host(endpoint_prefix, client) do
    if client.region == "local" do
      "localhost"
    else
      "#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
    end
  end

  defp get_url(host, url, %{:proto => proto, :port => port}) do
    "#{proto}://#{host}:#{port}#{url}/"
  end

  # nil input encodes to an empty payload (e.g. GET requests).
  defp encode_payload(input) do
    if input != nil do
      Poison.Encoder.encode(input, [])
    else
      ""
    end
  end
end
|
lib/aws/polly.ex
| 0.864811 | 0.415077 |
polly.ex
|
starcoder
|
defmodule Credo.Code.Scope do
@moduledoc """
This module provides helper functions to determine the scope name at a certain
point in the analysed code.
"""
alias Credo.Code.Block
alias Credo.Code.Module
# Call operators that open a new function-level scope.
@def_ops [:def, :defp, :defmacro]
@doc """
Strips a trailing function name from a dotted scope name, returning only the
module part.

A scope name looks like `"Foo.Bar"` (module) or `"Foo.Bar.baz"` (function);
a final segment starting with a lowercase letter is taken to be a function
name and is dropped. Returns `nil` for `nil`.
"""
def mod_name(nil), do: nil
def mod_name(scope_name) do
names = scope_name |> String.split(".")
if names |> List.last |> String.match?(~r/^[a-z]/) do
names |> Enum.slice(0..length(names) - 2) |> Enum.join(".")
else
scope_name
end
end
@doc """
Returns the scope for the given line as a tuple consisting of the call to
define the scope (`:defmodule`, `:def`, `:defp` or `:defmacro`) and the
name of the scope.
Examples:
{:defmodule, "Foo.Bar"}
{:def, "Foo.Bar.baz"}
"""
# Base case for the line-by-line fallback below: no scope at line 0.
def name(_ast, [line: 0]), do: nil
def name(ast, [line: line]) do
result =
case find_scope(ast, line, [], nil) do
# No definition starts exactly at `line`; walk upwards one line at a
# time until one is found (or line 0 is reached).
nil -> name(ast, line: line - 1)
{op, list} -> {op, Enum.join(list, ".")}
end
result || {nil, ""}
end
# Returns a List for the scope name
# Descend through block/do/else wrappers without extending the name.
defp find_scope({:__block__, _meta, arguments}, line, name_list, last_op) do
find_scope(arguments, line, name_list, last_op)
end
defp find_scope({:do, arguments}, line, name_list, last_op) do
find_scope(arguments, line, name_list, last_op)
end
defp find_scope({:else, arguments}, line, name_list, last_op) do
find_scope(arguments, line, name_list, last_op)
end
# A list of AST nodes: return the first non-nil scope found among them.
defp find_scope(list, line, name_list, last_op) when is_list(list) do
list
|> Enum.find_value(&find_scope(&1, line, name_list, last_op))
end
# `defmodule`: extend the name with the module alias, then either match the
# exact line, prune (definition starts after `line`), or recurse into the body.
defp find_scope({:defmodule, meta, [{:__aliases__, _, module_name}, arguments]}, line, name_list, _last_op) do
name_list = name_list ++ module_name
cond do
meta[:line] == line -> {:defmodule, name_list}
meta[:line] > line -> nil
true -> arguments |> Block.do_block_for! |> find_scope(line, name_list, :defmodule)
end
end
# Generate one clause per def-like operator (:def, :defp, :defmacro); same
# match/prune/recurse logic as the defmodule clause above.
for op <- @def_ops do
defp find_scope({unquote(op) = op, meta, arguments} = ast, line, name_list, _last_op) when is_list(arguments) do
fun_name = Module.def_name(ast)
name_list = name_list ++ [fun_name]
cond do
meta[:line] == line -> {op, name_list}
meta[:line] > line -> nil
true -> arguments |> Block.do_block_for! |> find_scope(line, name_list, op)
end
end
end
# Any other call node: if it sits on the target line, the enclosing scope is
# whatever definition we last descended through (`last_op`).
defp find_scope({_atom, meta, arguments}, line, name_list, last_op) do
if meta[:line] == line do
{last_op, name_list}
else
find_scope(arguments, line, name_list, last_op)
end
end
# Literals and anything else: no scope here.
defp find_scope(_value, _line, _name_list, _last_op) do
nil
end
end
|
lib/credo/code/scope.ex
| 0.605916 | 0.423279 |
scope.ex
|
starcoder
|
defmodule Exenv do
  @moduledoc """
  Loads env vars using an adapter-based approach.
  Exenv dynamically assigns env vars on application start using whatever adapters
  have been configured to run. By default, Exenv is setup to use the included
  `Exenv.Adapters.Dotenv` adapter - loading env vars from a `.env` file in your
  projects directory on startup.
  ## Configuration
  If you need to further configure Exenv - it is typically done via application config.
  config :exenv, [
  adapters: [
  {Exenv.Adapters.Dotenv, [file: "path/to/.env"]}
  ]
  ]
  You can simply list the adapters and any options you would like to pass to it
  via `{MyAdapter, opts}` - where `opts` is a keyword list of options defined by
  the adapter.
  Alternatively, you can also configure Exenv to be used via your own supervision
  tree. In this case simply add the following to your config:
  config :exenv, start_on_application: false
  You can then add Exenv to your supervisor.
  children = [
  {Exenv, [adapters: [{Exenv.Adapters.Dotenv, [file: "path/to/.env"]}]]}
  ]
  ## Encryption
  Exenv has support for encryption out of the box. This allows you to keep an
  encrypted secrets file checked into your repository. Please see `Exenv.Encryption`
  for more details.
  """

  use Application

  alias Exenv.Utils

  @type on_load :: [{Exenv.Adapter.t(), Exenv.Adapter.result()}]

  @impl true
  @spec start(any(), any()) :: {:ok, pid()}
  def start(_type, _args) do
    case Exenv.Config.get(:start_on_application) do
      # Auto-start disabled: boot an empty supervisor so the application
      # callback still yields a supervised pid.
      disabled when disabled in [nil, false] ->
        Supervisor.start_link([], strategy: :one_for_one)

      _enabled ->
        start_link()
    end
  end

  @doc """
  Starts the Exenv process.
  """
  @spec start_link(any()) :: Supervisor.on_start()
  def start_link(opts \\ []), do: Exenv.Supervisor.start_link(opts)

  @doc false
  def child_spec(opts \\ []) do
    %{id: Exenv.Supervisor, start: {Exenv.Supervisor, :start_link, [opts]}}
  end

  @doc """
  Returns `{:ok, binary}`, where binary is a binary data object that contains the
  contents of path, or `{:error, reason}` if an error occurs.
  You can optionally pass an mfa `{module, function, args}` that will be evaluated
  and should return the intended path. This allows for runtime setup.
  ## Options
  * `:encryption` - options used to decrypt the binary result if required.
  ```
  # Decrypts the file using the MASTER_KEY env var
  [encryption: true]
  # Decrypts the file using the master key file
  [encryption: [master_key: "/path/to/master.key"]]
  ```
  """
  @spec read_file(binary() | mfa(), keyword()) :: {:ok, binary} | {:error, any()}
  def read_file(path_or_mfa, opts \\ []) do
    contents =
      path_or_mfa
      |> Utils.build_path()
      |> read_contents(Keyword.get(opts, :encryption, false))

    {:ok, contents}
  rescue
    # Boundary function: convert any raised error into an error tuple,
    # exactly as the original try/rescue did.
    error -> {:error, error}
  end

  # Plain read when no encryption options were given.
  defp read_contents(path, encryption) when encryption in [nil, false] do
    File.read!(path)
  end

  # Decrypt the secrets file using the configured (or default) master key.
  # `encryption: true` means "use defaults", i.e. an empty option list.
  defp read_contents(path, encryption) do
    encryption_opts = if is_list(encryption), do: encryption, else: []

    encryption_opts
    |> Keyword.get(:master_key)
    |> Exenv.Encryption.get_master_key!()
    |> Exenv.Encryption.decrypt_secrets!(path)
  end

  @doc """
  Loads all env vars using the adapters defined within our config.
  """
  @spec load() :: on_load()
  def load, do: Exenv.Server.load()

  @doc """
  Loads all env vars using the adapter config provided.
  """
  @spec load(adapters :: [Exenv.Adapter.config()]) :: on_load()
  def load(adapters) when is_list(adapters) do
    for {adapter, opts} <- adapters, do: {adapter, load(adapter, opts)}
  end

  @doc """
  Loads env vars using the adapter and options provided.
  """
  @spec load(adapter :: Exenv.Adapter.t(), opts :: keyword()) :: Exenv.Adapter.result()
  def load(adapter, opts) when is_atom(adapter) and is_list(opts) do
    apply(adapter, :load, [opts])
  end
end
|
lib/exenv.ex
| 0.873889 | 0.612527 |
exenv.ex
|
starcoder
|
defmodule Grizzly.ZWave.Commands.NetworkManagementMultiChannelEndPointReport do
@moduledoc """
Command use to advertise the number of Multi Channel End Points
Params:
* `:seq_number` - the sequence number for this command
* `:node_id` - the node id in question
* `:individual_end_points` - the number of individual end points this device
supports
* `:aggregated_end_points` - the number of aggregated end points this device
supports (optional, defaults to 0)
Aggregated end points are used for reporting accumulated consumption of a
physical resource via Meter and Multilevel Sensor Command Class. For example,
if there is a power switch with 3 binary switches it could support support 1
aggregated endpoint that can report the total accumulated power consumption
for all 3 switches on the power strip.
For more information on Z-Wave Multi Channel see:
https://www.silabs.com/documents/public/application-notes/APL12955-Z-Wave-Multi-Channel-Basics.pdf
"""
@behaviour Grizzly.ZWave.Command
alias Grizzly.ZWave
alias Grizzly.ZWave.{Command, NodeId}
alias Grizzly.ZWave.CommandClasses.NetworkManagementProxy
@type param() ::
{:seq_number, ZWave.seq_number()}
| {:node_id, ZWave.node_id()}
| {:individual_end_points, 0..127}
| {:aggregated_end_points, 0..127}
@impl Grizzly.ZWave.Command
@spec new([param()]) :: {:ok, Command.t()}
# Builds the command struct; params are not validated here.
def new(params \\ []) do
command = %Command{
name: :network_management_multi_channel_end_point_report,
command_byte: 0x06,
command_class: NetworkManagementProxy,
params: params,
impl: __MODULE__
}
{:ok, command}
end
@impl Grizzly.ZWave.Command
# Encodes params to the wire format. `:seq_number`, `:node_id` and
# `:individual_end_points` are required (param! raises if absent);
# `:aggregated_end_points` defaults to 0. The end-point counts are each
# packed as a reserved bit followed by a 7-bit count.
def encode_params(command, encode_opts \\ []) do
seq_number = Command.param!(command, :seq_number)
node_id = Command.param!(command, :node_id)
individual_end_points = Command.param!(command, :individual_end_points)
aggregated_end_points = Command.param(command, :aggregated_end_points, 0)
# first byte is 0x00 as it is marked as reserved in the Z-Wave specification
end_points_bin = <<0x00, 0::1, individual_end_points::7, 0::1, aggregated_end_points::7>>
# Version 4+ uses the extended (16-bit capable) node id encoding with the
# end-point bytes as delimiter; earlier versions use a single node id byte.
case Keyword.get(encode_opts, :command_class_version, 4) do
4 ->
<<seq_number, NodeId.encode_extended(node_id, delimiter: end_points_bin)::binary>>
v when v < 4 ->
<<seq_number, node_id, end_points_bin::binary>>
end
end
@impl Grizzly.ZWave.Command
# Decodes the wire format back into params. NOTE(review): the destructuring
# below assumes the v4 layout (node id byte, reserved byte, then the two
# end-point bytes); node id parsing itself is delegated to NodeId.parse with
# a 3-byte delimiter — presumably that handles both 8- and 16-bit node ids.
# TODO confirm pre-v4 frames are never routed through this decoder.
def decode_params(<<seq_number, params::binary>>) do
<<_node_id, _reserved_byte, _reserved1::1, individual_end_points::7, _reversed2::1,
aggregated_end_points::7, _rest::binary>> = params
{:ok,
[
seq_number: seq_number,
node_id: NodeId.parse(params, delimiter_size: 3),
individual_end_points: individual_end_points,
aggregated_end_points: aggregated_end_points
]}
end
end
|
lib/grizzly/zwave/commands/network_management_multi_channel_end_point_report.ex
| 0.889742 | 0.50653 |
network_management_multi_channel_end_point_report.ex
|
starcoder
|
defmodule Puid.Entropy do
  @moduledoc """
  [Entropy](https://en.wikipedia.org/wiki/Entropy_(information_theory)) related calculations
  The implementation is based on mathematical approximations to the solution of what is often
  referred to as the [Birthday
  Problem](https://en.wikipedia.org/wiki/Birthday_problem#Calculating_the_probability).
  """

  alias Puid.CharSet

  @doc """
  Entropy bits for generating a `total` number of instances with the given `risk` of repeat
  The total size of the instance pool is 2<sup>bits</sup>.
  ## Example
      iex> Puid.Entropy.bits(10.0e6, 1.0e12)
      85.37013046707142
  """
  # Spec fix: the guard accepts any number (the doctest itself passes floats),
  # and the degenerate clauses return the integer 0, so the previous
  # `(pos_integer, pos_integer) :: float()` spec was inaccurate.
  @spec bits(number(), number()) :: float() | 0
  # A single instance, or a "risk" of 1 (certainty), needs no entropy.
  def bits(1, _), do: 0
  def bits(_, 1), do: 0

  def bits(total, risk) when is_number(total) and is_number(risk) do
    n =
      cond do
        # For small totals use the exact log2(total * (total - 1)) form;
        # for large totals 2*log2(total) is an accurate approximation.
        total < 1000 ->
          :math.log2(total) + :math.log2(total - 1)

        true ->
          2 * :math.log2(total)
      end

    n + :math.log2(risk) - 1
  end

  @doc """
  Entropy bits of a string of `len` generated from characters `charset`, where `charset` is
  either an pre-defined `Puid.CharSet` or a string of unique characters.
  The character set must be comprised of unique symbols, and it is assumed each symbol in the
  character set has equal probability of occurrence (which maximizes entropy).
  ## Example
      iex> Puid.Entropy.bits_for_len(14, :alphanum)
      {:ok, 83}
      iex> Puid.Entropy.bits_for_len(14, "dingosky")
      {:ok, 42}
  """
  # Spec fix: `Error.reason()` referenced a non-existent module; the error
  # reasons come from `Puid.Error`.
  @spec bits_for_len(non_neg_integer(), atom() | String.t()) ::
          {:ok, non_neg_integer()} | {:error, Puid.Error.reason()}
  def bits_for_len(len, charset) when -1 < len and (is_atom(charset) or is_binary(charset)) do
    case bits_per_char(charset) do
      {:ok, ebpc} ->
        # Truncate (not round): only whole bits actually attained count.
        {:ok, (len * ebpc) |> trunc()}

      error ->
        error
    end
  end

  @doc """
  Same as `Puid.Entropy.bits_for_len/2` but either returns the integer __bits__ or raises a
  `Puid.Error`
  ## Example
      iex> Puid.Entropy.bits_for_len!(14, :alphanum)
      83
      iex> Puid.Entropy.bits_for_len!(14, "dingosky")
      42
  """
  @spec bits_for_len!(non_neg_integer(), atom() | String.t()) :: non_neg_integer()
  def bits_for_len!(len, charset) when -1 < len and (is_atom(charset) or is_binary(charset)) do
    case(bits_for_len(len, charset)) do
      {:ok, ebpc} ->
        ebpc

      {:error, reason} ->
        raise Puid.Error, reason
    end
  end

  @deprecated "Use Puid.Entropy.bits_for_len"
  defdelegate bits_for_length(len, charset), to: Puid.Entropy, as: :bits_for_len

  @deprecated "Use Puid.Entropy.bits_for_len!"
  defdelegate bits_for_length!(len, charset), to: Puid.Entropy, as: :bits_for_len!

  @doc """
  Entropy bits per character where `charset` is either an pre-defined `Puid.CharSet` or a string of
  unique characters.
  The character set must be comprised of unique symbols, and it is assumed each symbol in the
  character set has equal probability of occurrence (which maximizes entropy).
  Returns `{:ok, bits}`; or `{:error, reason}` if `arg` is either an unrecognized pre-defined
  `Puid.CharSet` or a string of non-unique characters.
  ## Example
      iex> Puid.Entropy.bits_per_char(:alphanum)
      {:ok, 5.954196310386875}
      iex> Puid.Entropy.bits_per_char("dingosky")
      {:ok, 3.0}
  """
  @spec bits_per_char(atom() | String.t()) :: {:ok, float()} | {:error, Puid.Error.reason()}
  def bits_per_char(charset)

  def bits_per_char(charset) when is_atom(charset) do
    case CharSet.chars(charset) do
      :undefined ->
        {:error, "Invalid: charset not recognized"}

      chars ->
        {:ok, ebpc(chars)}
    end
  end

  def bits_per_char(chars) when is_binary(chars) do
    if CharSet.unique?(chars),
      do: {:ok, ebpc(chars)},
      else: {:error, "Invalid: chars not unique"}
  end

  @doc """
  Same as `bits_per_char/1` but either returns the `bits` or raises a `Puid.Error`
  ## Example
      iex> Puid.Entropy.bits_per_char!(:alphanum)
      5.954196310386875
      Puid.Entropy.bits_per_char!("dingosky")
      3.0
  """
  @spec bits_per_char!(atom() | String.t()) :: float()
  def bits_per_char!(charset)

  def bits_per_char!(arg) do
    case bits_per_char(arg) do
      {:ok, ebpc} ->
        ebpc

      {:error, reason} ->
        raise Puid.Error, reason
    end
  end

  # Entropy per character is log2 of the charset size (equiprobable symbols).
  defp ebpc(chars) do
    chars |> String.length() |> :math.log2()
  end

  @doc """
  Length needed for a string generated from `charset` to have `bits` of entropy.
  The character set must be comprised of unique symbols, and it is assumed each symbol in the
  character set has equal probability of occurrence (which maximizes entropy).
  ## Example
      iex> Puid.Entropy.len_for_bits(128, :alphanum)
      {:ok, 22}
      iex> Puid.Entropy.len_for_bits(128, "dingosky")
      {:ok, 43}
  """
  @spec len_for_bits(non_neg_integer(), atom() | String.t()) ::
          {:ok, non_neg_integer()} | {:error, Puid.Error.reason()}
  def len_for_bits(bits, charset) when -1 < bits and (is_atom(charset) or is_binary(charset)) do
    case bits_per_char(charset) do
      {:ok, ebpc} ->
        # Round up: the string must carry at least `bits` of entropy.
        {:ok, (bits / ebpc) |> :math.ceil() |> round()}

      error ->
        error
    end
  end

  @doc """
  Same as `Puid.Entropy.len_for_bits/2` but either returns the integer __len__ or raises a
  `Puid.Error`
  ## Example
      iex> Puid.Entropy.len_for_bits!(128, :alphanum)
      22
      iex> Puid.Entropy.len_for_bits!(128, "dingosky")
      43
  """
  @spec len_for_bits!(non_neg_integer(), atom() | String.t()) :: non_neg_integer()
  def len_for_bits!(bits, charset) when -1 < bits and (is_atom(charset) or is_binary(charset)) do
    case len_for_bits(bits, charset) do
      {:ok, len} ->
        len

      {:error, reason} ->
        raise Puid.Error, reason
    end
  end
end
|
lib/puid/entropy.ex
| 0.933817 | 0.757884 |
entropy.ex
|
starcoder
|
defmodule Hangman.Pass do
@moduledoc """
Module defines types `Pass.key` and `Pass.t`
Returns result of pass runs as distinguished by initial
`:start` or subsequent `:guessing` modes.
Pass data is a group of the pass size, the letter frequency tally,
and relevant data on final word information.
Given a new `Hangman` game, initially the words pass is the size of all words
in the dictionary of secret length k. As each round proceeds, this is reduced by the
`Hangman` pattern sequence. It is these remaining possible word set instances
that are stored in the cache.
After each player has made their `Hangman` round guess, the resultant reduced
words `pass` data is stored into the `Pass.Cache` for access on the
subsequent round. The expired `pass` data from stale rounds is subsequently
removed from the `cache`.
"""
alias Hangman.{Reduction, Pass, Pass.Cache, Words, Counter, Dictionary}
defstruct size: 0, tally: %{}, possible: "", last_word: ""
@typedoc "Defines word `pass` type"
@type t :: %__MODULE__{}
@typedoc "Defines word `pass` key type"
@type key ::
{id :: String.t() | tuple, game_num :: non_neg_integer, round_num :: non_neg_integer}
@doc """
Result routine retrieves the `pass` size, tally, possible words,
and other data given these cache `keys`. Relies on either the Dictionary
Cache or the Reduction Engine to compute new pass data
* `:start` - this is the initial game start `pass`, so we
request the data from the `Dictionary.Cache`. The data is stored into
the `Pass.Cache` via `Pass.Cache.Writer.write/2`. Returns `pass` data type.
* `:guessing` - retrieves the pass data from the last
player round and relies on `Reduction.Engine.reduce/3` to reduce the possible
`Hangman` words set with `reduce_key`. When the reduction is finished, we
write the data back to the `Pass.Cache` and return the new `pass` data.
"""
@spec result(atom, pass_key :: key, reduce_key :: Reduction.key()) :: {key, t}
def result(:start, {id, game_no, round_no} = pass_key, reduce_key)
when (is_binary(id) or is_tuple(id)) and is_number(game_no) and is_number(round_no) do
# Asserts
# These matches intentionally crash on a malformed reduce_key ("let it
# crash"): :start passes must carry :start and :secret_length entries.
{:ok, true} = Keyword.fetch(reduce_key, :start)
{:ok, length_key} = Keyword.fetch(reduce_key, :secret_length)
# Since this is the first pass, grab the words and tally from
# the Dictionary Cache
# Subsequent round lookups will be from the pass table
# The struct matches assert the lookup results have the expected shapes.
words = %Words{} = Dictionary.lookup(:words, length_key)
tally = %Counter{} = Dictionary.lookup(:tally, length_key)
pass_size = Words.count(words)
pass_info = %Pass{size: pass_size, tally: tally, last_word: ""}
# Store pass info into ets table for round 2 (next pass)
# Allow writer engine to execute (and distribute) as necessary
# Side effect: the words are cached under the *next* round's key before
# this function returns the current round's pass info.
next_pass_key = increment_key(pass_key)
Cache.put(next_pass_key, words)
{pass_key, pass_info}
end
def result(:guessing, {id, game_no, round_no} = pass_key, reduce_key)
when (is_binary(id) or is_tuple(id)) and is_number(game_no) and is_number(round_no) do
# Crash on malformed reduce_key: guessing passes require the guessed
# letters and the regex match key.
{:ok, exclusion_set} = Keyword.fetch(reduce_key, :guessed_letters)
{:ok, regex_key} = Keyword.fetch(reduce_key, :regex_match_key)
# Send pass and reduce information off to Engine server
# to execute (and distribute) as appropriate
# operation subsequently writes back to pass_cache
pass_info = Reduction.Engine.reduce(pass_key, regex_key, exclusion_set)
{pass_key, pass_info}
end
@doc """
Removes pass key from ets
"""
@spec delete(key) :: :ok
def delete({id, game_no, round_no} = pass_key)
when (is_binary(id) or is_tuple(id)) and is_number(game_no) and is_number(round_no) do
Cache.delete(pass_key)
end
# HELPERS
@doc "Helper to increment pass key"
@spec increment_key(key) :: key
def increment_key({id, game_num, round_num} = _key) do
{id, game_num, round_num + 1}
end
end
|
lib/hangman/pass.ex
| 0.880116 | 0.836955 |
pass.ex
|
starcoder
|
defmodule FloUI.Scrollable.ScrollBar do
@moduledoc """
Scroll bars are meant to be used within the Scrollable.Container component, but you can use them to build your own scrollable containers.
The following events are emitted.
``` elixir
{:register_scroll_bar, direction, scroll_bar_state}
{:update_scroll_position, direction, scroll_position}
{:scroll_bar_state_changed, direction, scroll_bar_state}
```
additionally you can cast a vector2 offset to a scroll bar
``` elixir
GenServer.cast(scroll_bar_pid, {:update_cursor_scroll, offset})
```
data is an object in the form of
``` elixir
%{
direction: :vertical,
content_size: {200, 200},
width: 15,
height: 500,
scroll_position: {0, 0}
}
```
The following options are accepted
``` elixir
[
show_buttons: true,
theme: Scenic.Primitive.Style.Theme.preset(:dark),
border: 1,
radius: 3,
thickness: 15
]
```
"""
alias Scenic.Graph
alias FloUI.Scrollable.Direction
alias FloUI.Scrollable.Drag
alias FloUI.Scrollable.Wheel
alias FloUI.Scrollable.PositionCap
use SnapFramework.Component,
name: :scroll_bar,
template: "lib/scrollable/scroll_bar.eex",
controller: FloUI.Scrollable.ScrollBarController,
assigns: [],
opts: []
defcomponent(:scroll_bar, :map)
@default_drag_settings %{mouse_buttons: [:btn_left, :btn_right, :btn_middle]}
@default_thickness 15
@default_radius 3
@default_id :scroll_bar
use_effect([assigns: [scroll_position: :any]],
run: [:on_scroll_position_change]
)
@impl true
# Initializes all scroll-bar scene assigns from the incoming `data` map and
# component opts, runs the init_* pipeline (buttons, sizes, position cap,
# background, drag control, button geometry), and finally registers this
# scroll bar with the parent container via :register_scroll_bar.
def setup(%{assigns: %{data: data, opts: opts}} = scene) do
scene =
assign(scene,
id: opts[:id] || @default_id,
thickness: opts[:thickness] || @default_thickness,
radius: opts[:radius] || @default_radius,
# width/height are wrapped in Direction tags so later math can be
# applied per-axis.
width: Direction.as_horizontal(data.width),
height: Direction.as_vertical(data.height),
direction: data.direction,
content_size: Direction.from_vector_2(data.content_size, data.direction),
frame_size: Direction.from_vector_2({data.width, data.height}, data.direction),
scroll_position: Direction.from_vector_2(data.scroll_position, data.direction),
scroll_bar_slider_background: :released,
last_scroll_position: Direction.from_vector_2(data.scroll_position, data.direction),
scroll_bar_state: %{
scrolling: :idle,
drag_state: Drag.init(opts[:scroll_drag] || @default_drag_settings),
wheel_state: %Wheel{},
# Filled in by init_scroll_buttons below (nil when buttons disabled).
scroll_buttons: nil,
pid: self()
}
)
# Order matters: init_size depends on scroll_buttons, and the later
# steps depend on the (possibly shrunk) width/height and displacement.
|> init_scroll_buttons
|> init_size
|> init_position_cap
|> init_scroll_bar_background
|> init_scroll_bar_drag_control
|> init_scroll_bar_buttons
send_parent_event(
scene,
{:register_scroll_bar, scene.assigns.direction, scene.assigns.scroll_bar_state}
)
scene
end
@impl true
# The component's bounding box: anchored at the origin, sized by the
# requested width and height.
def bounds(data, _opts) do
  width = data.width
  height = data.height

  {0.0, 0.0, width, height}
end
@impl true
# Accepts new data from the parent: remembers the current scroll position
# as the last one and stores the freshly supplied position.
def process_update(data, _opts, scene) do
  previous_position = scene.assigns.scroll_position
  new_position = Direction.from_vector_2(data.scroll_position, scene.assigns.direction)

  scene =
    assign(scene,
      last_scroll_position: previous_position,
      scroll_position: new_position
    )

  {:noreply, scene}
end
@impl true
# Drag-control button released (action 0): end the drag, translate the cursor
# position into a world scroll position, and notify the parent.
def process_input(
{:cursor_button, {button, action, _, position}},
:scroll_bar_slider_drag_control,
%{assigns: %{direction: direction, scroll_bar_state: scroll_bar_state}} = scene
) do
case action do
0 ->
unrequest_input(scene, [:cursor_pos, :cursor_button])
scrolling = :idle
# Recover where the content was when the drag started, and where the
# cursor grabbed the control, along the scrolled axis.
{_, content_start} = Direction.from_vector_2(scroll_bar_state.drag_state.drag_start_content_position, direction)
{_, drag_start} = Direction.from_vector_2(scroll_bar_state.drag_state.drag_start, direction)
# New position = cursor position shifted by the grab offset.
scroll_position =
Direction.from_vector_2(position, direction)
|> Direction.map_horizontal(fn pos -> pos - drag_start + content_start end)
|> Direction.map_vertical(fn pos -> pos - drag_start + content_start end)
scroll_position = local_to_world(scene, scroll_position)
drag_state =
Drag.handle_mouse_release(
scroll_bar_state.drag_state,
button,
position
)
scroll_bar_state = %{
scroll_bar_state |
scrolling: scrolling,
drag_state: drag_state
}
scene = assign(scene,
scroll_bar_state: scroll_bar_state,
last_scroll_position: scene.assigns.scroll_position,
scroll_position: scroll_position
)
send_parent_event(scene, {:update_scroll_position, direction, scroll_position})
{:noreply, scene}
1 ->
# Drag-control pressed (action 1): begin dragging and subscribe to
# cursor movement until release.
request_input(scene, [:cursor_pos, :cursor_button])
scrolling = :dragging
drag_state =
Drag.handle_mouse_click(
scroll_bar_state.drag_state,
button,
position,
local_scroll_position_vector2(scene)
)
scroll_bar_state = %{
scroll_bar_state |
scrolling: scrolling,
drag_state: drag_state
}
scene = assign(scene,
scroll_bar_state: scroll_bar_state
)
{:noreply, scene}
end
end
# Cursor moved while dragging: same grab-offset math as the release clause
# above, updating the scroll position continuously.
def process_input(
{:cursor_pos, position},
_,
%{assigns: %{direction: direction, scroll_bar_state: scroll_bar_state}} =
scene
) do
{_, content_start} = Direction.from_vector_2(scroll_bar_state.drag_state.drag_start_content_position, direction)
{_, drag_start} = Direction.from_vector_2(scroll_bar_state.drag_state.drag_start, direction)
scroll_position =
Direction.from_vector_2(position, direction)
|> Direction.map_horizontal(fn pos -> pos - drag_start + content_start end)
|> Direction.map_vertical(fn pos -> pos - drag_start + content_start end)
scroll_position = local_to_world(scene, scroll_position)
drag_state = Drag.handle_mouse_move(scroll_bar_state.drag_state, position)
scroll_bar_state = %{
scroll_bar_state |
drag_state: drag_state
}
scene =
assign(scene,
scroll_bar_state: scroll_bar_state,
last_scroll_position: scene.assigns.scroll_position,
scroll_position: scroll_position
)
send_parent_event(scene, {:update_scroll_position, direction, scroll_position})
{:noreply, scene}
end
# Catch-all for cursor movement we are not interested in.
def process_input(
{:cursor_pos, _},
_,
scene
) do
{:noreply, scene}
end
# Press on the slider background is ignored; the action happens on release.
def process_input(
{:cursor_button, {_button, 1, _, _}},
:scroll_bar_slider_background,
scene
) do
{:noreply, scene}
end
# Release on the slider background: jump the scroll position toward the
# clicked point (centered on the drag control) and notify the parent.
def process_input(
{:cursor_button, {_button, 0, _, position}},
:scroll_bar_slider_background,
%{assigns: %{direction: direction}} = scene
) do
scroll_position =
Direction.from_vector_2(position, direction)
|> Direction.map_vertical(fn pos -> pos - button_height(scene) / 2 + pos end)
|> Direction.map_horizontal(fn pos -> pos - button_width(scene) / 2 + pos end)
scroll_position = local_to_world(scene, scroll_position)
scene =
scene
|> assign(
last_scroll_position: scene.assigns.scroll_position,
scroll_position: scroll_position
)
send_parent_event(scene, {:update_scroll_position, direction, scroll_position})
{:noreply, scene}
end
# Button released outside any named target (id nil): abort any drag in
# progress and broadcast the idle state.
def process_input(
{:cursor_button, {button, 0, _, position}},
nil,
%{assigns: %{direction: direction, scroll_bar_state: scroll_bar_state}} = scene
) do
unrequest_input(scene, [:cursor_pos, :cursor_button])
scrolling = :idle
drag_state =
Drag.handle_mouse_release(
scroll_bar_state.drag_state,
button,
position
)
scroll_bar_state = %{
scroll_bar_state |
scrolling: scrolling,
drag_state: drag_state
}
scene = assign(scene, scroll_bar_state: scroll_bar_state)
send_parent_event(scene, {:scroll_bar_state_changed, direction, scroll_bar_state})
{:noreply, scene}
end
# Scroll button (e.g. :scroll_button_1 / :scroll_button_2) pressed: mark it
# :pressed and enter the :scrolling state.
def process_input(
{:cursor_button, {_button, 1, _, _}},
button,
%{assigns: %{direction: direction, scroll_bar_state: scroll_bar_state}} = scene
) do
scroll_buttons = Map.update!(scroll_bar_state.scroll_buttons, button, fn _ -> :pressed end)
scrolling = :scrolling
scroll_bar_state = %{
scroll_bar_state |
scrolling: scrolling,
scroll_buttons: scroll_buttons
}
scene =
scene
|> assign(scroll_bar_state: scroll_bar_state)
send_parent_event(scene, {:scroll_bar_state_changed, direction, scroll_bar_state})
{:noreply, scene}
end
# Scroll button released: mark it :released and return to :idle.
def process_input(
{:cursor_button, {_button, 0, _, _}},
button,
%{assigns: %{direction: direction, scroll_bar_state: scroll_bar_state}} = scene
) do
scroll_buttons = Map.update!(scroll_bar_state.scroll_buttons, button, fn _ -> :released end)
scrolling = :idle
scroll_bar_state = %{
scroll_bar_state |
scrolling: scrolling,
scroll_buttons: scroll_buttons
}
scene =
scene
|> assign(scroll_bar_state: scroll_bar_state)
send_parent_event(scene, {:scroll_bar_state_changed, direction, scroll_bar_state})
{:noreply, scene}
end
@impl true
# Mouse-wheel delta cast for a vertical bar. A delta whose floor or ceil is 0
# (i.e. |offset| < 1) is treated as "wheel stopped" -> :idle; otherwise the
# wheel state starts/continues scrolling. Either way the parent is notified.
def process_cast(
{:update_cursor_scroll, {{_, offset_y}, _}},
%{assigns: %{direction: :vertical = direction, scroll_bar_state: scroll_bar_state}} = scene
) do
scene =
if Float.floor(offset_y) == 0 or Float.ceil(offset_y) == 0 do
scroll_bar_state = %{
scroll_bar_state |
wheel_state: Wheel.stop_scrolling(scroll_bar_state.wheel_state, {direction, 0}),
scrolling: :idle,
}
send_parent_event(scene, {:scroll_bar_state_changed, direction, scroll_bar_state})
assign(scene, scroll_bar_state: scroll_bar_state)
else
scroll_bar_state = %{
scroll_bar_state |
wheel_state: Wheel.start_scrolling(scroll_bar_state.wheel_state, {direction, offset_y}),
scrolling: :wheel,
}
send_parent_event(scene, {:scroll_bar_state_changed, direction, scroll_bar_state})
assign(scene, scroll_bar_state: scroll_bar_state)
end
{:noreply, scene}
end
# Horizontal twin of the clause above, driven by the x component of the
# wheel offset.
def process_cast(
{:update_cursor_scroll, {{offset_x, _}, _}},
%{assigns: %{direction: :horizontal = direction, scroll_bar_state: scroll_bar_state}} = scene
) do
scene =
if Float.floor(offset_x) == 0 or Float.ceil(offset_x) == 0 do
scroll_bar_state = %{
scroll_bar_state |
wheel_state: Wheel.stop_scrolling(scroll_bar_state.wheel_state, {direction, offset_x}),
scrolling: :idle,
}
send_parent_event(scene, {:scroll_bar_state_changed, direction, scroll_bar_state})
assign(scene, scroll_bar_state: scroll_bar_state)
else
scroll_bar_state = %{
scroll_bar_state |
wheel_state: Wheel.start_scrolling(scroll_bar_state.wheel_state, {direction, offset_x}),
scrolling: :wheel,
}
send_parent_event(scene, {:scroll_bar_state_changed, direction, scroll_bar_state})
assign(scene, scroll_bar_state: scroll_bar_state)
end
{:noreply, scene}
end
@spec init_scroll_bar_background(Scenic.Scene.t) :: Scenic.Scene.t
# Computes the background track geometry: the track is as long as the bar
# along the scroll axis and as thick as the configured thickness across it,
# offset by the scroll-bar displacement.
defp init_scroll_bar_background(
       %{assigns: %{direction: :vertical, thickness: thickness, height: height}} = scene
     ) do
  assign(
    scene,
    scroll_bar_background_width: thickness,
    scroll_bar_background_height: Direction.unwrap(height),
    scroll_bar_background_pos: scene.assigns.scroll_bar_displacement
  )
end

defp init_scroll_bar_background(
       %{assigns: %{direction: :horizontal, thickness: thickness, width: width}} = scene
     ) do
  assign(
    scene,
    scroll_bar_background_width: Direction.unwrap(width),
    scroll_bar_background_height: thickness,
    scroll_bar_background_pos: scene.assigns.scroll_bar_displacement
  )
end
@spec init_scroll_bar_drag_control(Scenic.Scene.t) :: Scenic.Scene.t
# Computes the drag-control (thumb) geometry from the sibling sizing helpers
# and places it at the current local scroll position.
defp init_scroll_bar_drag_control(scene) do
  assign(
    scene,
    scroll_bar_drag_control_width: button_width(scene),
    scroll_bar_drag_control_height: button_height(scene),
    scroll_bar_drag_control_pos: local_scroll_position_vector2(scene)
  )
end
@spec init_scroll_bar_buttons(Scenic.Scene.t) :: Scenic.Scene.t
# Computes the geometry of the two arrow buttons for a vertical bar.
# Button 1 sits at the start of the track, button 2 past the far end
# (width + height + button size along the axis). The +/-2 offsets nudge the
# buttons clear of the track; the 48/2 terms center a 48px icon inside the
# button square. NOTE(review): 48 appears to be the icon size in px — confirm.
defp init_scroll_bar_buttons(%{assigns: %{direction: :vertical = direction}} = scene) do
size = scroll_button_size(scene)
{button_2_x, button_2_y} =
Direction.return(size, direction)
|> Direction.add(scene.assigns.width)
|> Direction.add(scene.assigns.height)
|> Direction.to_vector_2()
scroll_button_1_width = size
scroll_button_1_height = size
scroll_button_1_pos = {0, -2}
# Icon for button 1 points "up": rotated half a turn from the default.
scroll_button_1_icon_rotation = :math.pi()
scroll_button_1_icon_pos =
{size / 2 - 48 / 2, size / 2 - 48 / 2}
scroll_button_2_width = size
scroll_button_2_height = size
scroll_button_2_pos = {button_2_x, button_2_y + 2}
scroll_button_2_icon_rotation = 0
scroll_button_2_icon_pos =
Scenic.Math.Vector2.add(scroll_button_2_pos, scroll_button_1_icon_pos)
assign(scene,
scroll_button_1_width: scroll_button_1_width,
scroll_button_1_height: scroll_button_1_height,
scroll_button_1_pos: scroll_button_1_pos,
scroll_button_1_icon_rotation: scroll_button_1_icon_rotation,
scroll_button_1_icon_pos: scroll_button_1_icon_pos,
scroll_button_2_width: scroll_button_2_width,
scroll_button_2_height: scroll_button_2_height,
scroll_button_2_pos: scroll_button_2_pos,
scroll_button_2_icon_rotation: scroll_button_2_icon_rotation,
scroll_button_2_icon_pos: scroll_button_2_icon_pos
)
end
# Horizontal twin: same layout rotated a quarter turn; icons point left/right.
defp init_scroll_bar_buttons(%{assigns: %{direction: :horizontal = direction}} = scene) do
size = scroll_button_size(scene)
{button_2_x, button_2_y} =
Direction.return(size, direction)
|> Direction.add(scene.assigns.width)
|> Direction.add(scene.assigns.height)
|> Direction.to_vector_2()
scroll_button_1_width = size
scroll_button_1_height = size
scroll_button_1_pos = {-2, 0}
scroll_button_1_icon_rotation = :math.pi() / 2
scroll_button_1_icon_pos =
{size / 2 - 48 / 2, size / 2 - 48 / 2}
scroll_button_2_width = size
scroll_button_2_height = size
scroll_button_2_pos = {button_2_x + 2, button_2_y}
scroll_button_2_icon_rotation = -:math.pi() / 2
scroll_button_2_icon_pos =
Scenic.Math.Vector2.add(scroll_button_2_pos, scroll_button_1_icon_pos)
assign(scene,
scroll_button_1_width: scroll_button_1_width,
scroll_button_1_height: scroll_button_1_height,
scroll_button_1_pos: scroll_button_1_pos,
scroll_button_1_icon_rotation: scroll_button_1_icon_rotation,
scroll_button_1_icon_pos: scroll_button_1_icon_pos,
scroll_button_2_width: scroll_button_2_width,
scroll_button_2_height: scroll_button_2_height,
scroll_button_2_pos: scroll_button_2_pos,
scroll_button_2_icon_rotation: scroll_button_2_icon_rotation,
scroll_button_2_icon_pos: scroll_button_2_icon_pos
)
end
@spec init_scroll_buttons(Scenic.Scene.t) :: Scenic.Scene.t
# Seeds the scroll-button press state inside scroll_bar_state: both buttons
# :released when the :show_buttons opt is truthy, nil when buttons are
# disabled (later init steps branch on this nil).
defp init_scroll_buttons(%{assigns: %{scroll_bar_state: scroll_bar_state, opts: opts}} = scene) do
  buttons =
    case opts[:show_buttons] do
      hidden when hidden in [nil, false] ->
        nil

      _shown ->
        %{
          scroll_button_1: :released,
          scroll_button_2: :released
        }
    end

  assign(scene, scroll_bar_state: %{scroll_bar_state | scroll_buttons: buttons})
end
@spec init_size(Scenic.Scene.t) :: Scenic.Scene.t
# No scroll buttons: the bar keeps its full size; only the displacement
# vector needs caching (it is zero here — scroll_button_size/1 returns 0
# when scroll_buttons is nil).
defp init_size(%{assigns: %{scroll_bar_state: %{scroll_buttons: nil}}} = scene) do
assign(scene, scroll_bar_displacement: Direction.to_vector_2(scroll_bar_displacement(scene)))
end
# With buttons present: shrink the bar by one button size at each end
# (hence the `* 2`) and cache the displacement caused by the leading button.
defp init_size(%{assigns: %{width: width, height: height}} = scene) do
displacement = scroll_bar_displacement(scene)
button_size_difference = Direction.map(displacement, &(&1 * 2))
assign(scene,
width: Direction.subtract(width, button_size_difference),
height: Direction.subtract(height, button_size_difference),
scroll_bar_displacement: Direction.to_vector_2(displacement)
)
end
@spec init_position_cap(Scenic.Scene.t) :: Scenic.Scene.t
# Builds the PositionCap that limits where the drag button may travel and
# stores it in the assigns.
defp init_position_cap(%{assigns: %{direction: direction}} = scene) do
# Upper bound: bar extent minus the drag-button length, offset by one
# scroll-button size (the track starts after the leading button).
max =
Direction.return(0, direction)
|> Direction.add(scene.assigns.width)
|> Direction.add(scene.assigns.height)
|> Direction.map_horizontal(fn width ->
width - button_width(scene) + scroll_button_size(scene)
end)
|> Direction.map_vertical(fn height ->
height - button_height(scene) + scroll_button_size(scene)
end)
|> Direction.to_vector_2()
# Lower bound: the displacement cached by init_size/1 (equivalent to the
# commented-out recomputation kept below).
min =
# scroll_bar_displacement(scene)
scene.assigns.scroll_bar_displacement
assign(scene, position_cap: PositionCap.init(%{min: min, max: max}))
end
@spec scroll_button_size(Scenic.Scene.t) :: number
# Buttons disabled: they occupy no space.
defp scroll_button_size(%{assigns: %{scroll_bar_state: %{scroll_buttons: nil}}}),
do: 0
# NOTE(review): inverting the direction and multiplying by both dimensions
# appears to select the bar's cross-axis extent (its thickness) as the
# square button's edge length — confirm against Direction.invert/1 and
# Direction.multiply/2 semantics.
defp scroll_button_size(%{assigns: %{width: width, height: height, direction: direction}}) do
Direction.return(1, direction)
|> Direction.invert()
|> Direction.multiply(width)
|> Direction.multiply(height)
|> Direction.unwrap()
end
@spec button_width(Scenic.Scene.t) :: number
# Horizontal bar: the drag button's width is the visible fraction
# (frame_size / content_size) of the bar's width.
defp button_width(%{assigns: %{direction: :horizontal}} = scene) do
Direction.divide(scene.assigns.frame_size, scene.assigns.content_size)
|> Direction.multiply(scene.assigns.width)
|> Direction.unwrap()
end
# Non-horizontal bar: the button is as wide as the bar is thick.
# NOTE(review): this reads `assigns.thickness` while the analogous fallback
# in button_height/1 reads `assigns.opts[:thickness]` — one of the two is
# likely wrong; confirm which key is actually assigned during init.
defp button_width(scene), do: scene.assigns.thickness
@spec button_height(Scenic.Scene.t) :: number
# Vertical bar: the drag button's height is the visible fraction
# (frame_size / content_size) of the bar's height.
defp button_height(%{assigns: %{direction: :vertical}} = scene) do
Direction.divide(scene.assigns.frame_size, scene.assigns.content_size)
|> Direction.multiply(scene.assigns.height)
|> Direction.unwrap()
end
# Non-vertical bar: the button is as tall as the bar is thick.
# NOTE(review): this reads `assigns.opts[:thickness]` while the analogous
# fallback in button_width/1 reads `assigns.thickness` — one of the two is
# likely wrong; confirm which key is actually assigned during init.
defp button_height(scene), do: scene.assigns.opts[:thickness]
@spec width_factor(Scenic.Scene.t) :: number
# Ratio between the bar's width and the horizontal content size; falls
# back to 1 (no scaling) for any other shape of assigns.
defp width_factor(%{assigns: %{content_size: {:horizontal, content_width}, width: {_, bar_width}}}),
  do: bar_width / content_width

defp width_factor(_scene), do: 1
@spec height_factor(Scenic.Scene.t) :: number
# Ratio between the bar's height and the vertical content size; falls
# back to 1 (no scaling) for any other shape of assigns.
defp height_factor(%{assigns: %{content_size: {:vertical, content_height}, height: {_, bar_height}}}),
  do: bar_height / content_height

defp height_factor(_scene), do: 1
# POSITION CALCULATIONS
@spec scroll_bar_displacement(Scenic.Scene.t) :: Direction.t
# The track offset introduced by the leading scroll button, tagged with the
# bar's direction.
defp scroll_bar_displacement(%{assigns: %{direction: direction}} = scene),
  do: Direction.return(scroll_button_size(scene), direction)
@spec scroll_position_vector2(Scenic.Scene.t) :: Vector2.t
# The current scroll position as an {x, y} vector.
defp scroll_position_vector2(scene),
  do: Direction.to_vector_2(scene.assigns.scroll_position)
@spec local_scroll_position_vector2(Scenic.Scene.t) :: Vector2.t
# The current scroll position translated into bar-local coordinates.
defp local_scroll_position_vector2(scene),
  do: world_to_local(scene, scroll_position_vector2(scene))
@spec local_to_world(Scenic.Scene.t, Direction.t() | Vector2.t | number)
:: Direction.t() | Vector2.t | number
# Converts a scroll-bar-local coordinate into a world/content coordinate.
# Clause order matters: tagged Direction tuples are handled first, then raw
# {x, y} vectors, then plain numbers (with 0 short-circuiting to 0).
defp local_to_world(%{assigns: %{direction: :horizontal}} = scene, {:horizontal, x}) do
{:horizontal, local_to_world(scene, x)}
end
defp local_to_world(%{assigns: %{direction: :vertical}} = scene, {:vertical, y}) do
{:vertical, local_to_world(scene, y)}
end
# A tagged value on the bar's off-axis collapses to 0.
defp local_to_world(_, {:horizontal, _}), do: {:horizontal, 0}
defp local_to_world(_, {:vertical, _}), do: {:vertical, 0}
# Untagged vector: convert each component independently.
defp local_to_world(scene, {x, y}) do
{local_to_world(scene, x), local_to_world(scene, y)}
end
defp local_to_world(_, 0), do: 0
# Scalar on the bar's axis: clamp via the position cap, strip the leading
# scroll-button offset, and divide out the content scale factor (negated,
# mirroring the multiplication in world_to_local/2).
defp local_to_world(%{assigns: %{direction: :horizontal}} = scene, x) do
{x, _} = PositionCap.cap(scene.assigns.position_cap, {x, 0})
-(x - scroll_button_size(scene)) / width_factor(scene)
end
defp local_to_world(%{assigns: %{direction: :vertical}} = scene, y) do
{_, y} = PositionCap.cap(scene.assigns.position_cap, {0, y})
-(y - scroll_button_size(scene)) / height_factor(scene)
end
@spec world_to_local(Scenic.Scene.t, Vector2.t | number) :: number | Vector2.t
# Inverse of local_to_world/2: converts a world/content coordinate into a
# bar-local one. Vector input keeps only the component on the bar's axis
# and clamps the result to the allowed drag range.
defp world_to_local(%{assigns: %{direction: direction}} = scene, {x, y}) do
position =
Direction.from_vector_2({x, y}, direction)
|> Direction.map(&world_to_local(scene, &1))
|> Direction.to_vector_2()
PositionCap.cap(scene.assigns.position_cap, position)
end
# Scalar input: scale by the content factor (negated) and re-apply the
# leading scroll-button offset. Unlike the vector clause, these results are
# not clamped here.
defp world_to_local(%{assigns: %{direction: :horizontal}} = scene, x),
do: -x * width_factor(scene) + scroll_button_size(scene)
defp world_to_local(%{assigns: %{direction: :vertical}} = scene, y),
do: -y * height_factor(scene) + scroll_button_size(scene)
end
|
lib/scrollable/scroll_bar.ex
| 0.88311 | 0.6735 |
scroll_bar.ex
|
starcoder
|
defmodule Jerboa.Params do
  @moduledoc """
  Data structure representing STUN message parameters
  """

  alias Jerboa.Format.Header.Type.{Class, Method}
  alias Jerboa.Format.Body.Attribute

  defstruct [:class, :method, :identifier, attributes: [],
             signed?: false, verified?: false]

  @typedoc """
  The main data structure representing STUN message parameters

  The following fields correspond to those described in the [STUN
  RFC](https://tools.ietf.org/html/rfc5389#section-6):

  * `class` is one of request, success or failure response, or indication
  * `method` is a STUN (or TURN) message method described in one of the respective RFCs
  * `identifier` is a unique transaction identifier
  * `attributes` is a list of STUN (or TURN) attributes as described in their
    respective RFCs
  * `signed?` indicates whether STUN message was signed with MESSAGE-INTEGRITY
    attribute - it isn't important when encoding a message
  * `verified?` - indicates whether MESSAGE-INTEGRITY from STUN message was
    successfully verified. Same as `signed?`, it's only relevant when decoding
    messages. Note that messages which are `verified?` are also `signed?`, but not
    the other way around.
  """
  @type t :: %__MODULE__{
    class: Class.t,
    method: Method.t,
    identifier: binary,
    attributes: [Attribute.t],
    signed?: boolean,
    verified?: boolean
  }

  @doc """
  Returns params struct with filled in transaction id

  The returned struct defaults to a `:binding` request.
  """
  @spec new :: t
  def new do
    %__MODULE__{class: :request,
                method: :binding,
                identifier: generate_id()}
  end

  @doc """
  Sets STUN class in params struct
  """
  @spec put_class(t, Class.t) :: t
  def put_class(params, class) do
    %{params | class: class}
  end

  @doc """
  Retrieves class field from params struct
  """
  @spec get_class(t) :: Class.t | nil
  def get_class(%__MODULE__{class: class}), do: class

  @doc """
  Sets STUN method in params struct
  """
  @spec put_method(t, Method.t) :: t
  def put_method(params, method) do
    %{params | method: method}
  end

  @doc """
  Retrieves method field from params struct
  """
  @spec get_method(t) :: Method.t | nil
  def get_method(%__MODULE__{method: method}), do: method

  @doc """
  Sets STUN transaction identifier in params struct
  """
  @spec put_id(t, binary) :: t
  def put_id(params, id) do
    %{params | identifier: id}
  end

  @doc """
  Retrieves transaction ID from params struct
  """
  @spec get_id(t) :: binary | nil
  def get_id(%__MODULE__{identifier: id}), do: id

  @doc """
  Retrieves all attributes from params struct
  """
  @spec get_attrs(t) :: [Attribute.t]
  def get_attrs(%__MODULE__{attributes: attrs}), do: attrs

  @doc """
  Retrieves all attributes with given name from params struct
  """
  @spec get_attrs(t, attr_name :: module) :: [Attribute.t]
  def get_attrs(%__MODULE__{attributes: attrs}, attr_name) do
    Enum.filter(attrs, &(Attribute.name(&1) == attr_name))
  end

  @doc """
  Sets whole attributes list in params struct
  """
  @spec set_attrs(t, [Attribute.t]) :: t
  def set_attrs(params, attrs) do
    %{params | attributes: attrs}
  end

  @doc """
  Retrieves single attribute from params struct

  Returns `nil` if attribute is not present.
  """
  @spec get_attr(t, attr_name :: module) :: Attribute.t | nil
  def get_attr(params, attr_name) do
    Enum.find(params.attributes, fn a -> Attribute.name(a) === attr_name end)
  end

  @doc """
  Puts single attribute in params struct

  `:overwrite` option determines whether attributes of the same type
  will be removed and the new one will be put in their place.
  Defaults to `true`.
  """
  @spec put_attr(t, Attribute.t, overwrite: boolean) :: t
  # Single clause with a default argument; the previous bodiless head was
  # redundant for a single-clause function.
  def put_attr(params, attr, opts \\ [overwrite: true]) do
    attrs =
      if opts[:overwrite] do
        Enum.reject(params.attributes, fn a ->
          Attribute.name(a) === Attribute.name(attr)
        end)
      else
        params.attributes
      end

    %{params | attributes: [attr | attrs]}
  end

  @doc """
  Adds list of attributes to params struct

  It's functionally equal to recursively calling `put_attr/3`
  with `overwrite: false` on params struct.
  """
  @spec put_attrs(t, [Attribute.t]) :: t
  def put_attrs(params, attrs) when is_list(attrs) do
    Enum.reduce attrs, params,
      fn attr, acc -> put_attr(acc, attr, overwrite: false) end
  end

  @doc """
  Generates STUN transaction ID

  Returns 12 cryptographically strong random bytes (the 96-bit transaction
  ID mandated by RFC 5389).
  """
  @spec generate_id :: binary
  def generate_id do
    :crypto.strong_rand_bytes(12)
  end
end
|
lib/jerboa/params.ex
| 0.86267 | 0.52141 |
params.ex
|
starcoder
|
defmodule Ash do
  @moduledoc """
  Ash Framework

  ## ALPHA NOTICE

  Ash is in alpha. The package version is 1.0.0+, and most of the time that means stable, but in this case it _does not_. The 2.0 release will be the stable release.

  ## Quick Links

  - [Resource DSL Documentation](Ash.Resource.Dsl.html)
  - [Api DSL Documentation](Ash.Api.Dsl.html)
  - [Api interface documentation](Ash.Api.html)
  - [Query Documentation](Ash.Query.html)
  - [Changeset Documentation](Ash.Changeset.html)
  - [Guides](getting_started.html)

  ## Introduction

  Traditional MVC Frameworks (Rails, Django, .Net, Phoenix, etc) leave it up to the user to build the glue between requests for data (HTTP requests in various forms as well as server-side domain logic) and their respective ORMs. In that space, there is an incredible amount of boilerplate code that must get written from scratch for each application (authentication, authorization, sorting, filtering, sideloading relationships, serialization, etc).

  Ash is an opinionated yet configurable framework designed to reduce boilerplate in an Elixir application. Ash does this by providing a layer of abstraction over your system's data layer(s) with `Resources`. It is designed to be used in conjunction with a phoenix application, or on its own.

  To riff on a famous JRR Tolkien quote, a `Resource`is "One Interface to rule them all, One Interface to find them" and will become an indispensable place to define contracts for interacting with data throughout your application.

  To start using Ash, first declare your `Resources` using the Ash `Resource` DSL. You could technically stop there, and just leverage the Ash Elixir API to avoid writing boilerplate. More likely, you would use extensions like Ash.JsonApi or Ash.GraphQL with Phoenix to add external interfaces to those resources without having to write any extra code at all.

  Ash is an open-source project and draws inspiration from similar ideas in other frameworks and concepts. The goal of Ash is to lower the barrier to adopting and using Elixir and Phoenix, and in doing so help these amazing communities attract new developers, projects, and companies.

  ## Example Resource

  ```elixir
  defmodule Post do
    use Ash.Resource

    actions do
      read :default
      create :default
    end

    attributes do
      attribute :name, :string
    end

    relationships do
      belongs_to :author, Author
    end
  end
  ```

  See the [getting started guide](getting_started.html) for more information.

  For those looking to add ash extensions:

  * see `Ash.Dsl.Extension` for adding configuration.
  * If you are looking to write a new data source, also see the `Ash.DataLayer` documentation.
  * If you are looking to write a new authorizer, see `Ash.Authorizer`
  * If you are looking to write a "front end", something powered by Ash resources, a guide on
    building those kinds of tools is in the works.
  """

  alias Ash.Resource.Actions.{Create, Destroy, Read, Update}
  alias Ash.Resource.Relationships.{BelongsTo, HasMany, HasOne, ManyToMany}

  @type action :: Create.t() | Read.t() | Update.t() | Destroy.t()
  @type action_type :: :read | :create | :update | :destroy
  @type actor :: Ash.record()
  @type aggregate :: Ash.Query.Aggregate.t() | Ash.Resource.Aggregate.t()
  @type aggregate_kind :: Ash.Query.Aggregate.kind()
  @type api :: module
  @type attribute :: Ash.Resource.Attribute.t()
  @type calculation :: Ash.Resource.Calculation.t()
  @type cardinality_many_relationship() :: HasMany.t() | ManyToMany.t()
  @type cardinality_one_relationship() :: HasOne.t() | BelongsTo.t()
  @type changeset :: Ash.Changeset.t()
  @type data_layer :: module
  @type data_layer_query :: struct
  @type error :: struct
  @type filter :: Ash.Filter.t()
  @type params :: Keyword.t()
  @type primary_key :: record() | map | term
  @type query :: Ash.Query.t()
  @type record :: struct
  @type relationship :: cardinality_one_relationship() | cardinality_many_relationship()
  @type relationship_cardinality :: :many | :one
  @type sort :: Keyword.t()
  @type resource :: module
  @type side_loads :: term
  @type validation :: Ash.Resource.Validation.t()

  require Ash.Dsl.Extension

  @doc """
  Returns `true` if `module` lists `behaviour` among the `behaviour`
  attributes of its compiled module info.
  """
  @spec implements_behaviour?(module, module) :: boolean
  def implements_behaviour?(module, behaviour) do
    :attributes
    |> module.module_info()
    |> Enum.flat_map(fn
      {:behaviour, value} -> List.wrap(value)
      _ -> []
    end)
    |> Enum.any?(&(&1 == behaviour))
  end

  @doc "Returns all extensions of a resource or api"
  @spec extensions(resource() | api()) :: [module]
  def extensions(resource) do
    :persistent_term.get({resource, :extensions}, [])
  end
end
|
lib/ash.ex
| 0.864081 | 0.854156 |
ash.ex
|
starcoder
|
defmodule AWS.SSM do
@moduledoc """
AWS Systems Manager
AWS Systems Manager is a collection of capabilities that helps you automate
management tasks such as collecting system inventory, applying operating
system (OS) patches, automating the creation of Amazon Machine Images
(AMIs), and configuring operating systems (OSs) and applications at scale.
Systems Manager lets you remotely and securely manage the configuration of
your managed instances. A *managed instance* is any Amazon Elastic Compute
Cloud instance (EC2 instance), or any on-premises server or virtual machine
(VM) in your hybrid environment that has been configured for Systems
Manager.
This reference is intended to be used with the [AWS Systems Manager User
Guide](https://docs.aws.amazon.com/systems-manager/latest/userguide/).
To get started, verify prerequisites and configure managed instances. For
more information, see [Setting up AWS Systems
Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-setting-up.html)
in the *AWS Systems Manager User Guide*.
For information about other API actions you can perform on EC2 instances,
see the [Amazon EC2 API
Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/). For
information about how to use a Query API, see [Making API
requests](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/making-api-requests.html).
"""
# Each public function in this module is a thin wrapper that delegates to
# request/4, passing the corresponding AWS Systems Manager action name.
@doc """
Adds or overwrites one or more tags for the specified resource. Tags are
metadata that you can assign to your documents, managed instances,
maintenance windows, Parameter Store parameters, and patch baselines. Tags
enable you to categorize your resources in different ways, for example, by
purpose, owner, or environment. Each tag consists of a key and an optional
value, both of which you define. For example, you could define a set of
tags for your account's managed instances that helps you track each
instance's owner and stack level. For example: Key=Owner and Value=DbAdmin,
SysAdmin, or Dev. Or Key=Stack and Value=Production, Pre-Production, or
Test.
Each resource can have a maximum of 50 tags.
We recommend that you devise a set of tag keys that meets your needs for
each resource type. Using a consistent set of tag keys makes it easier for
you to manage your resources. You can search and filter the resources based
on the tags you add. Tags don't have any semantic meaning and are
interpreted strictly as a string of characters.
For more information about using tags with EC2 instances, see [Tagging your
Amazon EC2
resources](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html)
in the *Amazon EC2 User Guide*.
"""
def add_tags_to_resource(client, input, options \\ []) do
request(client, "AddTagsToResource", input, options)
end
@doc """
Attempts to cancel the command specified by the Command ID. There is no
guarantee that the command will be terminated and the underlying process
stopped.
"""
def cancel_command(client, input, options \\ []) do
request(client, "CancelCommand", input, options)
end
@doc """
Stops a maintenance window execution that is already in progress and
cancels any tasks in the window that have not already started running.
(Tasks already in progress will continue to completion.)
"""
def cancel_maintenance_window_execution(client, input, options \\ []) do
request(client, "CancelMaintenanceWindowExecution", input, options)
end
@doc """
Generates an activation code and activation ID you can use to register your
on-premises server or virtual machine (VM) with Systems Manager.
Registering these machines with Systems Manager makes it possible to manage
them using Systems Manager capabilities. You use the activation code and ID
when installing SSM Agent on machines in your hybrid environment. For more
information about requirements for managing on-premises instances and VMs
using Systems Manager, see [Setting up AWS Systems Manager for hybrid
environments](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances.html)
in the *AWS Systems Manager User Guide*.
<note> On-premises servers or VMs that are registered with Systems Manager
and EC2 instances that you manage with Systems Manager are all called
*managed instances*.
</note>
"""
def create_activation(client, input, options \\ []) do
request(client, "CreateActivation", input, options)
end
# Create* wrappers: each delegates to request/4 with the SSM action name.
@doc """
A State Manager association defines the state that you want to maintain on
your instances. For example, an association can specify that anti-virus
software must be installed and running on your instances, or that certain
ports must be closed. For static targets, the association specifies a
schedule for when the configuration is reapplied. For dynamic targets, such
as an AWS Resource Group or an AWS Autoscaling Group, State Manager applies
the configuration when new instances are added to the group. The
association also specifies actions to take when applying the configuration.
For example, an association for anti-virus software might run once a day.
If the software is not installed, then State Manager installs it. If the
software is installed, but the service is not running, then the association
might instruct State Manager to start the service.
"""
def create_association(client, input, options \\ []) do
request(client, "CreateAssociation", input, options)
end
@doc """
Associates the specified Systems Manager document with the specified
instances or targets.
When you associate a document with one or more instances using instance IDs
or tags, SSM Agent running on the instance processes the document and
configures the instance as specified.
If you associate a document with an instance that already has an associated
document, the system returns the AssociationAlreadyExists exception.
"""
def create_association_batch(client, input, options \\ []) do
request(client, "CreateAssociationBatch", input, options)
end
@doc """
Creates a Systems Manager (SSM) document. An SSM document defines the
actions that Systems Manager performs on your managed instances. For more
information about SSM documents, including information about supported
schemas, features, and syntax, see [AWS Systems Manager
Documents](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-ssm-docs.html)
in the *AWS Systems Manager User Guide*.
"""
def create_document(client, input, options \\ []) do
request(client, "CreateDocument", input, options)
end
@doc """
Creates a new maintenance window.
<note> The value you specify for `Duration` determines the specific end
time for the maintenance window based on the time it begins. No maintenance
window tasks are permitted to start after the resulting end time minus the
number of hours you specify for `Cutoff`. For example, if the maintenance
window starts at 3 PM, the duration is three hours, and the value you
specify for `Cutoff` is one hour, no maintenance window tasks can start
after 5 PM.
</note>
"""
def create_maintenance_window(client, input, options \\ []) do
request(client, "CreateMaintenanceWindow", input, options)
end
@doc """
Creates a new OpsItem. You must have permission in AWS Identity and Access
Management (IAM) to create a new OpsItem. For more information, see
[Getting started with
OpsCenter](https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html)
in the *AWS Systems Manager User Guide*.
Operations engineers and IT professionals use OpsCenter to view,
investigate, and remediate operational issues impacting the performance and
health of their AWS resources. For more information, see [AWS Systems
Manager
OpsCenter](https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html)
in the *AWS Systems Manager User Guide*.
"""
def create_ops_item(client, input, options \\ []) do
request(client, "CreateOpsItem", input, options)
end
@doc """
Creates a patch baseline.
<note> For information about valid key and value pairs in `PatchFilters`
for each supported operating system type, see
[PatchFilter](http://docs.aws.amazon.com/systems-manager/latest/APIReference/API_PatchFilter.html).
</note>
"""
def create_patch_baseline(client, input, options \\ []) do
request(client, "CreatePatchBaseline", input, options)
end
# Resource-data-sync and Delete* wrappers: each delegates to request/4 with
# the SSM action name.
@doc """
A resource data sync helps you view data from multiple sources in a single
location. Systems Manager offers two types of resource data sync:
`SyncToDestination` and `SyncFromSource`.
You can configure Systems Manager Inventory to use the `SyncToDestination`
type to synchronize Inventory data from multiple AWS Regions to a single S3
bucket. For more information, see [Configuring Resource Data Sync for
Inventory](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-datasync.html)
in the *AWS Systems Manager User Guide*.
You can configure Systems Manager Explorer to use the `SyncFromSource` type
to synchronize operational work items (OpsItems) and operational data
(OpsData) from multiple AWS Regions to a single S3 bucket. This type can
synchronize OpsItems and OpsData from multiple AWS accounts and Regions or
`EntireOrganization` by using AWS Organizations. For more information, see
[Setting up Systems Manager Explorer to display data from multiple accounts
and
Regions](https://docs.aws.amazon.com/systems-manager/latest/userguide/Explorer-resource-data-sync.html)
in the *AWS Systems Manager User Guide*.
A resource data sync is an asynchronous operation that returns immediately.
After a successful initial sync is completed, the system continuously syncs
data. To check the status of a sync, use the `ListResourceDataSync`.
<note> By default, data is not encrypted in Amazon S3. We strongly
recommend that you enable encryption in Amazon S3 to ensure secure data
storage. We also recommend that you secure access to the Amazon S3 bucket
by creating a restrictive bucket policy.
</note>
"""
def create_resource_data_sync(client, input, options \\ []) do
request(client, "CreateResourceDataSync", input, options)
end
@doc """
Deletes an activation. You are not required to delete an activation. If you
delete an activation, you can no longer use it to register additional
managed instances. Deleting an activation does not de-register managed
instances. You must manually de-register managed instances.
"""
def delete_activation(client, input, options \\ []) do
request(client, "DeleteActivation", input, options)
end
@doc """
Disassociates the specified Systems Manager document from the specified
instance.
When you disassociate a document from an instance, it does not change the
configuration of the instance. To change the configuration state of an
instance after you disassociate a document, you must create a new document
with the desired configuration and associate it with the instance.
"""
def delete_association(client, input, options \\ []) do
request(client, "DeleteAssociation", input, options)
end
@doc """
Deletes the Systems Manager document and all instance associations to the
document.
Before you delete the document, we recommend that you use
`DeleteAssociation` to disassociate all instances that are associated with
the document.
"""
def delete_document(client, input, options \\ []) do
request(client, "DeleteDocument", input, options)
end
@doc """
Delete a custom inventory type or the data associated with a custom
Inventory type. Deleting a custom inventory type is also referred to as
deleting a custom inventory schema.
"""
def delete_inventory(client, input, options \\ []) do
request(client, "DeleteInventory", input, options)
end
@doc """
Deletes a maintenance window.
"""
def delete_maintenance_window(client, input, options \\ []) do
request(client, "DeleteMaintenanceWindow", input, options)
end
@doc """
Delete a parameter from the system.
"""
def delete_parameter(client, input, options \\ []) do
request(client, "DeleteParameter", input, options)
end
@doc """
Delete a list of parameters.
"""
def delete_parameters(client, input, options \\ []) do
request(client, "DeleteParameters", input, options)
end
@doc """
Deletes a patch baseline.
"""
def delete_patch_baseline(client, input, options \\ []) do
request(client, "DeletePatchBaseline", input, options)
end
@doc """
Deletes a Resource Data Sync configuration. After the configuration is
deleted, changes to data on managed instances are no longer synced to or
from the target. Deleting a sync configuration does not delete data.
"""
def delete_resource_data_sync(client, input, options \\ []) do
request(client, "DeleteResourceDataSync", input, options)
end
# Deregister* and the first Describe* wrappers: each delegates to request/4
# with the SSM action name.
@doc """
Removes the server or virtual machine from the list of registered servers.
You can reregister the instance again at any time. If you don't plan to use
Run Command on the server, we suggest uninstalling SSM Agent first.
"""
def deregister_managed_instance(client, input, options \\ []) do
request(client, "DeregisterManagedInstance", input, options)
end
@doc """
Removes a patch group from a patch baseline.
"""
def deregister_patch_baseline_for_patch_group(client, input, options \\ []) do
request(client, "DeregisterPatchBaselineForPatchGroup", input, options)
end
@doc """
Removes a target from a maintenance window.
"""
def deregister_target_from_maintenance_window(client, input, options \\ []) do
request(client, "DeregisterTargetFromMaintenanceWindow", input, options)
end
@doc """
Removes a task from a maintenance window.
"""
def deregister_task_from_maintenance_window(client, input, options \\ []) do
request(client, "DeregisterTaskFromMaintenanceWindow", input, options)
end
@doc """
Describes details about the activation, such as the date and time the
activation was created, its expiration date, the IAM role assigned to the
instances in the activation, and the number of instances registered by
using this activation.
"""
def describe_activations(client, input, options \\ []) do
request(client, "DescribeActivations", input, options)
end
@doc """
Describes the association for the specified target or instance. If you
created the association by using the `Targets` parameter, then you must
retrieve the association by using the association ID. If you created the
association by specifying an instance ID and a Systems Manager document,
then you retrieve the association by specifying the document name and the
instance ID.
"""
def describe_association(client, input, options \\ []) do
request(client, "DescribeAssociation", input, options)
end
@doc """
Use this API action to view information about a specific execution of a
specific association.
"""
def describe_association_execution_targets(client, input, options \\ []) do
request(client, "DescribeAssociationExecutionTargets", input, options)
end
@doc """
Use this API action to view all executions for a specific association ID.
"""
def describe_association_executions(client, input, options \\ []) do
request(client, "DescribeAssociationExecutions", input, options)
end
@doc """
Provides details about all active and terminated Automation executions.
"""
def describe_automation_executions(client, input, options \\ []) do
request(client, "DescribeAutomationExecutions", input, options)
end
@doc """
Information about all active and terminated step executions in an
Automation workflow.
"""
def describe_automation_step_executions(client, input, options \\ []) do
request(client, "DescribeAutomationStepExecutions", input, options)
end
@doc """
Lists all patches eligible to be included in a patch baseline.
"""
def describe_available_patches(client, input, options \\ []) do
request(client, "DescribeAvailablePatches", input, options)
end
# Document- and instance-related Describe* wrappers: each delegates to
# request/4 with the SSM action name.
@doc """
Describes the specified Systems Manager document.
"""
def describe_document(client, input, options \\ []) do
request(client, "DescribeDocument", input, options)
end
@doc """
Describes the permissions for a Systems Manager document. If you created
the document, you are the owner. If a document is shared, it can either be
shared privately (by specifying a user's AWS account ID) or publicly
(*All*).
"""
def describe_document_permission(client, input, options \\ []) do
request(client, "DescribeDocumentPermission", input, options)
end
@doc """
All associations for the instance(s).
"""
def describe_effective_instance_associations(client, input, options \\ []) do
request(client, "DescribeEffectiveInstanceAssociations", input, options)
end
@doc """
Retrieves the current effective patches (the patch and the approval state)
for the specified patch baseline. Note that this API applies only to
Windows patch baselines.
"""
def describe_effective_patches_for_patch_baseline(client, input, options \\ []) do
request(client, "DescribeEffectivePatchesForPatchBaseline", input, options)
end
@doc """
The status of the associations for the instance(s).
"""
def describe_instance_associations_status(client, input, options \\ []) do
request(client, "DescribeInstanceAssociationsStatus", input, options)
end
@doc """
Describes one or more of your instances, including information about the
operating system platform, the version of SSM Agent installed on the
instance, instance status, and so on.
If you specify one or more instance IDs, it returns information for those
instances. If you do not specify instance IDs, it returns information for
all your instances. If you specify an instance ID that is not valid or an
instance that you do not own, you receive an error.
<note> The IamRole field for this API action is the Amazon Identity and
Access Management (IAM) role assigned to on-premises instances. This call
does not return the IAM role for EC2 instances.
</note>
"""
def describe_instance_information(client, input, options \\ []) do
request(client, "DescribeInstanceInformation", input, options)
end
@doc """
Retrieves the high-level patch state of one or more instances.
"""
def describe_instance_patch_states(client, input, options \\ []) do
request(client, "DescribeInstancePatchStates", input, options)
end
@doc """
Retrieves the high-level patch state for the instances in the specified
patch group.
"""
def describe_instance_patch_states_for_patch_group(client, input, options \\ []) do
request(client, "DescribeInstancePatchStatesForPatchGroup", input, options)
end
@doc """
Retrieves information about the patches on the specified instance and their
state relative to the patch baseline being used for the instance.
"""
def describe_instance_patches(client, input, options \\ []) do
request(client, "DescribeInstancePatches", input, options)
end
@doc """
Describes a specific delete inventory operation.
"""
def describe_inventory_deletions(client, input, options \\ []) do
request(client, "DescribeInventoryDeletions", input, options)
end
@doc """
Retrieves the individual task executions (one per target) for a particular
task run as part of a maintenance window execution.
"""
def describe_maintenance_window_execution_task_invocations(client, input, options \\ []),
  do: request(client, "DescribeMaintenanceWindowExecutionTaskInvocations", input, options)
@doc """
For a given maintenance window execution, lists the tasks that were run.
"""
def describe_maintenance_window_execution_tasks(client, input, options \\ []),
  do: request(client, "DescribeMaintenanceWindowExecutionTasks", input, options)
@doc """
Lists the executions of a maintenance window. This includes information
about when the maintenance window was scheduled to be active, and
information about tasks registered and run with the maintenance window.
"""
def describe_maintenance_window_executions(client, input, options \\ []),
  do: request(client, "DescribeMaintenanceWindowExecutions", input, options)
@doc """
Retrieves information about upcoming executions of a maintenance window.
"""
def describe_maintenance_window_schedule(client, input, options \\ []),
  do: request(client, "DescribeMaintenanceWindowSchedule", input, options)
@doc """
Lists the targets registered with the maintenance window.
"""
def describe_maintenance_window_targets(client, input, options \\ []),
  do: request(client, "DescribeMaintenanceWindowTargets", input, options)
@doc """
Lists the tasks in a maintenance window.
"""
def describe_maintenance_window_tasks(client, input, options \\ []),
  do: request(client, "DescribeMaintenanceWindowTasks", input, options)
@doc """
Retrieves the maintenance windows in an AWS account.
"""
def describe_maintenance_windows(client, input, options \\ []),
  do: request(client, "DescribeMaintenanceWindows", input, options)
@doc """
Retrieves information about the maintenance window targets or tasks that an
instance is associated with.
"""
def describe_maintenance_windows_for_target(client, input, options \\ []),
  do: request(client, "DescribeMaintenanceWindowsForTarget", input, options)
@doc """
Query a set of OpsItems. You must have permission in AWS Identity and
Access Management (IAM) to query a list of OpsItems. For more information,
see [Getting started with
OpsCenter](https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html)
in the *AWS Systems Manager User Guide*.
Operations engineers and IT professionals use OpsCenter to view,
investigate, and remediate operational issues impacting the performance and
health of their AWS resources. For more information, see [AWS Systems
Manager
OpsCenter](https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html)
in the *AWS Systems Manager User Guide*.
"""
def describe_ops_items(client, input, options \\ []),
  do: request(client, "DescribeOpsItems", input, options)
@doc """
Get information about a parameter.
<note> Request results are returned on a best-effort basis. If you specify
`MaxResults` in the request, the response includes information up to the
limit specified. The number of items returned, however, can be between zero
and the value of `MaxResults`. If the service reaches an internal limit
while processing the results, it stops the operation and returns the
matching values up to that point and a `NextToken`. You can specify the
`NextToken` in a subsequent call to get the next set of results.
</note>
"""
def describe_parameters(client, input, options \\ []),
  do: request(client, "DescribeParameters", input, options)
@doc """
Lists the patch baselines in your AWS account.
"""
def describe_patch_baselines(client, input, options \\ []),
  do: request(client, "DescribePatchBaselines", input, options)
@doc """
Returns high-level aggregated patch compliance state for a patch group.
"""
def describe_patch_group_state(client, input, options \\ []),
  do: request(client, "DescribePatchGroupState", input, options)
@doc """
Lists all patch groups that have been registered with patch baselines.
"""
def describe_patch_groups(client, input, options \\ []),
  do: request(client, "DescribePatchGroups", input, options)
@doc """
Lists the properties of available patches organized by product, product
family, classification, severity, and other properties of available
patches. You can use the reported properties in the filters you specify in
requests for actions such as `CreatePatchBaseline`, `UpdatePatchBaseline`,
`DescribeAvailablePatches`, and `DescribePatchBaselines`.
The following section lists the properties that can be used in filters for
each major operating system type:
<dl> <dt>AMAZON_LINUX</dt> <dd> Valid properties: PRODUCT, CLASSIFICATION,
SEVERITY
</dd> <dt>AMAZON_LINUX_2</dt> <dd> Valid properties: PRODUCT,
CLASSIFICATION, SEVERITY
</dd> <dt>CENTOS</dt> <dd> Valid properties: PRODUCT, CLASSIFICATION,
SEVERITY
</dd> <dt>DEBIAN</dt> <dd> Valid properties: PRODUCT, PRIORITY
</dd> <dt>ORACLE_LINUX</dt> <dd> Valid properties: PRODUCT, CLASSIFICATION,
SEVERITY
</dd> <dt>REDHAT_ENTERPRISE_LINUX</dt> <dd> Valid properties: PRODUCT,
CLASSIFICATION, SEVERITY
</dd> <dt>SUSE</dt> <dd> Valid properties: PRODUCT, CLASSIFICATION,
SEVERITY
</dd> <dt>UBUNTU</dt> <dd> Valid properties: PRODUCT, PRIORITY
</dd> <dt>WINDOWS</dt> <dd> Valid properties: PRODUCT, PRODUCT_FAMILY,
CLASSIFICATION, MSRC_SEVERITY
</dd> </dl>
"""
def describe_patch_properties(client, input, options \\ []),
  do: request(client, "DescribePatchProperties", input, options)
@doc """
Retrieves a list of all active sessions (both connected and disconnected)
or terminated sessions from the past 30 days.
"""
def describe_sessions(client, input, options \\ []),
  do: request(client, "DescribeSessions", input, options)
@doc """
Get detailed information about a particular Automation execution.
"""
def get_automation_execution(client, input, options \\ []),
  do: request(client, "GetAutomationExecution", input, options)
@doc """
Gets the state of the AWS Systems Manager Change Calendar at an optional,
specified time. If you specify a time, `GetCalendarState` returns the state
of the calendar at a specific time, and returns the next time that the
Change Calendar state will transition. If you do not specify a time,
`GetCalendarState` assumes the current time. Change Calendar entries have
two possible states: `OPEN` or `CLOSED`.
If you specify more than one calendar in a request, the command returns the
status of `OPEN` only if all calendars in the request are open. If one or
more calendars in the request are closed, the status returned is `CLOSED`.
For more information about Systems Manager Change Calendar, see [AWS
Systems Manager Change
Calendar](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-change-calendar.html)
in the *AWS Systems Manager User Guide*.
"""
def get_calendar_state(client, input, options \\ []),
  do: request(client, "GetCalendarState", input, options)
@doc """
Returns detailed information about command execution for an invocation or
plugin.
"""
def get_command_invocation(client, input, options \\ []),
  do: request(client, "GetCommandInvocation", input, options)
@doc """
Retrieves the Session Manager connection status for an instance to
determine whether it is running and ready to receive Session Manager
connections.
"""
def get_connection_status(client, input, options \\ []),
  do: request(client, "GetConnectionStatus", input, options)
@doc """
Retrieves the default patch baseline. Note that Systems Manager supports
creating multiple default patch baselines. For example, you can create a
default patch baseline for each operating system.
If you do not specify an operating system value, the default patch baseline
for Windows is returned.
"""
def get_default_patch_baseline(client, input, options \\ []),
  do: request(client, "GetDefaultPatchBaseline", input, options)
@doc """
Retrieves the current snapshot for the patch baseline the instance uses.
This API is primarily used by the AWS-RunPatchBaseline Systems Manager
document.
"""
def get_deployable_patch_snapshot_for_instance(client, input, options \\ []),
  do: request(client, "GetDeployablePatchSnapshotForInstance", input, options)
@doc """
Gets the contents of the specified Systems Manager document.
"""
def get_document(client, input, options \\ []),
  do: request(client, "GetDocument", input, options)
@doc """
Query inventory information.
"""
def get_inventory(client, input, options \\ []),
  do: request(client, "GetInventory", input, options)
@doc """
Return a list of inventory type names for the account, or return a list of
attribute names for a specific Inventory item type.
"""
def get_inventory_schema(client, input, options \\ []),
  do: request(client, "GetInventorySchema", input, options)
@doc """
Retrieves a maintenance window.
"""
def get_maintenance_window(client, input, options \\ []),
  do: request(client, "GetMaintenanceWindow", input, options)
@doc """
Retrieves details about a specific maintenance window execution.
"""
def get_maintenance_window_execution(client, input, options \\ []),
  do: request(client, "GetMaintenanceWindowExecution", input, options)
@doc """
Retrieves the details about a specific task run as part of a maintenance
window execution.
"""
def get_maintenance_window_execution_task(client, input, options \\ []),
  do: request(client, "GetMaintenanceWindowExecutionTask", input, options)
@doc """
Retrieves information about a specific task running on a specific target.
"""
def get_maintenance_window_execution_task_invocation(client, input, options \\ []),
  do: request(client, "GetMaintenanceWindowExecutionTaskInvocation", input, options)
@doc """
Retrieves the details of a task registered with a maintenance window.
"""
def get_maintenance_window_task(client, input, options \\ []),
  do: request(client, "GetMaintenanceWindowTask", input, options)
@doc """
Get information about an OpsItem by using the ID. You must have permission
in AWS Identity and Access Management (IAM) to view information about an
OpsItem. For more information, see [Getting started with
OpsCenter](https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html)
in the *AWS Systems Manager User Guide*.
Operations engineers and IT professionals use OpsCenter to view,
investigate, and remediate operational issues impacting the performance and
health of their AWS resources. For more information, see [AWS Systems
Manager
OpsCenter](https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html)
in the *AWS Systems Manager User Guide*.
"""
def get_ops_item(client, input, options \\ []),
  do: request(client, "GetOpsItem", input, options)
@doc """
View a summary of OpsItems based on specified filters and aggregators.
"""
def get_ops_summary(client, input, options \\ []),
  do: request(client, "GetOpsSummary", input, options)
@doc """
Get information about a parameter by using the parameter name. Don't
confuse this API action with the `GetParameters` API action.
"""
def get_parameter(client, input, options \\ []),
  do: request(client, "GetParameter", input, options)
@doc """
Retrieves the history of all changes to a parameter.
"""
def get_parameter_history(client, input, options \\ []),
  do: request(client, "GetParameterHistory", input, options)
@doc """
Get details of a parameter. Don't confuse this API action with the
`GetParameter` API action.
"""
def get_parameters(client, input, options \\ []),
  do: request(client, "GetParameters", input, options)
@doc """
Retrieve information about one or more parameters in a specific hierarchy.
<note> Request results are returned on a best-effort basis. If you specify
`MaxResults` in the request, the response includes information up to the
limit specified. The number of items returned, however, can be between zero
and the value of `MaxResults`. If the service reaches an internal limit
while processing the results, it stops the operation and returns the
matching values up to that point and a `NextToken`. You can specify the
`NextToken` in a subsequent call to get the next set of results.
</note>
"""
def get_parameters_by_path(client, input, options \\ []),
  do: request(client, "GetParametersByPath", input, options)
@doc """
Retrieves information about a patch baseline.
"""
def get_patch_baseline(client, input, options \\ []),
  do: request(client, "GetPatchBaseline", input, options)
@doc """
Retrieves the patch baseline that should be used for the specified patch
group.
"""
def get_patch_baseline_for_patch_group(client, input, options \\ []),
  do: request(client, "GetPatchBaselineForPatchGroup", input, options)
@doc """
`ServiceSetting` is an account-level setting for an AWS service. This
setting defines how a user interacts with or uses a service or a feature of
a service. For example, if an AWS service charges money to the account
based on feature or service usage, then the AWS service team might create a
default setting of "false". This means the user can't use this feature
unless they change the setting to "true" and intentionally opt in for a
paid feature.
Services map a `SettingId` object to a setting value. AWS services teams
define the default value for a `SettingId`. You can't create a new
`SettingId`, but you can overwrite the default value if you have the
`ssm:UpdateServiceSetting` permission for the setting. Use the
`UpdateServiceSetting` API action to change the default setting. Or use the
`ResetServiceSetting` to change the value back to the original value
defined by the AWS service team.
Query the current service setting for the account.
"""
def get_service_setting(client, input, options \\ []),
  do: request(client, "GetServiceSetting", input, options)
@doc """
A parameter label is a user-defined alias to help you manage different
versions of a parameter. When you modify a parameter, Systems Manager
automatically saves a new version and increments the version number by one.
A label can help you remember the purpose of a parameter when there are
multiple versions.
Parameter labels have the following requirements and restrictions.
<ul> <li> A version of a parameter can have a maximum of 10 labels.
</li> <li> You can't attach the same label to different versions of the
same parameter. For example, if version 1 has the label Production, then
you can't attach Production to version 2.
</li> <li> You can move a label from one version of a parameter to another.
</li> <li> You can't create a label when you create a new parameter. You
must attach a label to a specific version of a parameter.
</li> <li> You can't delete a parameter label. If you no longer want to use
a parameter label, then you must move it to a different version of a
parameter.
</li> <li> A label can have a maximum of 100 characters.
</li> <li> Labels can contain letters (case sensitive), numbers, periods
(.), hyphens (-), or underscores (_).
</li> <li> Labels can't begin with a number, "aws," or "ssm" (not case
sensitive). If a label fails to meet these requirements, then the label is
not associated with a parameter and the system displays it in the list of
InvalidLabels.
</li> </ul>
"""
def label_parameter_version(client, input, options \\ []),
  do: request(client, "LabelParameterVersion", input, options)
@doc """
Retrieves all versions of an association for a specific association ID.
"""
def list_association_versions(client, input, options \\ []),
  do: request(client, "ListAssociationVersions", input, options)
@doc """
Returns all State Manager associations in the current AWS account and
Region. You can limit the results to a specific State Manager association
document or instance by specifying a filter.
"""
def list_associations(client, input, options \\ []),
  do: request(client, "ListAssociations", input, options)
@doc """
An invocation is a copy of a command sent to a specific instance. A command
can apply to one or more instances. A command invocation applies to one
instance. For example, if a user runs SendCommand against three instances,
then a command invocation is created for each requested instance ID.
ListCommandInvocations provides status about command execution.
"""
def list_command_invocations(client, input, options \\ []),
  do: request(client, "ListCommandInvocations", input, options)
@doc """
Lists the commands requested by users of the AWS account.
"""
def list_commands(client, input, options \\ []),
  do: request(client, "ListCommands", input, options)
@doc """
For a specified resource ID, this API action returns a list of compliance
statuses for different resource types. Currently, you can only specify one
resource ID per call. List results depend on the criteria specified in the
filter.
"""
def list_compliance_items(client, input, options \\ []),
  do: request(client, "ListComplianceItems", input, options)
@doc """
Returns a summary count of compliant and non-compliant resources for a
compliance type. For example, this call can return State Manager
associations, patches, or custom compliance types according to the filter
criteria that you specify.
"""
def list_compliance_summaries(client, input, options \\ []),
  do: request(client, "ListComplianceSummaries", input, options)
@doc """
List all versions for a document.
"""
def list_document_versions(client, input, options \\ []),
  do: request(client, "ListDocumentVersions", input, options)
@doc """
Returns all Systems Manager (SSM) documents in the current AWS account and
Region. You can limit the results of this request by using a filter.
"""
def list_documents(client, input, options \\ []),
  do: request(client, "ListDocuments", input, options)
@doc """
A list of inventory items returned by the request.
"""
def list_inventory_entries(client, input, options \\ []),
  do: request(client, "ListInventoryEntries", input, options)
@doc """
Returns a resource-level summary count. The summary includes information
about compliant and non-compliant statuses and detailed compliance-item
severity counts, according to the filter criteria you specify.
"""
def list_resource_compliance_summaries(client, input, options \\ []),
  do: request(client, "ListResourceComplianceSummaries", input, options)
@doc """
Lists your resource data sync configurations. Includes information about
the last time a sync attempted to start, the last sync status, and the last
time a sync successfully completed.
The number of sync configurations might be too large to return using a
single call to `ListResourceDataSync`. You can limit the number of sync
configurations returned by using the `MaxResults` parameter. To determine
whether there are more sync configurations to list, check the value of
`NextToken` in the output. If there are more sync configurations to list,
you can request them by specifying the `NextToken` returned in the call to
the parameter of a subsequent call.
"""
def list_resource_data_sync(client, input, options \\ []),
  do: request(client, "ListResourceDataSync", input, options)
@doc """
Returns a list of the tags assigned to the specified resource.
"""
def list_tags_for_resource(client, input, options \\ []),
  do: request(client, "ListTagsForResource", input, options)
@doc """
Shares a Systems Manager document publicly or privately. If you share a
document privately, you must specify the AWS user account IDs for those
people who can use the document. If you share a document publicly, you must
specify *All* as the account ID.
"""
def modify_document_permission(client, input, options \\ []),
  do: request(client, "ModifyDocumentPermission", input, options)
@doc """
Registers a compliance type and other compliance details on a designated
resource. This action lets you register custom compliance details with a
resource. This call overwrites existing compliance information on the
resource, so you must provide a full list of compliance items each time
that you send the request.
ComplianceType can be one of the following:
<ul> <li> ExecutionId: The execution ID when the patch, association, or
custom compliance item was applied.
</li> <li> ExecutionType: Specify patch, association, or Custom:`string`.
</li> <li> ExecutionTime. The time the patch, association, or custom
compliance item was applied to the instance.
</li> <li> Id: The patch, association, or custom compliance ID.
</li> <li> Title: A title.
</li> <li> Status: The status of the compliance item. For example,
`approved` for patches, or `Failed` for associations.
</li> <li> Severity: A patch severity. For example, `critical`.
</li> <li> DocumentName: A SSM document name. For example,
AWS-RunPatchBaseline.
</li> <li> DocumentVersion: An SSM document version number. For example, 4.
</li> <li> Classification: A patch classification. For example, `security
updates`.
</li> <li> PatchBaselineId: A patch baseline ID.
</li> <li> PatchSeverity: A patch severity. For example, `Critical`.
</li> <li> PatchState: A patch state. For example,
`InstancesWithFailedPatches`.
</li> <li> PatchGroup: The name of a patch group.
</li> <li> InstalledTime: The time the association, patch, or custom
compliance item was applied to the resource. Specify the time by using the
following format: yyyy-MM-dd'T'HH:mm:ss'Z'
</li> </ul>
"""
def put_compliance_items(client, input, options \\ []),
  do: request(client, "PutComplianceItems", input, options)
@doc """
Bulk update custom inventory items on one or more instances. The request
adds an inventory item, if it doesn't already exist, or updates an
inventory item, if it does exist.
"""
def put_inventory(client, input, options \\ []),
  do: request(client, "PutInventory", input, options)
@doc """
Add a parameter to the system.
"""
def put_parameter(client, input, options \\ []),
  do: request(client, "PutParameter", input, options)
@doc """
Defines the default patch baseline for the relevant operating system.
To reset the AWS predefined patch baseline as the default, specify the full
patch baseline ARN as the baseline ID value. For example, for CentOS,
specify
`arn:aws:ssm:us-east-2:733109147000:patchbaseline/pb-0574b43a65ea646ed`
instead of `pb-0574b43a65ea646ed`.
"""
def register_default_patch_baseline(client, input, options \\ []),
  do: request(client, "RegisterDefaultPatchBaseline", input, options)
@doc """
Registers a patch baseline for a patch group.
"""
def register_patch_baseline_for_patch_group(client, input, options \\ []),
  do: request(client, "RegisterPatchBaselineForPatchGroup", input, options)
@doc """
Registers a target with a maintenance window.
"""
def register_target_with_maintenance_window(client, input, options \\ []),
  do: request(client, "RegisterTargetWithMaintenanceWindow", input, options)
@doc """
Adds a new task to a maintenance window.
"""
def register_task_with_maintenance_window(client, input, options \\ []),
  do: request(client, "RegisterTaskWithMaintenanceWindow", input, options)
@doc """
Removes tag keys from the specified resource.
"""
def remove_tags_from_resource(client, input, options \\ []),
  do: request(client, "RemoveTagsFromResource", input, options)
@doc """
`ServiceSetting` is an account-level setting for an AWS service. This
setting defines how a user interacts with or uses a service or a feature of
a service. For example, if an AWS service charges money to the account
based on feature or service usage, then the AWS service team might create a
default setting of "false". This means the user can't use this feature
unless they change the setting to "true" and intentionally opt in for a
paid feature.
Services map a `SettingId` object to a setting value. AWS services teams
define the default value for a `SettingId`. You can't create a new
`SettingId`, but you can overwrite the default value if you have the
`ssm:UpdateServiceSetting` permission for the setting. Use the
`GetServiceSetting` API action to view the current value. Use the
`UpdateServiceSetting` API action to change the default setting.
Reset the service setting for the account to the default value as
provisioned by the AWS service team.
"""
def reset_service_setting(client, input, options \\ []),
  do: request(client, "ResetServiceSetting", input, options)
@doc """
Reconnects a session to an instance after it has been disconnected.
Connections can be resumed for disconnected sessions, but not terminated
sessions.
<note> This command is primarily for use by client machines to
automatically reconnect during intermittent network issues. It is not
intended for any other use.
</note>
"""
def resume_session(client, input, options \\ []),
  do: request(client, "ResumeSession", input, options)
@doc """
Sends a signal to an Automation execution to change the current behavior or
status of the execution.
"""
def send_automation_signal(client, input, options \\ []),
  do: request(client, "SendAutomationSignal", input, options)
@doc """
Runs commands on one or more managed instances.
"""
def send_command(client, input, options \\ []),
  do: request(client, "SendCommand", input, options)
@doc """
Use this API action to run an association immediately and only one time.
This action can be helpful when troubleshooting associations.
"""
def start_associations_once(client, input, options \\ []),
  do: request(client, "StartAssociationsOnce", input, options)
@doc """
Initiates execution of an Automation document.
"""
def start_automation_execution(client, input, options \\ []),
  do: request(client, "StartAutomationExecution", input, options)
@doc """
Initiates a connection to a target (for example, an instance) for a Session
Manager session. Returns a URL and token that can be used to open a
WebSocket connection for sending input and receiving outputs.
<note> AWS CLI usage: `start-session` is an interactive command that
requires the Session Manager plugin to be installed on the client machine
making the call. For information, see [Install the Session Manager plugin
for the AWS
CLI](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html)
in the *AWS Systems Manager User Guide*.
AWS Tools for PowerShell usage: Start-SSMSession is not currently supported
by AWS Tools for PowerShell on Windows local machines.
</note>
"""
def start_session(client, input, options \\ []),
  do: request(client, "StartSession", input, options)
@doc """
Stop an Automation that is currently running.
"""
def stop_automation_execution(client, input, options \\ []),
  do: request(client, "StopAutomationExecution", input, options)
@doc """
Permanently ends a session and closes the data connection between the
Session Manager client and SSM Agent on the instance. A terminated session
cannot be resumed.
"""
def terminate_session(client, input, options \\ []),
  do: request(client, "TerminateSession", input, options)
@doc """
Updates an association. You can update the association name and version,
the document version, schedule, parameters, and Amazon S3 output.
In order to call this API action, your IAM user account, group, or role
must be configured with permission to call the `DescribeAssociation` API
action. If you don't have permission to call DescribeAssociation, then you
receive the following error: `An error occurred (AccessDeniedException)
when calling the UpdateAssociation operation: User: <user_arn> is not
authorized to perform: ssm:DescribeAssociation on resource:
<resource_arn>`
<important> When you update an association, the association immediately
runs against the specified targets.
</important>
"""
def update_association(client, input, options \\ []),
  do: request(client, "UpdateAssociation", input, options)
@doc """
Updates the status of the Systems Manager document associated with the
specified instance.
"""
def update_association_status(client, input, options \\ []),
  do: request(client, "UpdateAssociationStatus", input, options)
@doc """
Updates one or more values for an SSM document.
"""
def update_document(client, input, options \\ []),
  do: request(client, "UpdateDocument", input, options)
@doc """
Set the default version of a document.
"""
def update_document_default_version(client, input, options \\ []),
  do: request(client, "UpdateDocumentDefaultVersion", input, options)
@doc """
Updates an existing maintenance window. Only specified parameters are
modified.
<note> The value you specify for `Duration` determines the specific end
time for the maintenance window based on the time it begins. No maintenance
window tasks are permitted to start after the resulting endtime minus the
number of hours you specify for `Cutoff`. For example, if the maintenance
window starts at 3 PM, the duration is three hours, and the value you
specify for `Cutoff` is one hour, no maintenance window tasks can start
after 5 PM.
</note>
"""
def update_maintenance_window(client, input, options \\ []),
  do: request(client, "UpdateMaintenanceWindow", input, options)
@doc """
Modifies the target of an existing maintenance window. You can change the
following:
<ul> <li> Name
</li> <li> Description
</li> <li> Owner
</li> <li> IDs for an ID target
</li> <li> Tags for a Tag target
</li> <li> From any supported tag type to another. The three supported tag
types are ID target, Tag target, and resource group. For more information,
see `Target`.
</li> </ul> <note> If a parameter is null, then the corresponding field is
not modified.
</note>
"""
def update_maintenance_window_target(client, input, options \\ []),
  do: request(client, "UpdateMaintenanceWindowTarget", input, options)
@doc """
Modifies a task assigned to a maintenance window. You can't change the task
type, but you can change the following values:
<ul> <li> TaskARN. For example, you can change a RUN_COMMAND task from
AWS-RunPowerShellScript to AWS-RunShellScript.
</li> <li> ServiceRoleArn
</li> <li> TaskInvocationParameters
</li> <li> Priority
</li> <li> MaxConcurrency
</li> <li> MaxErrors
</li> </ul> If the value for a parameter in `UpdateMaintenanceWindowTask`
is null, then the corresponding field is not modified. If you set `Replace`
to true, then all fields required by the
`RegisterTaskWithMaintenanceWindow` action are required for this request.
Optional fields that aren't specified are set to null.
<important> When you update a maintenance window task that has options
specified in `TaskInvocationParameters`, you must provide again all the
`TaskInvocationParameters` values that you want to retain. The values you
do not specify again are removed. For example, suppose that when you
registered a Run Command task, you specified `TaskInvocationParameters`
values for `Comment`, `NotificationConfig`, and `OutputS3BucketName`. If
you update the maintenance window task and specify only a different
`OutputS3BucketName` value, the values for `Comment` and
`NotificationConfig` are removed.
</important>
"""
def update_maintenance_window_task(client, input, options \\ []),
  do: request(client, "UpdateMaintenanceWindowTask", input, options)
@doc """
Changes the Amazon Identity and Access Management (IAM) role that is
assigned to the on-premises instance or virtual machines (VM). IAM roles
are first assigned to these hybrid instances during the activation process.
For more information, see `CreateActivation`.
"""
def update_managed_instance_role(client, input, options \\ []),
  do: request(client, "UpdateManagedInstanceRole", input, options)
@doc """
Edit or change an OpsItem. You must have permission in AWS Identity and
Access Management (IAM) to update an OpsItem. For more information, see
[Getting started with
OpsCenter](https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html)
in the *AWS Systems Manager User Guide*.
Operations engineers and IT professionals use OpsCenter to view,
investigate, and remediate operational issues impacting the performance and
health of their AWS resources. For more information, see [AWS Systems
Manager
OpsCenter](https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html)
in the *AWS Systems Manager User Guide*.
"""
def update_ops_item(client, input, options \\ []),
  do: request(client, "UpdateOpsItem", input, options)
@doc """
Modifies an existing patch baseline. Fields not specified in the request
are left unchanged.
<note> For information about valid key and value pairs in `PatchFilters`
for each supported operating system type, see
[PatchFilter](http://docs.aws.amazon.com/systems-manager/latest/APIReference/API_PatchFilter.html).
</note>
"""
def update_patch_baseline(client, input, options \\ []),
  do: request(client, "UpdatePatchBaseline", input, options)
@doc """
Update a resource data sync. After you create a resource data sync for a
Region, you can't change the account options for that sync. For example, if
you create a sync in the us-east-2 (Ohio) Region and you choose the Include
only the current account option, you can't edit that sync later and choose
the Include all accounts from my AWS Organizations configuration option.
Instead, you must delete the first resource data sync, and create a new
one.
<note> This API action only supports a resource data sync that was created
with a SyncFromSource `SyncType`.
</note>
"""
def update_resource_data_sync(client, input, options \\ []),
  do: request(client, "UpdateResourceDataSync", input, options)
@doc """
`ServiceSetting` is an account-level setting for an AWS service. This
setting defines how a user interacts with or uses a service or a feature of
a service. For example, if an AWS service charges money to the account
based on feature or service usage, then the AWS service team might create a
default setting of "false". This means the user can't use this feature
unless they change the setting to "true" and intentionally opt in for a
paid feature.
Services map a `SettingId` object to a setting value. AWS services teams
define the default value for a `SettingId`. You can't create a new
`SettingId`, but you can overwrite the default value if you have the
`ssm:UpdateServiceSetting` permission for the setting. Use the
`GetServiceSetting` API action to view the current value. Or, use the
`ResetServiceSetting` to change the value back to the original value
defined by the AWS service team.
Update the service setting for the account.
"""
def update_service_setting(client, input, options \\ []),
  do: request(client, "UpdateServiceSetting", input, options)
@spec request(AWS.Client.t(), binary(), map(), list()) ::
        {:ok, map() | nil, map()}
        | {:error, term()}
defp request(client, action, input, options) do
  # Every SSM call is a signed JSON 1.1 POST carrying an
  # "AmazonSSM.<Action>" target header.
  client = %{client | service: "ssm"}
  host = build_host("ssm", client)
  url = build_url(host, client)
  payload = encode!(client, input)

  signed_headers =
    AWS.Request.sign_v4(
      client,
      "POST",
      url,
      [
        {"Host", host},
        {"Content-Type", "application/x-amz-json-1.1"},
        {"X-Amz-Target", "AmazonSSM.#{action}"}
      ],
      payload
    )

  post(client, url, payload, signed_headers, options)
end
# Issues the signed POST and normalizes the transport result:
# a 200 yields {:ok, decoded_body_or_nil, raw_response}, any other HTTP
# status is wrapped as {:error, {:unexpected_response, response}}, and
# transport errors pass through unchanged.
defp post(client, url, payload, headers, options) do
  case AWS.Client.request(client, :post, url, payload, headers, options) do
    {:ok, %{status_code: 200, body: raw_body} = response} ->
      decoded = if raw_body == "", do: nil, else: decode!(client, raw_body)
      {:ok, decoded, response}

    {:ok, response} ->
      {:error, {:unexpected_response, response}}

    {:error, _reason} = error ->
      error
  end
end
# Resolves the request host. For the pseudo-region "local" an explicitly
# configured endpoint wins, falling back to "localhost"; otherwise the
# standard AWS layout <service>.<region>.<endpoint-suffix> is used.
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
# Assembles the full request URL from the client's configured protocol and
# port, e.g. "https://host:443/".
defp build_url(host, %{proto: proto, port: port}) do
  "#{proto}://#{host}:#{port}/"
end
# Serializes a request payload using the JSON codec configured on the client.
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
# Deserializes a JSON response body using the codec configured on the client.
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/ssm.ex
| 0.880361 | 0.643329 |
ssm.ex
|
starcoder
|
defmodule HAP.ValueStore do
@moduledoc """
Defines the behaviour required of a module that wishes to act as the backing data store
for a given HomeKit characteristic.
# Simple Value Store
To implement a value store for a simple value whose value does not change asynchronously,
you must implement the `c:get_value/1` and `c:put_value/2` callbacks. These callbacks each
take a set of opts (specified in the initial configuration passed to `HAP.start_link/1`) to
allow your implementation to discriminate between various values within the same `HAP.ValueStore`
module.
# Supporting Asynchronous Notifications
To support notifying HomeKit of changes to an accessory's characteristics (such as a user pressing
a button or a flood sensor detecting water), implementations of `HAP.ValueStore` may choose to
implement the optional `c:set_change_token/2` callback. This callback will provide your implementation
with a change token to use when notifying HAP of changes to the corresponding value. To notify HAP of
changes to a value, pass this change token to the `HAP.value_changed/1` function. HAP will then query
your value store for the new value of the corresponding characteristic and notify any HomeKit controllers
of the change.
There are a number of things to be aware of when using Asynchronous Notifications:
* Your value store must be prepared to answer calls to `c:get_value/1` with the updated value before
calling `HAP.value_changed/1`.
* Do not call `HAP.value_changed/1` to notify HAP of changes which come from HAP itself (ie: do not call it in the course
of implementing `c:put_value/2`). Use it only for notifying HAP of changes which are truly asynchronous.
* If you have not yet received a `c:set_change_token/2` call, then you should not call `HAP.value_changed/1`; HAP will only
provide you with a change token for characteristics which a HomeKit controller has requested notifications on. Specifically,
do not retain change tokens between runs; they should maintain the same lifetime as the underlying HAP process.
* The call to `HAP.value_changed/1` is guaranteed to return quickly. It does no work beyond casting a message
to HAP to set the notification process in motion.
"""
# A value store implementation is referenced by its module name.
@type t :: module()
# Implementation-specific options, as declared in the hosting `HAP.Configuration`.
@type opts :: keyword()
# Opaque handle identifying a characteristic to HAP for async notifications;
# callers must treat it as a black box and must not persist it across runs.
@opaque change_token :: {term(), term()}
@doc """
Return the value of a value hosted by this value store. The passed list of opts
is as specified in the hosting `HAP.Configuration` and can be used to distinguish a
particular value within a larger value store (perhaps by GPIO pin or similar).
Returns the value stored by this value store.
"""
@callback get_value(opts :: opts()) :: {:ok, HAP.Characteristic.value()} | {:error, String.t()}
@doc """
Sets the value of a value hosted by this value store. The passed list of opts
is as specified in the hosting `HAP.Configuration` and can be used to distinguish a
particular value within a larger value store (perhaps by GPIO pin or similar).
Returns `:ok` or `{:error, reason}`.
"""
@callback put_value(value :: HAP.Characteristic.value(), opts :: opts()) :: :ok | {:error, String.t()}
@doc """
Informs the value store of the change token to use when notifying HAP of asynchronous
changes to the value in this store. This token should be provided to `HAP.value_changed/1` as the
sole argument; HAP will make a subsequent call to `c:get_value/1` to obtain the changed value.
Returns `:ok` or `{:error, reason}`.
"""
@callback set_change_token(change_token :: change_token(), opts :: opts()) :: :ok | {:error, String.t()}
@optional_callbacks set_change_token: 2
end
|
lib/hap/value_store.ex
| 0.915167 | 0.80651 |
value_store.ex
|
starcoder
|
defmodule ClusterEC2.Strategy.Tags do
  @moduledoc """
  This clustering strategy works by loading all instances that have the given
  tag associated with them.
  All instances must be started with the same app name and have security groups
  configured to allow inter-node communication.
      config :libcluster,
        topologies: [
          tags_example: [
            strategy: #{__MODULE__},
            config: [
              ec2_tagname: "mytag",
              ec2_tagvalue: "tagvalue",
              app_prefix: "app",
              ip_to_nodename: &my_nodename_func/2,
              ip_type: :private,
              polling_interval: 10_000]]]
  ## Configuration Options
  | Key | Required | Description |
  | --- | -------- | ----------- |
  | `:ec2_tagname` | yes | Name of the EC2 instance tag to look for. |
  | `:ec2_tagvalue` | no | Can be passed a static value (string), a 0-arity function, or a 1-arity function (which will be passed the value of `:ec2_tagname` at invocation). |
  | `:app_prefix` | no | Will be prepended to the node's private IP address to create the node name. |
  | `:ip_type` | no | One of :private or :public, defaults to :private |
  | `:ip_to_nodename` | no | defaults to `app_prefix@ip` but can be used to override the nodename |
  | `:polling_interval` | no | Number of milliseconds to wait between polls to the EC2 api. Defaults to 5_000 |
  """
  use GenServer
  use Cluster.Strategy

  import Cluster.Logger
  import SweetXml, only: [sigil_x: 2]

  alias Cluster.Strategy.State

  @default_polling_interval 5_000

  def start_link(opts) do
    Application.ensure_all_started(:tesla)
    Application.ensure_all_started(:ex_aws)
    GenServer.start_link(__MODULE__, opts)
  end

  # libcluster ~> 3.0 hands us a pre-built %State{}.
  @impl GenServer
  def init([%State{} = state]) do
    state = state |> Map.put(:meta, MapSet.new())
    {:ok, load(state)}
  end

  # libcluster ~> 2.0 hands us a raw keyword list instead.
  def init(opts) do
    state = %State{
      topology: Keyword.fetch!(opts, :topology),
      connect: Keyword.fetch!(opts, :connect),
      disconnect: Keyword.fetch!(opts, :disconnect),
      list_nodes: Keyword.fetch!(opts, :list_nodes),
      config: Keyword.fetch!(opts, :config),
      meta: MapSet.new([])
    }

    {:ok, load(state)}
  end

  @impl GenServer
  def handle_info(:timeout, state) do
    handle_info(:load, state)
  end

  def handle_info(:load, %State{} = state) do
    {:noreply, load(state)}
  end

  def handle_info(_, state) do
    {:noreply, state}
  end

  # Polls EC2 for the current node set, disconnects removed nodes, connects
  # added nodes, and schedules the next poll. On lookup failure the previous
  # node set is kept untouched.
  defp load(%State{topology: topology, connect: connect, disconnect: disconnect, list_nodes: list_nodes} = state) do
    case get_nodes(state) do
      {:ok, new_nodelist} ->
        added = MapSet.difference(new_nodelist, state.meta)
        removed = MapSet.difference(state.meta, new_nodelist)

        new_nodelist =
          case Cluster.Strategy.disconnect_nodes(topology, disconnect, list_nodes, MapSet.to_list(removed)) do
            :ok ->
              new_nodelist

            {:error, bad_nodes} ->
              # Add back the nodes which should have been removed, but which couldn't be for some reason
              Enum.reduce(bad_nodes, new_nodelist, fn {n, _}, acc ->
                MapSet.put(acc, n)
              end)
          end

        new_nodelist =
          case Cluster.Strategy.connect_nodes(topology, connect, list_nodes, MapSet.to_list(added)) do
            :ok ->
              new_nodelist

            {:error, bad_nodes} ->
              # Remove the nodes which should have been added, but couldn't be for some reason
              Enum.reduce(bad_nodes, new_nodelist, fn {n, _}, acc ->
                MapSet.delete(acc, n)
              end)
          end

        Process.send_after(self(), :load, Keyword.get(state.config, :polling_interval, @default_polling_interval))
        %{state | :meta => new_nodelist}

      _ ->
        Process.send_after(self(), :load, Keyword.get(state.config, :polling_interval, @default_polling_interval))
        state
    end
  end

  # FIX: the previous spec promised {:ok, [atom()]} but the function actually
  # returns {:ok, MapSet.t()} — which load/1 relies on for MapSet.difference/2.
  @spec get_nodes(State.t()) :: {:ok, MapSet.t(atom())} | {:error, []}
  defp get_nodes(%State{topology: topology, config: config}) do
    instance_id = ClusterEC2.local_instance_id()
    region = ClusterEC2.instance_region()
    tag_name = Keyword.fetch!(config, :ec2_tagname)
    tag_value = Keyword.get(config, :ec2_tagvalue, &local_instance_tag_value(&1, instance_id, region))
    app_prefix = Keyword.get(config, :app_prefix, "app")
    ip_to_nodename = Keyword.get(config, :ip_to_nodename, &ip_to_nodename/2)

    cond do
      tag_name != nil and tag_value != nil and app_prefix != nil and instance_id != "" and region != "" ->
        params = [filters: ["tag:#{tag_name}": fetch_tag_value(tag_name, tag_value), "instance-state-name": "running"]]
        request = ExAws.EC2.describe_instances(params)
        # FIX: leftover debug dump used `require Logger` + Logger.debug inside
        # the function body; route it through libcluster's own logger instead
        # so it honors the library's log level.
        debug(topology, inspect(request))

        case ExAws.request(request, region: region) do
          {:ok, %{body: body}} ->
            resp =
              body
              |> SweetXml.xpath(ip_xpath(Keyword.get(config, :ip_type, :private)))
              |> ip_to_nodename.(app_prefix)

            {:ok, MapSet.new(resp)}

          _ ->
            {:error, []}
        end

      instance_id == "" ->
        warn(topology, "instance id could not be fetched!")
        {:error, []}

      region == "" ->
        warn(topology, "region could not be fetched!")
        {:error, []}

      tag_name == nil ->
        warn(topology, "ec2 tags strategy is selected, but :ec2_tagname is not configured!")
        {:error, []}

      :else ->
        warn(topology, "ec2 tags strategy is selected, but is not configured!")
        {:error, []}
    end
  end

  # Default :ec2_tagvalue — looks up the value of `tag_name` on the local
  # instance itself.
  defp local_instance_tag_value(tag_name, instance_id, region) do
    ExAws.EC2.describe_instances(instance_id: instance_id)
    |> local_instance_tags(region)
    |> Map.get(tag_name)
  end

  defp local_instance_tags(body, region) do
    case ExAws.request(body, region: region) do
      {:ok, body} -> extract_tags(body)
      {:error, _} -> %{}
    end
  end

  # Parses the DescribeInstances XML into a %{"key" => "value"} tag map.
  defp extract_tags(%{body: xml}) do
    xml
    |> SweetXml.xpath(
      ~x"//DescribeInstancesResponse/reservationSet/item/instancesSet/item/tagSet/item"l,
      key: ~x"./key/text()"s,
      value: ~x"./value/text()"s
    )
    |> Stream.map(fn %{key: k, value: v} -> {k, v} end)
    |> Enum.into(%{})
  end

  defp ip_xpath(:private),
    do: ~x"//DescribeInstancesResponse/reservationSet/item/instancesSet/item/privateIpAddress/text()"ls

  defp ip_xpath(:public),
    do: ~x"//DescribeInstancesResponse/reservationSet/item/instancesSet/item/IpAddress/text()"ls

  # :ec2_tagvalue may be a static value, a 0-arity fun, or a 1-arity fun
  # receiving the tag name.
  defp fetch_tag_value(_k, v) when is_function(v, 0), do: v.()
  defp fetch_tag_value(k, v) when is_function(v, 1), do: v.(k)
  defp fetch_tag_value(_k, v), do: v

  defp ip_to_nodename(list, app_prefix) when is_list(list) do
    list
    |> Enum.map(fn ip ->
      :"#{app_prefix}@#{ip}"
    end)
  end
end
|
lib/strategy/tags.ex
| 0.851351 | 0.547283 |
tags.ex
|
starcoder
|
defmodule Magpie.InteractiveRoomChannel do
  @moduledoc """
  Channel that maintains lobbies for interactive experiments in which several
  participants interact with each other.

  Clients should rely on the `presence_diff` event to decide whether a game
  can be started.
  """
  use MagpieWeb, :channel

  alias Magpie.Experiments.AssignmentIdentifier
  alias Magpie.Presence

  # Client events that are relayed verbatim to everybody in the room.
  @relayed_events ["new_msg", "initialize_game", "next_round", "end_game"]

  @doc """
  Lets the participant join the lobby and wait in there.

  One lobby exists per experiment_id:chain:variant:generation combination.
  """
  def join("interactive_room:" <> room_identifier, _payload, socket) do
    expected = AssignmentIdentifier.to_string(socket.assigns.assignment_identifier, false)

    case room_identifier do
      ^expected ->
        send(self(), :after_participant_join)
        {:ok, socket}

      _ ->
        {:error, %{reason: "invalid_format"}}
    end
  end

  def handle_info(:after_participant_join, socket) do
    # Track this participant under the room-wide presence key. An interactive
    # experiment is assumed to happen between participants of the same chain,
    # variant and generation; presence also helps detect drop-outs.
    topic = AssignmentIdentifier.to_string(socket.assigns.assignment_identifier, false)

    Presence.track(socket, topic, %{
      participant_id: socket.assigns.participant_id,
      online_at: inspect(System.system_time(:second))
    })

    # Presence.list/1 returns presences grouped by key, with the per-process
    # metadata stored under :metas.
    lobby_members =
      socket
      |> Presence.list()
      |> Map.get(topic)
      |> Map.get(:metas)

    # Kick the game off once enough players are waiting in the lobby.
    if length(lobby_members) >= socket.assigns.num_players do
      broadcast!(socket, "start_game", %{})
    end

    {:noreply, socket}
  end

  # Relay client messages unchanged to everyone subscribed to the same topic.
  # The server stays generic: clients may coordinate entirely via "new_msg",
  # while the specialized event names ("initialize_game", "next_round",
  # "end_game") merely ease client-side dispatch.
  def handle_in(event, payload, socket) when event in @relayed_events do
    broadcast(socket, event, payload)
    {:noreply, socket}
  end
end
|
lib/magpie_web/channels/interactive_room_channel.ex
| 0.656328 | 0.412057 |
interactive_room_channel.ex
|
starcoder
|
use Croma
defmodule Antikythera.Memcache do
@moduledoc """
Easy-to-use in-memory cache for each executor pool.
#{inspect(__MODULE__)} behaves as a key-value storage.
Cached key-value pairs are internally stored in ETS.
It accepts arbitrary (but not too large) terms as both keys and values.
## Usage
iex> Antikythera.Memcache.write("foo", "bar", epool_id, 3_600)
:ok
iex> Antikythera.Memcache.read("foo", epool_id)
{:ok, "bar"}
## Limits
The number of records and the size of keys and values are limited.
- The maximum number of records for each executor pool
is #{AntikytheraCore.ExecutorPool.MemcacheWriter.max_records()}.
- If exceeds the limit, a record nearest to expiration is evicted so that a new record can be inserted.
- The maximum size of keys and values is defined in `Antikythera.Memcache.Key` and `Antikythera.Memcache.Value`.
- To know how the size of keys and values is calculated, see `Antikythera.TermUtil`.
- If exceeds the limit, `write/5` returns an error `:too_large_key` or `:too_large_value`.
## Lifetime of records
There are 2 cases where records in #{inspect(__MODULE__)} are evicted:
1. Records are expired (see [Mechanism of Expiration](#module-mechanism-of-expiration) below for more details)
2. Reach the maximum number of records for each executor pool (as mentioned in [Limits](#module-limits))
Please note that records in #{inspect(__MODULE__)} could be evicted anytime.
## Mechanism of Expiration
The lifetime of records must be set as `lifetime_in_sec` in `write/5`.
This lifetime does not guarantee that records remain in the entire specified lifetime.
To avoid the [thundering herd](https://en.wikipedia.org/wiki/Thundering_herd_problem), whether records are expired is decided probabilistically.
The probability of expiration is shown in the following.

If the thundering herd becomes a big problem, adjust `prob_lifetime_ratio` in `write/5`.
"""
# Default fraction of a record's lifetime after which probabilistic
# expiration may begin (i.e. reads in the last 10% of the lifetime may
# randomly treat the record as already expired).
@default_ratio 0.9
alias Croma.Result, as: R
alias AntikytheraCore.ExecutorPool.MemcacheWriter
alias Antikythera.ExecutorPool.Id, as: EPoolId
alias AntikytheraCore.Ets.Memcache
alias Antikythera.Memcache.{Key, Value, NormalizedFloat}
@doc """
Read the value associated with the `key` from #{inspect(__MODULE__)}.
Please note that records in #{inspect(__MODULE__)} could be evicted anytime so the error handling must be needed.
"""
defun read(key :: Key.t(), epool_id :: v[EPoolId.t()]) :: R.t(Value.t(), :not_found) do
Memcache.read(key, epool_id)
|> R.bind(fn {_, expire_at, prob_expire_at, value} ->
if expired?(expire_at, prob_expire_at) do
{:error, :not_found}
else
{:ok, value}
end
end)
end
@doc """
Try to read a value associated with the `key` from #{inspect(__MODULE__)} and if that fails,
write a value returned by `fun` to #{inspect(__MODULE__)}.
`fun` is evaluated only if a value is not found,
and the new value returned by `fun` is stored in #{inspect(__MODULE__)}.
If a value is found in #{inspect(__MODULE__)} or writing the new value to #{inspect(__MODULE__)} succeeds,
the value is returned as `{:ok, value}`, but if writing the new value fails, an error is returned in the same manner as `write/5`.
Parameters `lifetime_in_sec` and `prob_lifetime_ratio` are used to call `write/5` and the details are described above.
"""
defun read_or_else_write(
key :: Key.t(),
epool_id :: v[EPoolId.t()],
lifetime_in_sec :: v[non_neg_integer],
prob_lifetime_ratio :: v[NormalizedFloat.t()] \\ @default_ratio,
fun :: (() -> Value.t())
) :: R.t(Value.t(), :too_large_key | :too_large_value) do
case read(key, epool_id) do
{:ok, value} ->
{:ok, value}
{:error, _not_found} ->
value = fun.()
case write(key, value, epool_id, lifetime_in_sec, prob_lifetime_ratio) do
:ok -> {:ok, value}
err -> err
end
end
end
# Probabilistic expiration check (monotonic milliseconds): never expired
# before `prob_expire_at`, always expired after `expire_at`, and in between
# the probability of reporting "expired" rises linearly from 0 to 1. This
# spreads cache-miss refreshes over time to avoid a thundering herd.
defp expired?(expire_at, prob_expire_at) do
case System.monotonic_time(:millisecond) do
now when now < prob_expire_at ->
false
now when expire_at < now ->
true
now ->
# Linear ramp: expired with probability (now - prob_expire_at) / (expire_at - prob_expire_at).
rnd = :rand.uniform()
t0 = expire_at - prob_expire_at
t1 = now - prob_expire_at
rnd < t1 / t0
end
end
@doc """
Write a key-value pair to #{inspect(__MODULE__)}.
See above descriptions for more details.
"""
defun write(
key :: Key.t(),
value :: Value.t(),
epool_id :: v[EPoolId.t()],
lifetime_in_sec :: v[non_neg_integer],
prob_lifetime_ratio :: v[NormalizedFloat.t()] \\ @default_ratio
) :: :ok | {:error, :too_large_key | :too_large_value} do
# Size limits are validated here so oversized terms never reach the writer process.
cond do
not Key.valid?(key) -> {:error, :too_large_key}
not Value.valid?(value) -> {:error, :too_large_value}
true -> MemcacheWriter.write(key, value, epool_id, lifetime_in_sec, prob_lifetime_ratio)
end
end
defmodule Key do
@max_size 128
@moduledoc """
A type module of keys for `Antikythera.Memcache`.
The maximum size of keys is #{@max_size} bytes.
To know how the size is calculated, see `Antikythera.TermUtil`.
"""
@type t :: any
defun valid?(key :: term) :: boolean do
Antikythera.TermUtil.size_smaller_or_equal?(key, @max_size)
end
defun max_size() :: non_neg_integer, do: @max_size
end
defmodule Value do
@max_size 65_536
@moduledoc """
A type module of values for `Antikythera.Memcache`.
The maximum size of values is #{@max_size} bytes.
To know how the size is calculated, see `Antikythera.TermUtil`.
"""
@type t :: any
defun valid?(value :: term) :: boolean do
Antikythera.TermUtil.size_smaller_or_equal?(value, @max_size)
end
defun max_size() :: non_neg_integer, do: @max_size
end
defmodule NormalizedFloat do
use Croma.SubtypeOfFloat, min: 0.0, max: 1.0
end
end
|
lib/util/memcache.ex
| 0.81283 | 0.51812 |
memcache.ex
|
starcoder
|
defmodule BitcoinSimulator.BitcoinCore do
  @moduledoc """
  Facade over the simulator's core subsystems (blockchain, mining, network,
  raw transactions and wallets). Every function forwards to the function of
  the same name and arity in the corresponding submodule.
  """

  alias BitcoinSimulator.BitcoinCore.{Blockchain, Mining, Network, RawTransaction, Wallet}

  # Block Chain
  defdelegate newBlockchain, to: Blockchain
  defdelegate hashOfBestBlock(blockchain), to: Blockchain
  defdelegate hashBlockheader(header), to: Blockchain
  defdelegate hashTransaction(tx), to: Blockchain
  defdelegate blockCheck(blockchain, block), to: Blockchain
  defdelegate transactionCheck(blockchain, tx), to: Blockchain
  defdelegate blockAdd(block, blockchain, wallet, mempool, mining_process \\ nil, mining_txs \\ nil),
    to: Blockchain

  # Mining
  defdelegate newMempool, to: Mining
  defdelegate findTopUnconfirmedTrans(mempool), to: Mining
  defdelegate blockTemplate(prev_hash, txs), to: Mining
  defdelegate doMine(block, coinbase_addr, self_id), to: Mining
  defdelegate unconfirmedTransAdd(mempool, tx, tx_hash), to: Mining
  defdelegate coinbaseValueCalculate(blockchain, txs), to: Mining

  # Network
  # Not a delegation: constructs the struct directly.
  def newMessageRecord, do: %Network.MessageRecord{}
  defdelegate findInitialNeighbors(id), to: Network
  defdelegate get_initial_blockchain(neighbors), to: Network
  defdelegate exchange_neighbors(neighbors), to: Network
  defdelegate mix_neighbors(neighbors, self_id), to: Network
  defdelegate message_seen?(record, type, hash), to: Network
  defdelegate saw_message(record, type, hash), to: Network
  defdelegate clean_message_record(record), to: Network
  defdelegate broadcast_message(type, message, neighbors, sender), to: Network

  # Raw Transaction
  defdelegate create_raw_transaction(in_addresses, out_addresses, out_values, change_address, change_value),
    to: RawTransaction
  defdelegate create_coinbase_transaction(out_addresses, out_values), to: RawTransaction

  # Wallet
  defdelegate get_new_wallet, to: Wallet
  defdelegate get_new_address(wallet), to: Wallet
  defdelegate combine_unspent_addresses(wallet, target_value), to: Wallet
  defdelegate spend_address(wallet, address), to: Wallet
  defdelegate import_address(wallet, address), to: Wallet
end
|
lib/APIsimulator/coreApp.ex
| 0.686265 | 0.529081 |
coreApp.ex
|
starcoder
|
defmodule Twirp.Plug do
@moduledoc """
Provides a plug that takes service and handler module. If the request is
directed at the "twirp" endpoint then the plug will intercept the conn and
process it. Otherwise it allows the conn to pass through. This is a deviation
from the twirp specification but it allows users to include twirp services
into their existing plug stacks.
You can use the plug like so:
```elixir
plug Twirp.Plug,
service: MyService,
handler: MyHandler,
```
"""
@content_type "content-type"
alias Twirp.Encoder
alias Twirp.Error
alias Twirp.Telemetry
import Plug.Conn
import Norm
def env_s do
schema(%{
content_type: spec(is_binary()),
method_name: spec(is_atom()),
handler_fn: spec(is_atom()),
input: spec(is_map()),
input_type: spec(is_atom()),
output_type: spec(is_atom()),
http_response_headers: map_of(spec(is_binary()), spec(is_binary())),
})
end
def hook_result_s do
alt(
env: selection(env_s()),
error: schema(%Twirp.Error{})
)
end
def init(args) do
handler =
args
|> Keyword.fetch!(:handler)
Code.ensure_compiled(handler)
service_def =
args
|> Keyword.fetch!(:service)
|> apply(:definition, [])
|> Norm.conform!(Twirp.Service.s())
hooks = %{
before: Keyword.get(args, :before, []),
on_success: Keyword.get(args, :on_success, []),
on_error: Keyword.get(args, :on_error, []),
on_exception: Keyword.get(args, :on_exception, []),
}
rpc_defs =
for rpc <- service_def.rpcs,
do: {"#{rpc.method}", rpc},
into: %{}
service_def =
service_def
|> Map.put(:rpcs, rpc_defs)
|> Map.put(:full_name, Twirp.Service.full_name(service_def))
{service_def, handler, hooks}
end
def call(%{path_info: ["twirp", full_name, method]}=conn, {%{full_name: full_name}=service, handler, hooks}) do
call(%{conn | path_info: [full_name, method]}, {service, handler, hooks})
end
def call(%{path_info: [full_name, method]}=conn, {%{full_name: full_name}=service, handler, hooks}) do
env = %{}
metadata = %{}
start = Telemetry.start(:call, metadata)
try do
with {:ok, env} <- validate_req(conn, method, service),
{:ok, env, conn} <- get_input(env, conn),
{:ok, env} <- call_before_hooks(env, conn, hooks),
{:ok, output} <- call_handler(handler, env)
do
# We're safe to just get the output because call_handler has handled
# the error case for us
resp = Encoder.encode(output, env.output_type, env.content_type)
env = Map.put(env, :output, resp)
call_on_success_hooks(env, hooks)
metadata =
metadata
|> Map.put(:content_type, env.content_type)
|> Map.put(:method, env.method_name)
Telemetry.stop(:call, start, metadata)
conn
|> put_resp_content_type(env.content_type)
|> send_resp(200, resp)
|> halt()
else
{:error, env, error} ->
metadata =
metadata
|> Map.put(:content_type, env.content_type)
|> Map.put(:method, env.method_name)
|> Map.put(:error, error)
Telemetry.stop(:call, start, metadata)
call_on_error_hooks(hooks, env, error)
send_error(conn, error)
end
rescue
exception ->
try do
call_on_exception_hooks(hooks, env, exception)
Telemetry.exception(:call, start, :error, exception, __STACKTRACE__, metadata)
error = Error.internal(Exception.message(exception))
call_on_error_hooks(hooks, env, error)
send_error(conn, error)
rescue
hook_e ->
Telemetry.exception(:call, start, :error, hook_e, __STACKTRACE__, metadata)
error = Error.internal(Exception.message(hook_e))
call_on_error_hooks(hooks, env, error)
send_error(conn, error)
end
end
end
def call(conn, _opts) do
conn
end
def validate_req(conn, method, %{rpcs: rpcs}) do
content_type = content_type(conn)
env = %{
content_type: content_type,
http_response_headers: %{},
method_name: method,
}
cond do
conn.method != "POST" ->
{:error, env, bad_route("HTTP request must be POST", conn)}
!Encoder.valid_type?(content_type) ->
{:error, env, bad_route("Unexpected Content-Type: #{content_type || "nil"}", conn)}
rpcs[method] == nil ->
{:error, env, bad_route("Invalid rpc method: #{method}", conn)}
true ->
rpc = rpcs[method]
env = Map.merge(env, %{
content_type: content_type,
http_response_headers: %{},
method_name: rpc.method,
input_type: rpc.input,
output_type: rpc.output,
handler_fn: rpc.handler_fn,
})
{:ok, conform!(env, env_s())}
end
end
defp get_input(env, conn) do
with {:ok, body, conn} <- get_body(conn, env) do
case Encoder.decode(body, env.input_type, env.content_type) do
{:ok, decoded} ->
{:ok, Map.put(env, :input, decoded), conn}
_error ->
msg = "Invalid request body for rpc method: #{env.method_name}"
error = bad_route(msg, conn)
{:error, env, error}
end
end
end
defp get_body(conn, env) do
# If we're in a phoenix endpoint or an established plug router than the
# user is probably already using a plug parser and the body will be
# empty. We need to check to see if we have body params which is an
# indication that our json has already been parsed. Limiting this to
# only json payloads since the user most likely doesn't have a protobuf
# parser already set up and I want to limit this potentially surprising
# behaviour.
if Encoder.json?(env.content_type) and body_params?(conn) do
{:ok, conn.body_params, conn}
else
case apply(Plug.Conn, :read_body, [conn]) do
{:ok, body, conn} ->
{:ok, body, conn}
_ ->
{:error, env, Error.internal("req_body has already been read or is too large to read")}
end
end
end
defp body_params?(conn) do
case conn.body_params do
%Plug.Conn.Unfetched{} -> false
_ -> true
end
end
defp call_handler(handler, %{output_type: output_type}=env) do
env = conform!(env, selection(env_s()))
if function_exported?(handler, env.handler_fn, 2) do
case apply(handler, env.handler_fn, [env, env.input]) do
%Error{}=error ->
{:error, env, error}
%{__struct__: s}=resp when s == output_type ->
{:ok, resp}
other ->
msg = "Handler method #{env.handler_fn} expected to return one of #{env.output_type} or Twirp.Error but returned #{inspect other}"
{:error, env, Error.internal(msg)}
end
else
{:error, env, Error.unimplemented("Handler function #{env.handler_fn} is not implemented")}
end
end
# Runs every before hook in order, threading the env through each one.
# A hook returning {:error, err} halts the chain; the env as of that hook
# is returned alongside the error. Otherwise the final env is wrapped in
# an :ok tuple.
def call_before_hooks(env, conn, hooks) do
  hooks.before
  |> Enum.reduce_while(env, fn hook, acc_env ->
    case conform!(hook.(conn, acc_env), hook_result_s()) do
      {:env, new_env} -> {:cont, new_env}
      {:error, reason} -> {:halt, {:error, acc_env, reason}}
    end
  end)
  |> case do
    {:error, final_env, reason} -> {:error, final_env, reason}
    final_env -> {:ok, final_env}
  end
end
# Invokes each on_success hook with the final env, returning the list of
# hook results (callers ignore the return value).
def call_on_success_hooks(env, hooks) do
  Enum.map(hooks.on_success, fn hook -> hook.(env) end)
end
# Invokes each on_error hook with the env and the error, returning the
# list of hook results (callers ignore the return value).
def call_on_error_hooks(hooks, env, error) do
  Enum.map(hooks.on_error, fn hook -> hook.(env, error) end)
end
# Invokes each on_exception hook with the env and the raised exception,
# returning the list of hook results (callers ignore the return value).
def call_on_exception_hooks(hooks, env, exception) do
  Enum.map(hooks.on_exception, fn hook -> hook.(env, exception) end)
end
# Returns the value of the request's content-type header, or nil when the
# header is absent. Uses List.first/1 (idiomatic for "head or nil") in
# place of Enum.at(list, 0).
defp content_type(conn) do
  conn
  |> get_req_header(@content_type)
  |> List.first()
end
# Serializes the error as JSON and writes it to the connection, mapping
# the twirp error code to an HTTP status and halting the plug pipeline.
defp send_error(conn, error) do
  json_type = Encoder.type(:json)
  body = Encoder.encode(error, nil, json_type)
  status = Error.code_to_status(error.code)

  conn
  |> put_resp_content_type(json_type)
  |> send_resp(status, body)
  |> halt()
end
# Builds the standard twirp bad_route error, recording the offending
# method and path in the error metadata.
defp bad_route(msg, conn) do
  meta = %{"twirp_invalid_route" => "#{conn.method} #{conn.request_path}"}
  Error.bad_route(msg, meta)
end
end
|
lib/twirp/plug.ex
| 0.666497 | 0.638046 |
plug.ex
|
starcoder
|
defmodule Phoenix.PubSub.Local do
  @moduledoc """
  PubSub implementation for handling local-node process groups.

  This module is used by Phoenix pubsub adapters to handle
  their local node subscriptions and it is usually not accessed
  directly. See `Phoenix.PubSub.PG2` for an example integration.
  """

  use GenServer

  @doc """
  Starts the server.

    * `server_name` - The name to register the server under
  """
  def start_link(server_name, gc_name) do
    GenServer.start_link(__MODULE__, {server_name, gc_name}, name: server_name)
  end

  @doc """
  Subscribes the pid to the topic.

    * `pubsub_server` - The registered server name
    * `pool_size` - The size of the pool
    * `pid` - The subscriber pid
    * `topic` - The string topic, for example "users:123"
    * `opts` - The optional list of options. Supported options
      only include `:link` to link the subscriber to local

  ## Examples

      iex> subscribe(MyApp.PubSub, 1, self(), "foo")
      :ok
  """
  def subscribe(pubsub_server, pool_size, pid, topic, opts \\ []) when is_atom(pubsub_server) do
    # The subscriber pid is hashed to pick a shard; each shard owns a pair of
    # public ETS tables: `local` (topic -> {pid, fastlane}) and `gc`
    # (pid -> topic), written directly from the caller's process.
    {local, gc} =
      pid
      |> :erlang.phash2(pool_size)
      |> pools_for_shard(pubsub_server)

    # Register the monitor first so a crashing subscriber is always cleaned up.
    :ok = GenServer.call(local, {:monitor, pid, opts})
    true = :ets.insert(gc, {pid, topic})
    true = :ets.insert(local, {topic, {pid, opts[:fastlane]}})
    :ok
  end

  @doc """
  Unsubscribes the pid from the topic.

    * `pubsub_server` - The registered server name
    * `pool_size` - The size of the pool
    * `pid` - The subscriber pid
    * `topic` - The string topic, for example "users:123"

  ## Examples

      iex> unsubscribe(MyApp.PubSub, 1, self(), "foo")
      :ok
  """
  def unsubscribe(pubsub_server, pool_size, pid, topic) when is_atom(pubsub_server) do
    {local, gc} =
      pid
      |> :erlang.phash2(pool_size)
      |> pools_for_shard(pubsub_server)

    true = :ets.match_delete(gc, {pid, topic})
    true = :ets.match_delete(local, {topic, {pid, :_}})

    # Drop the monitor only when the pid has no remaining subscriptions.
    case :ets.select_count(gc, [{{pid, :_}, [], [true]}]) do
      0 -> :ok = GenServer.call(local, {:demonitor, pid})
      _ -> :ok
    end
  end

  @doc """
  Sends a message to all subscribers of a topic.

    * `pubsub_server` - The registered server name
    * `pool_size` - The size of the pool
    * `topic` - The string topic, for example "users:123"

  (NOTE: review — the examples below omit the `fastlane` and `msg`
  arguments of the actual 6-arity function.)

  ## Examples

      iex> broadcast(MyApp.PubSub, 1, self(), "foo")
      :ok
      iex> broadcast(MyApp.PubSub, 1, :none, "bar")
      :ok
  """
  # Fast path: with a single-shard pool there is nothing to iterate.
  def broadcast(fastlane, pubsub_server, 1 = _pool_size, from, topic, msg) when is_atom(pubsub_server) do
    do_broadcast(fastlane, pubsub_server, _shard = 0, from, topic, msg)
    :ok
  end

  def broadcast(fastlane, pubsub_server, pool_size, from, topic, msg) when is_atom(pubsub_server) do
    for shard <- 0..(pool_size - 1) do
      do_broadcast(fastlane, pubsub_server, shard, from, topic, msg)
    end

    :ok
  end

  # Without a fastlane module, deliver the message directly to every
  # subscriber pid except the sender (`from`).
  defp do_broadcast(nil, pubsub_server, shard, from, topic, msg) do
    pubsub_server
    |> subscribers_with_fastlanes(topic, shard)
    |> Enum.each(fn
      {pid, _} when pid == from -> :noop
      {pid, _} -> send(pid, msg)
    end)
  end

  # With a fastlane module, delegate delivery to its fastlane/3 callback.
  defp do_broadcast(fastlane, pubsub_server, shard, from, topic, msg) do
    pubsub_server
    |> subscribers_with_fastlanes(topic, shard)
    |> fastlane.fastlane(from, msg) # TODO: Test this contract
  end

  @doc """
  Returns a set of subscribers pids for the given topic.

    * `pubsub_server` - The registered server name or pid
    * `topic` - The string topic, for example "users:123"
    * `shard` - The shard, for example `1`

  ## Examples

      iex> subscribers(:pubsub_server, "foo", 1)
      [#PID<0.48.0>, #PID<0.49.0>]
  """
  def subscribers(pubsub_server, topic, shard) when is_atom(pubsub_server) do
    pubsub_server
    |> subscribers_with_fastlanes(topic, shard)
    |> Enum.map(fn {pid, _fastlanes} -> pid end)
  end

  @doc """
  Returns a set of subscribers pids for the given topic with fastlane tuples.
  See `subscribers/1` for more information.
  """
  def subscribers_with_fastlanes(pubsub_server, topic, shard) when is_atom(pubsub_server) do
    try do
      shard
      |> local_for_shard(pubsub_server)
      |> :ets.lookup_element(topic, 2)
    catch
      # lookup_element raises :badarg when the topic has no entry; treat
      # "no subscribers" as an empty list.
      :error, :badarg -> []
    end
  end

  @doc false
  # This is an expensive and private operation. DO NOT USE IT IN PROD.
  def list(pubsub_server, shard) when is_atom(pubsub_server) do
    shard
    |> local_for_shard(pubsub_server)
    |> :ets.select([{{:'$1', :_}, [], [:'$1']}])
    |> Enum.uniq
  end

  @doc false
  # This is an expensive and private operation. DO NOT USE IT IN PROD.
  def subscription(pubsub_server, pool_size, pid) when is_atom(pubsub_server) do
    {local, _gc} =
      pid
      |> :erlang.phash2(pool_size)
      |> pools_for_shard(pubsub_server)

    GenServer.call(local, {:subscription, pid})
  end

  @doc false
  def local_name(pubsub_server, shard) do
    Module.concat(["#{pubsub_server}.Local#{shard}"])
  end

  @doc false
  def gc_name(pubsub_server, shard) do
    Module.concat(["#{pubsub_server}.GC#{shard}"])
  end

  def init({local, gc}) do
    # Public duplicate_bag tables so subscribe/unsubscribe can write from the
    # caller's process without serializing through this server.
    ^local = :ets.new(local, [:duplicate_bag, :named_table, :public,
                              read_concurrency: true, write_concurrency: true])
    ^gc = :ets.new(gc, [:duplicate_bag, :named_table, :public,
                        read_concurrency: true, write_concurrency: true])

    # Trap exits so crashes of subscribers linked via the :link option (see
    # handle_call {:monitor, ...}) don't bring this server down; their EXIT
    # messages fall through to the catch-all handle_info/2.
    Process.flag(:trap_exit, true)
    {:ok, %{monitors: %{}, gc: gc}}
  end

  def handle_call({:monitor, pid, opts}, _from, state) do
    if opts[:link], do: Process.link(pid)
    {:reply, :ok, put_new_monitor(state, pid)}
  end

  def handle_call({:demonitor, pid}, _from, state) do
    {:reply, :ok, drop_monitor(state, pid)}
  end

  def handle_call({:subscription, pid}, _from, state) do
    topics = GenServer.call(state.gc, {:subscription, pid})
    {:reply, {state.monitors[pid], topics}, state}
  end

  # A monitored subscriber went down: delegate ETS cleanup to the GC server
  # and forget its monitor ref.
  def handle_info({:DOWN, _ref, _type, pid, _info}, state) do
    Phoenix.PubSub.GC.down(state.gc, pid)
    {:noreply, drop_monitor(state, pid)}
  end

  def handle_info(_, state) do
    {:noreply, state}
  end

  defp local_for_shard(shard, pubsub_server) do
    {local_server, _gc_server} = pools_for_shard(shard, pubsub_server)
    local_server
  end

  # Looks up the {local, gc} server pair for a shard from the registry table
  # named after the pubsub server.
  defp pools_for_shard(shard, pubsub_server) do
    {_, _} = servers = :ets.lookup_element(pubsub_server, shard, 2)
    servers
  end

  # Adds a monitor for pid unless one already exists (idempotent).
  defp put_new_monitor(%{monitors: monitors} = state, pid) do
    case Map.fetch(monitors, pid) do
      {:ok, _ref} -> state
      :error -> %{state | monitors: Map.put(monitors, pid, Process.monitor(pid))}
    end
  end

  # Demonitors pid and removes it from the monitor map (no-op if absent).
  defp drop_monitor(%{monitors: monitors} = state, pid) do
    case Map.fetch(monitors, pid) do
      {:ok, ref} ->
        Process.demonitor(ref)
        %{state | monitors: Map.delete(monitors, pid)}

      :error -> state
    end
  end
end
|
lib/phoenix/pubsub/local.ex
| 0.771542 | 0.408159 |
local.ex
|
starcoder
|
defmodule Day2 do
  @moduledoc """
  Advent of Code 2015, day 2: wrapping paper and ribbon requirements.
  """

  @doc """
  This will calculate the wrapping paper needed for presents.

  Each present needs the full surface area of its box plus the area of
  the smallest side as slack.

  ## Examples

      iex> Day2.main("2x3x4")
      58
      iex> Day2.main("1x1x10")
      43

  """
  def main(input) do
    input
    |> strip_whitespace()
    |> get_measurements([])
    |> get_total_square_footage(0)
  end

  @doc """
  This will calculate the ribbon needed for presents.

  Each present needs ribbon equal to the smallest perimeter of any face,
  plus the cubic volume of the box for the bow.

  ## Examples

      iex> Day2.secondary("2x3x4")
      34
      iex> Day2.secondary("1x1x10")
      14

  """
  def secondary(input) do
    input
    |> strip_whitespace()
    |> get_measurements([])
    |> get_total_ribbon_needs(0)
  end

  # Convenience runner over the puzzle input file (part one).
  def test_main do
    {:ok, input} = File.read("input/day2.txt")
    main(input)
  end

  # Convenience runner over the puzzle input file (part two).
  def test_secondary do
    {:ok, input} = File.read("input/day2.txt")
    secondary(input)
  end

  @doc """
  This will split our input and return a list of measurement strings.

  ## Examples

      iex> Day2.strip_whitespace("2x3x4")
      ["2x3x4"]
      iex> Day2.strip_whitespace("4x23x21 22x29x19 11x4x11")
      ["4x23x21", "22x29x19", "11x4x11"]

  """
  def strip_whitespace(input) do
    String.split(input)
  end

  @doc """
  This will split our inputs further and return a list of maps that contain each measurement.

  The parsed measurements are appended after `result` (kept for
  backwards compatibility with recursive callers passing `[]`).

  ## Examples

      iex> Day2.get_measurements(["4x23x21", "22x29x19", "11x4x11"], [])
      [%{length: 4, width: 23, height: 21}, %{length: 22, width: 29, height: 19}, %{length: 11, width: 4, height: 11}]

  """
  def get_measurements(measurements, result) do
    # Single append of the mapped list avoids the O(n^2) cost of `++` per element.
    result ++ Enum.map(measurements, &parse_measurement/1)
  end

  # Parses a single "LxWxH" string into a dimensions map.
  # Uses String.to_integer/1, which raises on malformed input instead of
  # silently truncating like :string.to_integer/1 did.
  defp parse_measurement(string) do
    [length, width, height] =
      string
      |> String.split("x")
      |> Enum.map(&String.to_integer/1)

    %{length: length, width: width, height: height}
  end

  @doc """
  Sums the paper needed for every present, starting from `accumulator`.

  ## Examples

      iex> Day2.get_total_square_footage([%{length: 2, width: 3, height: 4}], 0)
      58

  """
  def get_total_square_footage(measurements, accumulator) do
    Enum.reduce(measurements, accumulator, fn measurement, acc ->
      acc + get_paper_amount(measurement)
    end)
  end

  @doc """
  Sums the ribbon needed for every present, starting from `accumulator`.

  ## Examples

      iex> Day2.get_total_ribbon_needs([%{length: 2, width: 3, height: 4}], 0)
      34
      iex> Day2.get_total_ribbon_needs([%{length: 1, width: 1, height: 10}], 0)
      14

  """
  def get_total_ribbon_needs(measurements, accumulator) do
    Enum.reduce(measurements, accumulator, fn measurement, acc ->
      acc + get_ribbon_amount(measurement)
    end)
  end

  @doc """
  Paper for one present: twice the sum of the three face areas plus the
  smallest face area as slack.
  """
  def get_paper_amount(%{length: l, width: w, height: h}) do
    sides = [l * w, w * h, h * l]
    2 * Enum.sum(sides) + Enum.min(sides)
  end

  @doc """
  Ribbon for one present: the perimeter of the smallest face plus the
  volume of the box for the bow.

  Bug fix: the previous implementation always used `2*length + 2*width`,
  which is only correct when length and width happen to be the two
  smallest dimensions (e.g. it returned 38 instead of 34 for "4x3x2").
  """
  def get_ribbon_amount(%{length: l, width: w, height: h}) do
    [a, b | _] = Enum.sort([l, w, h])
    2 * (a + b) + l * w * h
  end

  # Kept for backwards compatibility with external callers; sums `sides`
  # onto `accumulator`.
  @doc false
  def sum_sides(sides, accumulator) do
    accumulator + Enum.sum(sides)
  end
end
|
advent2015/lib/day2.ex
| 0.61115 | 0.499573 |
day2.ex
|
starcoder
|
defmodule <%= components_module %>.Icon do
  use <%= web_module %>, :component

  @moduledoc ~S"""
  ## icons

  The `<.icon name="..." />` component is an adapter function to easily use other
  icon packages within heex.

  The `icons/0` function is only used for the catalog.

  ## icon default type

  The icon default type is specific to the icon package you use. For hero icons
  there are the types `"solid"` and `"outline"`. Configure the default type as
  follows:

  (NOTE: review — the example below shows the `:semantic_icon` config; confirm
  the configuration key used for the default icon type.)

  ```elixir
  config :gen_components, :semantic_icon, [
    {:delete, :danger, "trash"},
    {:edit, :primary, "pencil"},
    {:add, :success, "plus"}
  ]
  ```

  ## semantic icons

  "Code what you mean, not what you need to achieve it."

  So you can configure meaningful icon components like `<.icon_delete />`.
  You can configure the semantic icons like:

  ```elixir
  config :gen_components, :semantic_icon, [
    {:delete, :danger, "trash"},
    {:edit, :primary, "pencil"},
    {:add, :success, "plus"}
  ]
  ```

  This will generate 3 icon components. The first will call `icon/1` like that,
  and will be named `<.icon_delete/>`:

  ```heex
  <.icon name="trash" class={"text-danger #{@class}"} {attrs}/>
  ```
  """

  @hero_icons_path "priv/solid"

  alias Heroicons.Solid
  alias Heroicons.Outline

  # Compile-time map of "icon-name" => :icon_name, built from the SVG files
  # shipped with the :heroicons package.
  @icons Application.app_dir(:heroicons, @hero_icons_path)
  |> File.ls!()
  |> Enum.map(&Path.basename/1)
  |> Enum.map(&Path.rootname/1)
  |> Enum.map(&{&1, String.replace(&1, "-", "_") |> String.to_atom()})
  |> Map.new()

  @doc """
  List all vendor icon names. Used for the catalogue.
  """
  def icons(), do: Map.keys(@icons)

  @doc """
  A general icon component for hero icons.

  ## Attributes

  * name: the name of the hero icon
  * type: "outline" | "solid" (specific to hero icons)
  """
  def icon(assigns) do
    assigns = assign_new(assigns, :type, fn -> <%= inspect(default_icon_type) %> end)
    attrs = assigns_to_attributes(assigns, [:name, :type])
    ~H"""
    <%%= case @type do %>
    <%% "outline" -> %><%%= apply(Outline, iconf_by_name(@name), [attrs]) %>
    <%% "solid" -> %><%%= apply(Solid, iconf_by_name(@name), [attrs]) %>
    <%% end %>
    """
  end

  <%= for {semantic_name, style, icon_name} <- semantic_icons do %>
  def icon_<%= semantic_name %>(assigns) do
    assigns = assign_new(assigns, :class, fn -> "" end)
    attrs = assigns_to_attributes(assigns, [:name, :class])
    ~H"""
    <.icon name="<%= icon_name %>" class={"text-<%= style %> #{@class}"} {attrs}/>
    """
  end
  <% end %>

  # icon function name by icon name
  defp iconf_by_name(name), do: Map.fetch!(@icons, name)
end
|
priv/templates/gen.components/components/icon.ex
| 0.745213 | 0.736969 |
icon.ex
|
starcoder
|
defmodule JaSerializer.EctoErrorSerializer do
  alias JaSerializer.Formatter.Utils

  @moduledoc """
  The EctoErrorSerializer is used to transform Ecto changeset errors to JSON API standard
  error objects.

  If a changeset is passed in without optional error members then the object returned will
  only contain: source, title, and detail.

  ```
  %{"errors" => [
      %{
        source: %{pointer: "/data/attributes/monies"},
        title: "must be more than 10",
        detail: "Monies must be more than 10"
      }
    ]
  }
  ```

  Additional error members can be set by passing in an options list.
  These include: id, status, code, meta, and links.

  For more information on the JSON API standard for handling error objects check out:
  [jsonapi.org](http://jsonapi.org/examples/#error-objects)
  """

  def format(errors), do: format(errors, [])

  # Accepts (and discards) a conn-style map passed as the second argument.
  def format(errors, conn) when is_map(conn), do: format(errors, [])

  # Unwraps a changeset so that its error list is formatted.
  def format(%{__struct__: Ecto.Changeset} = cs, o), do: format(cs.errors, o)

  def format(errors, opts) do
    errors
    |> Enum.map(&(format_each(&1, opts[:opts])))
    |> JaSerializer.ErrorSerializer.format
  end

  # Three-argument variant: changeset plus a (discarded) conn plus opts.
  def format(%{__struct__: Ecto.Changeset} = cs, _c, o), do: format(cs.errors, o)

  # Formats one {field, {message, interpolation_values}} error tuple, expanding
  # Ecto's %{key} placeholders in the message.
  defp format_each({field, {message, vals}}, opts) do
    # See https://github.com/elixir-ecto/ecto/blob/34a1012dd1f6d218c0183deb512b6c084afe3b6f/lib/ecto/changeset.ex#L1836-L1838
    # :type and :fields are Ecto-internal values, not printable placeholders.
    title = Enum.reduce(vals, message, fn {key, value}, acc ->
      case key do
        :type -> acc
        :fields -> acc
        _ -> String.replace(acc, "%{#{key}}", to_string(value))
      end
    end)
    %{
      source: %{pointer: pointer_for(field)},
      title: title,
      detail: "#{Utils.humanize(field)} #{title}"
    } |> merge_opts(opts)
  end

  # Formats one {field, message} error tuple (no interpolation values).
  defp format_each({field, message}, opts) do
    %{
      source: %{pointer: pointer_for(field)},
      title: message,
      detail: "#{Utils.humanize(field)} #{message}"
    } |> merge_opts(opts)
  end

  defp merge_opts(error, nil), do: error

  # Optional members (id, status, code, meta, links) are merged into the
  # error object, overriding generated keys on conflict.
  defp merge_opts(error, opts) when is_list(opts) do
    opts = Enum.into(opts, %{})
    Map.merge(error, opts)
  end

  defp merge_opts(error, _opts), do: error

  # Assumes relationship name is the same as the field name without the id.
  # This is a fairly large and incorrect assumption, but until we have better
  # ideas this will work for most relationships.
  defp pointer_for(field) do
    case Regex.run(~r/(.*)_id$/, to_string(field)) do
      nil -> "/data/attributes/#{Utils.format_key(field)}"
      [_, rel] -> "/data/relationships/#{Utils.format_key(rel)}"
    end
  end
end
|
lib/ja_serializer/ecto_error_serializer.ex
| 0.811377 | 0.713307 |
ecto_error_serializer.ex
|
starcoder
|
defmodule Commanded.Assertions.EventAssertions do
  @moduledoc """
  Provides test assertion and wait for event functions to help test applications
  built using Commanded.

  The default assert and refute receive timeouts are one second.

  You can override the default timeout in config (e.g. `config/test.exs`):

      config :commanded,
        assert_receive_event_timeout: 1_000,
        refute_receive_event_timeout: 1_000

  """

  import ExUnit.Assertions

  alias Commanded.EventStore
  alias Commanded.EventStore.RecordedEvent

  @doc """
  Assert that events matching their respective predicates have a matching
  correlation id.

  Useful when there is a chain of events that is connected through event handlers.

  ## Example

      assert_correlated(
        BankApp,
        BankAccountOpened, fn opened -> opened.id == 1 end,
        InitialAmountDeposited, fn deposited -> deposited.id == 2 end
      )

  """
  def assert_correlated(application, event_type_a, predicate_a, event_type_b, predicate_b) do
    # Nested subscriptions: find event A, then find event B, then compare
    # the correlation ids carried in their metadata.
    assert_receive_event(application, event_type_a, predicate_a, fn _event_a, metadata_a ->
      assert_receive_event(application, event_type_b, predicate_b, fn _event_b, metadata_b ->
        assert metadata_a.correlation_id == metadata_b.correlation_id
      end)
    end)
  end

  @doc """
  Assert that an event of the given event type is published.
  Verify that event using the assertion function.

  ## Example

      assert_receive_event(BankApp, BankAccountOpened, fn opened ->
        assert opened.account_number == "ACC123"
      end)

  """
  def assert_receive_event(application, event_type, assertion_fn)
      when is_function(assertion_fn, 1) or is_function(assertion_fn, 2) do
    # Without a predicate, match any event of the type.
    assert_receive_event(application, event_type, fn _event -> true end, assertion_fn)
  end

  @doc """
  Assert that an event of the given event type, matching the predicate, is
  published. Verify that event using the assertion function.

  ## Example

      assert_receive_event(BankApp, BankAccountOpened,
        fn opened -> opened.account_number == "ACC123" end,
        fn opened ->
          assert opened.balance == 1_000
        end)

  """
  def assert_receive_event(application, event_type, predicate_fn, assertion_fn)
      when is_function(assertion_fn, 1) or is_function(assertion_fn, 2) do
    # Fail fast when the event module itself does not exist (typo guard).
    unless Code.ensure_loaded?(event_type) do
      raise ExUnit.AssertionError, "Event #{inspect(event_type)} not found"
    end

    with_subscription(application, fn subscription ->
      do_assert_receive(application, subscription, event_type, predicate_fn, assertion_fn)
    end)
  end

  @doc """
  Refute that an event of the given type has been received.

  An optional predicate may be provided to filter events matching the refuted
  type.

  ## Examples

  Refute that `ExampleEvent` is produced by given anonymous function:

      refute_receive_event(ExampleApp, ExampleEvent, fn ->
        :ok = MyApp.dispatch(command)
      end)

  Refute that `ExampleEvent` is produced by `some_func/0` function:

      refute_receive_event(ExampleApp, ExampleEvent, &some_func/0)

  Refute that `ExampleEvent` matching given `event_matches?/1` predicate function
  is produced by `some_func/0` function:

      refute_receive_event(ExampleApp, ExampleEvent, &some_func/0,
        predicate: &event_matches?/1
      )

  Refute that `ExampleEvent` matching given anonymous predicate function
  is produced by `some_func/0` function:

      refute_receive_event(ExampleApp, ExampleEvent, &some_func/0,
        predicate: fn event -> event.value == 1 end
      )

  Refute that `ExampleEvent` produced by `some_func/0` function is published to
  a given stream:

      refute_receive_event(ExampleApp, ExampleEvent, &some_func/0,
        predicate: fn event -> event.value == 1 end,
        stream: "foo-1234"
      )

  """
  def refute_receive_event(application, event_type, refute_fn, opts \\ [])
      when is_function(refute_fn, 0) do
    predicate_fn = Keyword.get(opts, :predicate) || fn _event -> true end
    timeout = Keyword.get(opts, :timeout, default_refute_receive_timeout())
    # Subscribe from :current so only events produced by refute_fn are seen.
    subscription_opts = Keyword.take(opts, [:stream]) |> Keyword.put(:start_from, :current)

    reply_to = self()
    ref = make_ref()

    # Start a task to subscribe and verify received events
    task =
      Task.async(fn ->
        with_subscription(
          application,
          fn subscription ->
            send(reply_to, {:subscribed, ref})
            do_refute_receive_event(application, subscription, event_type, predicate_fn)
          end,
          subscription_opts
        )
      end)

    # Wait until subscription has subscribed before executing refute function,
    # otherwise we might not receive a matching event.
    assert_receive {:subscribed, ^ref}, default_receive_timeout()

    refute_fn.()

    # The task loops forever unless a matching event arrives; a timeout
    # (yield returns nil) therefore means no matching event was seen.
    case Task.yield(task, timeout) || Task.shutdown(task) do
      {:ok, :ok} -> :ok
      {:ok, {:error, event}} -> flunk("Unexpectedly received event: " <> inspect(event))
      {:exit, error} -> flunk("Encountered an error: " <> inspect(error))
      nil -> :ok
    end
  end

  @doc """
  Wait for an event of the given event type to be published.

  ## Examples

      wait_for_event(BankApp, BankAccountOpened)

  """
  def wait_for_event(application, event_type) do
    wait_for_event(application, event_type, fn _event -> true end)
  end

  @doc """
  Wait for an event of the given event type, matching the predicate, to be
  published.

  ## Examples

      wait_for_event(BankApp, BankAccountOpened, fn opened ->
        opened.account_number == "ACC123"
      end)

  """
  def wait_for_event(application, event_type, predicate_fn) when is_function(predicate_fn) do
    with_subscription(application, fn subscription ->
      do_wait_for_event(application, subscription, event_type, predicate_fn)
    end)
  end

  @doc false
  # Creates a uniquely named transient subscription, runs callback_fn with
  # it, and always tears the subscription down afterwards.
  def with_subscription(application, callback_fn, opts \\ [])
      when is_function(callback_fn, 1) do
    subscription_name = UUID.uuid4()
    stream = Keyword.get(opts, :stream, :all)
    start_from = Keyword.get(opts, :start_from, :origin)

    {:ok, subscription} =
      EventStore.subscribe_to(application, stream, subscription_name, self(), start_from)

    assert_receive {:subscribed, ^subscription}, default_receive_timeout()

    try do
      callback_fn.(subscription)
    after
      :ok = EventStore.unsubscribe(application, subscription)
      :ok = EventStore.delete_subscription(application, stream, subscription_name)
    end
  end

  # Receives event batches until one contains a matching event, then runs the
  # assertion function on it. Non-matching batches are acked and skipped.
  defp do_assert_receive(application, subscription, event_type, predicate_fn, assertion_fn) do
    assert_receive {:events, received_events}, default_receive_timeout()

    case find_expected_event(received_events, event_type, predicate_fn) do
      %RecordedEvent{data: data} = expected_event ->
        # Arity-2 assertion functions also receive the recorded event (metadata).
        args =
          cond do
            is_function(assertion_fn, 1) -> [data]
            is_function(assertion_fn, 2) -> [data, expected_event]
          end

        apply(assertion_fn, args)

      nil ->
        :ok = ack_events(application, subscription, received_events)
        do_assert_receive(application, subscription, event_type, predicate_fn, assertion_fn)
    end
  end

  # Loops until a matching event arrives (returned as {:error, data}); runs
  # inside a task, which is shut down by the caller on timeout.
  defp do_refute_receive_event(application, subscription, event_type, predicate_fn) do
    receive do
      {:events, events} ->
        case find_expected_event(events, event_type, predicate_fn) do
          %RecordedEvent{data: data} ->
            {:error, data}

          nil ->
            :ok = ack_events(application, subscription, events)
            do_refute_receive_event(application, subscription, event_type, predicate_fn)
        end
    end
  end

  # Receives event batches until one contains a matching event, which is
  # returned as-is (the RecordedEvent, not just its data).
  defp do_wait_for_event(application, subscription, event_type, predicate_fn) do
    assert_receive {:events, received_events}, default_receive_timeout()

    case find_expected_event(received_events, event_type, predicate_fn) do
      %RecordedEvent{} = expected_event ->
        expected_event

      nil ->
        :ok = ack_events(application, subscription, received_events)
        do_wait_for_event(application, subscription, event_type, predicate_fn)
    end
  end

  # Returns the first recorded event whose data struct is of event_type and
  # satisfies the predicate (arity 1: data only; arity 2: data + event).
  defp find_expected_event(received_events, event_type, predicate_fn) do
    Enum.find(received_events, fn
      %RecordedEvent{data: %{__struct__: ^event_type} = data} = received_event ->
        args =
          cond do
            is_function(predicate_fn, 1) -> [data]
            is_function(predicate_fn, 2) -> [data, received_event]
          end

        apply(predicate_fn, args)

      %RecordedEvent{} ->
        false
    end)
  end

  defp ack_events(_application, _subscription, []), do: :ok

  # Only the final event of the batch is acked — presumably acking an event
  # acknowledges all prior events in the subscription; confirm against
  # EventStore.ack_event/3 semantics.
  defp ack_events(application, subscription, [event]),
    do: EventStore.ack_event(application, subscription, event)

  defp ack_events(application, subscription, [_event | events]),
    do: ack_events(application, subscription, events)

  defp default_receive_timeout,
    do: Application.get_env(:commanded, :assert_receive_event_timeout, 1_000)

  defp default_refute_receive_timeout,
    do: Application.get_env(:commanded, :refute_receive_event_timeout, 1_000)
end
|
lib/commanded/assertions/event_assertions.ex
| 0.903715 | 0.743494 |
event_assertions.ex
|
starcoder
|
defmodule Benchee.Scenario do
  @moduledoc """
  Core data structure representing one particular case (combination of function and input).

  Represents the combination of a particular function to benchmark (also called "job" defined
  by `job_name` and `function`) in combination with a specific input (`input_name` and `input`).
  When no input is given, the combined value is representative of "no input".

  A scenario then further gathers all data collected for this particular combination during
  `Benchee.Benchmark.collect/3`, which are then used later in the process by `Benchee.Statistics`
  to compute the relevant statistics which are then also added to the scenario.
  It is the home of the aggregated knowledge regarding this particular case/scenario.

  `name` is the name that should be used by formatters to display scenarios as
  it potentially includes the `tag` present when loading scenarios that were
  saved before. See `display_name/1`.
  """

  alias Benchee.Benchmark.Hooks
  alias Benchee.CollectionData

  defstruct [
    :name,
    :job_name,
    :function,
    :input_name,
    :input,
    :before_each,
    :after_each,
    :before_scenario,
    :after_scenario,
    :tag,
    run_time_data: %CollectionData{},
    memory_usage_data: %CollectionData{}
  ]

  @typedoc """
  The main function executed while benchmarking.

  No arguments if no inputs are used, one argument if inputs are used.
  """
  @type benchmarking_function :: (() -> any) | (any -> any)

  @typedoc """
  All the data collected for a scenario (combination of function and input)

  Among all the data required to execute the scenario (function, input, all the hooks aka
  after_*/before_*), data needed to display (name, job_name, input_name, tag) and of course
  run_time_data and memory_data with all the samples and computed statistics.
  """
  @type t :: %__MODULE__{
          name: String.t(),
          job_name: String.t(),
          function: benchmarking_function,
          input_name: String.t() | nil,
          input: any | nil,
          run_time_data: CollectionData.t(),
          memory_usage_data: CollectionData.t(),
          before_each: Hooks.hook_function() | nil,
          after_each: Hooks.hook_function() | nil,
          before_scenario: Hooks.hook_function() | nil,
          after_scenario: Hooks.hook_function() | nil,
          tag: String.t() | nil
        }

  @doc """
  Returns the correct name to display of the given scenario data.

  In the normal case this is `job_name`, however when scenarios are loaded they
  are tagged and these tags should be shown for disambiguation.

  ## Examples

      iex> alias Benchee.Scenario
      iex> Scenario.display_name(%Scenario{job_name: "flat_map"})
      "flat_map"
      iex> Scenario.display_name(%Scenario{job_name: "flat_map", tag: "master"})
      "flat_map (master)"
      iex> Scenario.display_name(%{job_name: "flat_map"})
      "flat_map"
  """
  @spec display_name(t) :: String.t()
  def display_name(%{job_name: job_name, tag: nil}), do: job_name
  def display_name(%{job_name: job_name, tag: tag}), do: "#{job_name} (#{tag})"
  # Fallback for plain maps without a :tag key (see last doctest above).
  def display_name(%{job_name: job_name}), do: job_name

  @doc """
  Returns `true` if data of the provided type has been fully processed, `false` otherwise.

  Current available types are `run_time` and `memory`. Reasons they might not have been processed
  yet are:

  * Suite wasn't configured to collect them at all
  * `Benchee.statistics/1` hasn't been called yet so that data was collected but statistics
    aren't present yet

  ## Examples

      iex> alias Benchee.Scenario
      iex> alias Benchee.Statistics
      iex> scenario = %Scenario{run_time_data: %Benchee.CollectionData{statistics: %Statistics{sample_size: 100}}}
      iex> Scenario.data_processed?(scenario, :run_time)
      true
      iex> scenario = %Scenario{memory_usage_data: %Benchee.CollectionData{statistics: %Statistics{sample_size: 1}}}
      iex> Scenario.data_processed?(scenario, :memory)
      true
      iex> scenario = %Scenario{memory_usage_data: %Benchee.CollectionData{statistics: %Statistics{sample_size: 0}}}
      iex> Scenario.data_processed?(scenario, :memory)
      false
  """
  @spec data_processed?(t, :run_time | :memory) :: boolean
  def data_processed?(scenario, :run_time) do
    scenario.run_time_data.statistics.sample_size > 0
  end

  def data_processed?(scenario, :memory) do
    scenario.memory_usage_data.statistics.sample_size > 0
  end
end
|
lib/benchee/scenario.ex
| 0.883494 | 0.831964 |
scenario.ex
|
starcoder
|
defmodule Distillery.Releases.Appup.Transform do
  @moduledoc """
  A transform is an appup compilation pass which receives a list of appup instructions,
  along with metadata about those instructions, such as the source application,
  the source and target versions involved, and an optional list of configuration options
  for the transform.

  The job of a transform is to, well, apply a transformation to the instruction set, in
  order to accomplish some objective that one desires to be automated. A trivial example
  of one such transform would be a transform which ensures the purge mode is set to `:soft_purge`
  for all `:update` instructions. To see an example of such a transform, look in `test/support/purge_transform.ex`
  """

  alias Distillery.Releases.Appup
  alias Distillery.Releases.Appup.Utils
  alias Distillery.Releases.Appup.TransformError

  @type app :: Appup.app()
  @type version :: Appup.appup_ver()
  @type options :: [term]
  @type instruction :: Appup.instruction()
  # A transform is a module (optionally paired with its options).
  @type transform :: module | {module, options}

  @callback up(app, version, version, [instruction], options) :: [instruction]
  @callback down(app, version, version, [instruction], options) :: [instruction]

  defmacro __using__(_) do
    quote do
      @behaviour unquote(__MODULE__)

      # Default callbacks are identity transforms; using modules override
      # whichever direction(s) they care about.
      @impl unquote(__MODULE__)
      def up(_app, _v1, _v2, instructions, _opts) do
        instructions
      end

      @impl unquote(__MODULE__)
      def down(_app, _v1, _v2, instructions, _opts) do
        instructions
      end

      defoverridable up: 5, down: 5
    end
  end

  @doc """
  Applies all transforms against the current upgrade instruction.

  Additional information required as arguments and passed to transforms are
  the app the instruction applies to, and the source and target versions involved.
  """
  @spec up([instruction], app, version, version, [transform]) :: [instruction]
  def up(instructions, _app, _v1, _v2, []) do
    instructions
  end

  # Normalize a bare module to the {module, opts} form.
  def up(instructions, app, v1, v2, [mod | rest]) when is_atom(mod) do
    up(instructions, app, v1, v2, [{mod, []} | rest])
  end

  # Apply one transform, validate its output, then recurse over the rest.
  def up(instructions, app, v1, v2, [{mod, opts} | rest]) when is_atom(mod) and is_list(opts) do
    case mod.up(app, v1, v2, instructions, opts) do
      ixs when is_list(ixs) ->
        # Validate
        validate_instructions!(mod, :up, ixs)
        up(ixs, app, v1, v2, rest)

      invalid ->
        # Invalid return value
        raise TransformError, module: mod, callback: :up, error: {:invalid_return, invalid}
    end
  end

  @doc """
  Applies all transforms against the current downgrade instruction.

  Additional information required as arguments and passed to transforms are
  the app the instruction applies to, and the source and target versions involved.
  """
  @spec down([instruction], app, version, version, [transform]) :: [instruction]
  def down(instructions, _app, _v1, _v2, []) do
    instructions
  end

  # Normalize a bare module to the {module, opts} form.
  def down(instructions, app, v1, v2, [mod | rest]) when is_atom(mod) do
    down(instructions, app, v1, v2, [{mod, []} | rest])
  end

  # Apply one transform, validate its output, then recurse over the rest.
  def down(instructions, app, v1, v2, [{mod, opts} | rest]) do
    case mod.down(app, v1, v2, instructions, opts) do
      ixs when is_list(ixs) ->
        # Validate
        validate_instructions!(mod, :down, ixs)
        down(ixs, app, v1, v2, rest)

      invalid ->
        # Invalid return value
        raise TransformError, module: mod, callback: :down, error: {:invalid_return, invalid}
    end
  end

  # Raises a TransformError identifying the offending transform and the first
  # invalid instruction it produced.
  defp validate_instructions!(mod, type, ixs) do
    case Utils.validate_instructions(ixs) do
      :ok ->
        :ok

      {:invalid, i} ->
        raise TransformError, module: mod, callback: type, error: {:invalid_instruction, i}
    end
  end
end
|
lib/distillery/releases/appup/transform.ex
| 0.905123 | 0.546375 |
transform.ex
|
starcoder
|
defmodule ResxJSON.Decoder do
  @moduledoc """
  Decode JSON string resources into erlang terms.
  ### Media Types
  Only JSON types are valid. This can either be a JSON subtype or suffix.
  Valid: `application/json`, `application/geo+json`, `application/json-seq`
  If an error is being returned when attempting to open a data URI due to
  `{ :invalid_reference, "invalid media type: \#{type}" }`, the MIME type
  will need to be added to the config.
  To add additional media types to be decoded, that can be done by configuring
  the `:json_types` option.
      config :resx_json,
        json_types: [
          { "application/x.my-type", "application/x.erlang.native", :json }
        ]
  The `:json_types` field should contain a list of 3 element tuples with the
  format `{ pattern :: String.pattern | Regex.t, replacement :: String.t, decoder :: :json | :json_seq }`.
  The `pattern` and `replacement` are arguments to `String.replace/3`. While the
  decoder specifies the JSON decoder to be used. The current decoder are:
  * `:json` - Decodes standard JSON using the `Jaxon` library (see `Jaxon.Stream.query/2`).
  * `:json_seq` - Decodes [JSON text sequences](https://tools.ietf.org/html/rfc7464) using the `Jaxon` library (see `Jaxon.Stream.query/2`).
  The replacement becomes the new media type of the transformed resource. Nested
  media types will be preserved. By default the current matches will be replaced
  (where the `json` type part is), with `x.erlang.native`, in order to denote
  that the content is now a native erlang type. If this behaviour is not desired
  simply override the match with `:json_types` for the media types that should
  not be handled like this.
  ### Query
  A query can be performed in the transformation, to only return a resource with
  the result of that query. The query format is either a string (`Jaxon.Path`) or
  a regular query as expected by `Jaxon.Stream.query/2`.
      Resx.Resource.transform(resource, ResxJSON.Decoder, query: "[*].foo")
  """
  use Resx.Transformer

  alias Resx.Resource.Content

  @impl Resx.Transformer
  # Replaces the resource's content stream with a Jaxon query over it.
  # :json streams are queried directly; :json_seq streams are first rewritten
  # into one JSON array ("[" <> comma-joined records <> "]") so Jaxon can
  # consume them.
  def transform(resource = %{ content: content }, opts) do
    case format_query(opts[:query]) do
      { :ok, query } ->
        case validate_type(content.type) do
          { :ok, { type, :json } } ->
            content = Content.Stream.new(content)
            { :ok, %{ resource | content: %{ content | type: type, data: content |> Jaxon.Stream.query(query) } } }
          { :ok, { type, :json_seq } } ->
            content = Content.Stream.new(content)
            { :ok, %{ resource | content: %{ content | type: type, data: Stream.concat([["["], Stream.transform(content, true, &format_sequence/2), ["]"]]) |> Jaxon.Stream.query(query) } } }
          error -> error
        end
      { :error, error } -> { :error, { :internal, "Invalid query format: " <> error.message } }
    end
  end

  # No query means "the whole document"; lists are assumed to already be in
  # Jaxon's query form; strings are parsed as Jaxon.Path expressions.
  defp format_query(nil), do: { :ok, [:root] }
  defp format_query(query) when is_list(query), do: { :ok, query }
  defp format_query(query), do: Jaxon.Path.parse(query)

  # Stream.transform reducer for JSON text sequences (RFC 7464).
  # The boolean accumulator tracks whether we are still before the first
  # record separator (true) or past it (false); once past it, records are
  # joined with commas. Delimiters scanned for: RS (\x1e) and newline.
  defp format_sequence(sequence, false), do: { [Regex.scan(~r/[\x1e\n]/, sequence, return: :index) |> convert_sequence_to_array(sequence, false) |> IO.iodata_to_binary], false }
  defp format_sequence(sequence, true) do
    Regex.scan(~r/[\x1e\n]/, sequence, return: :index)
    |> convert_sequence_to_array(sequence, true)
    |> case do
      # A list result means at least one delimiter was consumed in this chunk.
      formatted when is_list(formatted) -> { [IO.iodata_to_binary(formatted)], false }
      formatted -> { [IO.iodata_to_binary(formatted)], true }
    end
  end

  # Walks the delimiter positions found by Regex.scan and splices the chunk:
  # RS (\x1e) starts a new record, so it becomes a "," between records; a
  # newline merely terminates one and is dropped.
  defp convert_sequence_to_array(indexes, sequence, first, index \\ 0)
  defp convert_sequence_to_array([], sequence, _, _), do: sequence
  defp convert_sequence_to_array([[{ start, 1 }]|indexes], sequence, false, index) do
    part_length = start - index
    case sequence do
      <<part :: binary-size(part_length), "\x1e", sequence :: binary>> -> [part, ","|convert_sequence_to_array(indexes, sequence, false, start + 1)]
      <<part :: binary-size(part_length), "\n", sequence :: binary>> -> [part|convert_sequence_to_array(indexes, sequence, false, start + 1)]
    end
  end
  # NOTE(review): in the `first == true` clause only an RS delimiter is
  # matched — a chunk whose first delimiter is a newline would raise
  # CaseClauseError. Presumably a well-formed json-seq stream always opens
  # with RS; confirm against RFC 7464 inputs.
  defp convert_sequence_to_array([[{ start, 1 }]|indexes], sequence, true, index) do
    part_length = start - index
    case sequence do
      <<part :: binary-size(part_length), "\x1e", sequence :: binary>> -> [part|convert_sequence_to_array(indexes, sequence, false, start + 1)]
    end
  end

  # Fallback media-type rewrites applied after any user-configured
  # :json_types; the replacement keeps any "+" prefix and swaps the json
  # part for x.erlang.native.
  @default_json_types [
    { ~r/\/(json(\+json)?|(.*?\+)json)(;|$)/, "/\\3x.erlang.native\\4", :json },
    { ~r/\/(json-seq|(.*?\+)json-seq)(;|$)/, "/\\2x.erlang.native\\3", :json_seq }
  ]
  # Tries user-configured matches first, then the built-in defaults.
  defp validate_type(types) do
    cond do
      new_type = validate_type(types, Application.get_env(:resx_json, :json_types, [])) -> { :ok, new_type }
      new_type = validate_type(types, @default_json_types) -> { :ok, new_type }
      true -> { :error, { :internal, "Invalid resource type" } }
    end
  end

  # Returns { rewritten_type_list, decoder } for the first match against the
  # outermost media type, or nil when no pattern applies.
  defp validate_type(_, []), do: nil
  defp validate_type(type_list = [type|types], [{ match, replacement, decoder }|matches]) do
    if type =~ match do
      { [String.replace(type, match, replacement)|types], decoder }
    else
      validate_type(type_list, matches)
    end
  end
end
|
lib/resx_json/decoder.ex
| 0.915978 | 0.629561 |
decoder.ex
|
starcoder
|
defmodule Segments do
  @moduledoc """
  Decodes scrambled seven-segment display readings.

  Each input line contains ten unique signal patterns and four output
  digits, separated by `" | "`. `decode_from_input/1` deduces which wire
  set corresponds to which digit and computes the four-digit output value.
  """

  defstruct decoder: %{},
            encoder: %{},
            output: 0

  @type t :: %__MODULE__{
          decoder: %{MapSet.t() => integer()},
          encoder: %{integer() => MapSet.t()},
          output: integer()
        }

  # Turns a whitespace-separated run of signal patterns into segment sets.
  @spec to_segment_sets(String.t()) :: [MapSet.t()]
  defp to_segment_sets(patterns) do
    patterns
    |> String.split()
    |> Enum.map(fn pattern ->
      pattern
      |> String.split("", trim: true)
      |> MapSet.new()
    end)
  end

  # Splits an input line into {signal_patterns, output_digits}.
  @spec to_mapsets(String.t()) :: {[MapSet.t()], [MapSet.t()]}
  defp to_mapsets(inputline) do
    [input, output] = String.split(inputline, " | ", trim: true)
    {to_segment_sets(input), to_segment_sets(output)}
  end

  # Finds the single candidate satisfying `filter_fun`, records it as digit
  # `target` in both lookup directions, and removes it from the candidates.
  defp f_and_s_reducer({target, filter_fun}, {segments, input}) do
    [wires] = Enum.filter(input, fn x -> filter_fun.(x, segments) end)

    segments = %{
      segments
      | decoder: Map.put(segments.decoder, wires, target),
        encoder: Map.put(segments.encoder, target, wires)
    }

    {segments, input -- [wires]}
  end

  # Identifies all ten digits. Order matters: later rules depend on digits
  # resolved by earlier ones (9 needs 4; 0 and 3 need 1; 5 needs 6).
  @spec find_and_set(t(), [MapSet.t()]) :: t()
  defp find_and_set(segments, input) do
    [
      {1, fn x, _s -> MapSet.size(x) == 2 end},
      {4, fn x, _s -> MapSet.size(x) == 4 end},
      {7, fn x, _s -> MapSet.size(x) == 3 end},
      {8, fn x, _s -> MapSet.size(x) == 7 end},
      {9, fn x, s -> MapSet.size(x) == 6 and MapSet.subset?(s.encoder[4], x) end},
      {0, fn x, s -> MapSet.size(x) == 6 and MapSet.subset?(s.encoder[1], x) end},
      {6, fn x, _s -> MapSet.size(x) == 6 end},
      {3, fn x, s -> MapSet.size(x) == 5 and MapSet.subset?(s.encoder[1], x) end},
      {5, fn x, s -> MapSet.size(x) == 5 and MapSet.subset?(x, s.encoder[6]) end},
      {2, fn _x, _s -> true end}
    ]
    |> Enum.reduce({segments, input}, &f_and_s_reducer/2)
    |> elem(0)
  end

  # Folds the reversed output digits into an integer, least-significant
  # digit first (`multiplier` grows by a factor of 10 per digit).
  defp calculate(_segments, [], _multiplier) do
    0
  end

  defp calculate(segments, [hd | tl], multiplier) do
    segments.decoder[hd] * multiplier + calculate(segments, tl, multiplier * 10)
  end

  @doc """
  Decodes one puzzle line, returning a `%Segments{}` whose `:output` holds
  the decoded four-digit value.
  """
  # Fixed spec: previously referenced non-existent `SEGMENTS.t()`
  # (Elixir module names are case-sensitive), which broke Dialyzer.
  @spec decode_from_input(String.t()) :: t()
  def decode_from_input(inputline) do
    {input, output} = to_mapsets(inputline)
    segments = find_and_set(%__MODULE__{}, input)
    %{segments | output: calculate(segments, Enum.reverse(output), 1)}
  end
end
|
lib/segments.ex
| 0.769773 | 0.630557 |
segments.ex
|
starcoder
|
defmodule Rig.Config do
  @moduledoc """
  Rig module configuration that provides `settings/0`.
  There are two ways to use this module
  ### Specify a list of expected keys
  ```
  defmodule Rig.MyExample do
    use Rig.Config, [:some_key, :other_key]
  end
  ```
  `Rig.Config` expects a config entry similar to this:
  ```
  config :rig, Rig.MyExample,
    some_key: ...,
    other_key: ...
  ```
  If one of the specified keys is not found, an error is thrown _at compile time_.
  Otherwise, `Rig.MyExample` gets a `config/0` function that returns the
  configuration converted to a map.
  If there are other keys present, they'll be added to that map as well.
  ### Specify `:custom_validation` instead
  ```
  defmodule Rig.MyExample do
    use Rig.Config, :custom_validation
    defp validate_config!(config) do
      ...
    end
  end
  ```
  If you use :custom_validation, you should deal with the raw keyword list
  by implementing `validate_config!/1` in the module.
  """

  defmodule SyntaxError do
    # Wraps JSON parse failures; `cause` may be a list of underlying errors,
    # a plain message string, or an exception struct (handled per clause).
    defexception [:cause]

    def message(%__MODULE__{cause: cause}) when is_list(cause),
      do: "could not parse JSON: #{inspect(cause)}"

    def message(%__MODULE__{cause: cause}) when byte_size(cause) > 0,
      do: "could not parse JSON: #{cause}"

    def message(%__MODULE__{cause: cause}),
      do: "could not parse JSON: #{Exception.message(cause)}"
  end

  require Logger

  # NOTE(review): aliasing a top-level module to itself is a no-op; these
  # lines only document the dependency on Jason and Result.
  alias Jason
  alias Result

  # With :custom_validation the caller supplies validate_config!/1 itself,
  # so only the shared plumbing is injected.
  defmacro __using__(:custom_validation) do
    __MODULE__.__everything_but_validation__()
  end

  # With a list of required keys, a validate_config!/1 is generated as well.
  defmacro __using__(required_keys) do
    quote do
      unquote(__MODULE__.__everything_but_validation__())
      unquote(__MODULE__.__only_validation__(required_keys))
    end
  end

  def __everything_but_validation__ do
    quote do
      use Confex, otp_app: :rig

      @after_compile __MODULE__

      def __after_compile__(env, _bytecode) do
        # Make sure missing configuration values are caught early by evaluating the values here
        env.module.config()
      end
    end
  end

  def __only_validation__(required_keys) do
    quote do
      defp validate_config!(nil), do: validate_config!([])

      defp validate_config!(config) do
        # Convert to map and make sure all required keys are present
        config = Enum.into(config, %{})
        required_keys = unquote(required_keys)
        missing_keys = for k <- required_keys, not Map.has_key?(config, k), do: k

        case missing_keys do
          [] ->
            config

          _ ->
            raise "Missing required settings for module #{inspect(__ENV__.module)}: #{inspect(missing_keys)}"
        end
      end
    end
  end

  # ---

  # Normalizes HTTP method names (e.g. "get" -> "GET") inside a parsed API
  # definition list; any other JSON shape is passed through untouched.
  defp uppercase_http_method(apis) when is_list(apis) do
    Enum.map(apis, fn api ->
      if Map.has_key?(api, "version_data") do
        %{"version_data" => version_data} = api

        updated_version_data =
          Enum.into(version_data, %{}, fn {key, value} = api ->
            endpoints = Map.get(value, "endpoints")

            if is_list(endpoints) do
              updated_endpoints =
                Enum.map(endpoints, fn endpoint ->
                  if Map.has_key?(endpoint, "method") do
                    Map.update!(endpoint, "method", &Plug.Router.Utils.normalize_method/1)
                  else
                    endpoint
                  end
                end)

              {key, Map.update!(value, "endpoints", fn _ -> updated_endpoints end)}
            else
              # NOTE(review): `api` here is the shadowing {key, value} tuple
              # from the Enum.into fun, not the outer map — works, but the
              # shadowed name is misleading.
              api
            end
          end)

        Map.update!(api, "version_data", fn _ -> updated_version_data end)
      else
        api
      end
    end)
  end

  defp uppercase_http_method(parsed_json), do: parsed_json

  # ---
  # pub
  # ---

  # Accepts either a file path or an inline JSON string: tries to read and
  # decode as a file first, then falls back to decoding the value directly.
  @spec parse_json_env(String.t()) :: {:ok, any} | {:error, %SyntaxError{}}
  def parse_json_env(path_or_encoded) do
    decode_json_file(path_or_encoded)
    |> Result.or_else(fn file_error ->
      from_encoded(path_or_encoded)
      |> Result.map_err(fn decode_error -> [file_error, decode_error] end)
    end)
    |> Result.map(&uppercase_http_method/1)
    |> Result.map_err(&%SyntaxError{cause: &1})
  end

  # ---

  # Resolves cert/key paths and enables or disables HTTPS in `config`.
  # Inconsistent settings (e.g. only one of cert/key given) abort the VM.
  @spec check_and_update_https_config(Keyword.t()) :: Keyword.t()
  def check_and_update_https_config(config) do
    certfile = resolve_path_or_abort("HTTPS_CERTFILE", config[:https][:certfile])
    keyfile = resolve_path_or_abort("HTTPS_KEYFILE", config[:https][:keyfile])
    password = config[:https][:password] |> String.to_charlist()

    case set_https(config, certfile, keyfile, password) do
      {:ok, {config, :https_enabled}} ->
        Logger.debug(fn ->
          certfile = "certfile=" <> (config |> get_in([:https, :certfile]) |> inspect())
          keyfile = "keyfile=" <> (config |> get_in([:https, :keyfile]) |> inspect())
          "SSL enabled: #{certfile} #{keyfile}"
        end)

        config

      {:ok, {config, :https_disabled}} ->
        Logger.warn(fn ->
          """
          HTTPS is *disabled*. To enable it, set the HTTPS_CERTFILE and HTTPS_KEYFILE environment variables \
          (see https://accenture.github.io/reactive-interaction-gateway/docs/rig-ops-guide.html for details). \
          Note that we strongly recommend enabling HTTPS (unless you've employed TLS termination elsewhere). \
          """
        end)

        config

      {:error, :only_password} ->
        Logger.error("Please also set HTTPS_CERTFILE and HTTPS_KEYFILE to enable HTTPS.")
        System.stop(1)

      {:error, :only_keyfile} ->
        Logger.error("Please also set HTTPS_CERTFILE to enable HTTPS.")
        System.stop(1)

      {:error, :only_certfile} ->
        Logger.error("Please also set HTTPS_KEYFILE to enable HTTPS.")
        System.stop(1)
    end
  end

  # ----
  # priv
  # ----

  # Clause-by-clause decision table: both files absent (and no password) =>
  # disabled; partial settings => specific error; both present => enabled.
  defp set_https(config, certfile, keyfile, password)
  defp set_https(config, :empty, :empty, ''), do: {:ok, {disable_https(config), :https_disabled}}
  defp set_https(_, :empty, :empty, _), do: {:error, :only_password}
  defp set_https(_, :empty, _, _), do: {:error, :only_keyfile}
  defp set_https(_, _, :empty, _), do: {:error, :only_certfile}

  defp set_https(config, certfile, keyfile, password),
    do: {:ok, {enable_https(config, certfile, keyfile, password), :https_enabled}}

  # ---

  defp enable_https(config, certfile, keyfile, password),
    do:
      config
      |> put_in([:https, :certfile], certfile)
      |> put_in([:https, :keyfile], keyfile)
      |> put_in([:https, :password], password)

  # ---

  defp disable_https(config), do: put_in(config, [:https], false)

  # ---

  # Reads and JSON-decodes the file at `path` (after path resolution).
  @spec decode_json_file(String.t()) :: {:ok, any} | {:error, reason :: any}
  defp decode_json_file(path) do
    path
    |> resolve_path()
    |> case do
      {:error, err} ->
        {:error, err}

      {:ok, path} ->
        with {:ok, content} <- File.read(path),
             {:ok, config} <- from_encoded(content) do
          {:ok, config}
        else
          {:error, _reason} = err -> err
        end
    end
  end

  # ---

  # Resolves a configured path; an unset value yields :empty, an unresolvable
  # one logs and stops the VM.
  defp resolve_path_or_abort(var_name, value) do
    case resolve_path(value) do
      {:ok, path} ->
        path

      {:error, :empty} ->
        :empty

      {:error, {:not_found, path}} ->
        Logger.error("Could not resolve #{var_name}: #{inspect(path)}")
        # Under normal circumstances this stops the VM:
        System.stop(1)
        # When running in mix test, the code will simply continue, which leads to
        # strange errors down the road :( Instead, we're gonna wait for the log message
        # to print out and then forcefully stop the world.
        :timer.sleep(1_000)
        System.halt(1)
    end
  end

  # ---

  # Tries the path as given, then relative to the app's priv dir.
  defp resolve_path(path)
  defp resolve_path(nil), do: {:error, :empty}
  defp resolve_path(""), do: {:error, :empty}

  defp resolve_path(path) do
    %{found?: false, path: path}
    |> check_path_as_is()
    |> check_relative_to_priv()
    |> case do
      %{found?: false} -> Result.err({:not_found, path})
      %{path: path} -> Result.ok(path)
    end
  end

  # ---

  defp check_path_as_is(%{found?: false, path: path} = ctx) when byte_size(path) > 0,
    do: if(File.exists?(path), do: %{ctx | found?: true}, else: ctx)

  defp check_path_as_is(ctx), do: ctx

  # ---

  defp check_relative_to_priv(%{found?: false, path: path} = ctx) when byte_size(path) > 0 do
    priv_dir()
    |> Result.map(fn priv_dir ->
      path = Path.join(priv_dir, path)

      if File.exists?(path) do
        %{found?: true, path: path}
      else
        ctx
      end
    end)
    # If the app is not yet loaded this errors, so let's ignore that:
    |> Result.unwrap_or(ctx)
  end

  defp check_relative_to_priv(ctx), do: ctx

  # ---

  defp priv_dir do
    case :code.priv_dir(:rig) do
      {:error, _} = err -> err
      priv_dir -> {:ok, priv_dir}
    end
  end

  # ---

  # Decodes a non-empty JSON string; anything else is rejected outright.
  @spec from_encoded(String.t()) :: {:ok, any} | {:error, Jason.DecodeError.t() | any}
  defp from_encoded(encoded) when byte_size(encoded) > 0 do
    Jason.decode(encoded)
  end

  defp from_encoded(_), do: {:error, :not_a_nonempty_string}

  # ---

  # Parses "host:port" strings into {host, port} tuples, trimming whitespace.
  @spec parse_socket_list([String.t(), ...]) :: [{String.t(), pos_integer()}, ...]
  def parse_socket_list(socket_list) do
    socket_list
    |> Enum.map(fn broker ->
      [host, port] = for part <- String.split(broker, ":"), do: String.trim(part)
      {host, String.to_integer(port)}
    end)
  end
end
|
lib/rig/config.ex
| 0.864925 | 0.798796 |
config.ex
|
starcoder
|
defmodule Mnemonex do
  use Application

  # Default registered name for the coder process.
  @process :mnx_coder

  @moduledoc """
  Mnemonex application
  """

  @typedoc """
  A keyword list with output formatting options
  - `name`: registered process name (default: `:mnx_coder`)
  - `as_list`: return a list of unformatted words (default: `false`)
  - `words_per_group`: words per output group (default: `3`)
  - `word_separator`: combining words in a group (default: `-`)
  - `groups_per_line`: groups per output line (default: `2`)
  - `group_separator`: combining groups in a line (default: `--`)
  - `line_prefix`: prepended to each output line (default: empty string)
  - `line_suffix`: appended to each output line (default: `\n`)
  """
  @type coder_options :: [
          name: atom,
          as_list: boolean,
          words_per_group: pos_integer,
          word_separator: String.t(),
          groups_per_line: pos_integer,
          group_separator: String.t(),
          line_prefix: String.t(),
          line_suffix: String.t()
        ]

  @spec parse_coder_options(coder_options) :: coder_options
  @doc false
  def parse_coder_options(options) do
    # Fill in every known option with its default, preserving the
    # documented key order.
    defaults = [
      name: @process,
      as_list: false,
      words_per_group: 3,
      word_separator: "-",
      groups_per_line: 2,
      group_separator: "--",
      line_prefix: "",
      line_suffix: "\n"
    ]

    for {key, default} <- defaults, do: {key, Keyword.get(options, key, default)}
  end

  @doc """
  application start
  """
  def start(_type, opts \\ []) do
    import Supervisor.Spec, warn: false

    coder_opts = parse_coder_options(opts)
    children = [worker(Mnemonex.Coder, [coder_opts, coder_opts[:name]])]

    Supervisor.start_link(children, strategy: :one_for_one, name: Mnemonex.Supervisor)
  end

  @doc """
  encode a binary
  Unsigned big-endian integers may also be encoded, but note that there is presently
  no affordance to decode them back to same.
  The output format depends on configuration variables (described therein.)
  """
  @spec encode(binary | pos_integer, term) :: binary
  def encode(input, server \\ @process)

  def encode(input, server) when is_binary(input),
    do: GenServer.call(server, {:encode, input})

  def encode(input, server) when is_integer(input) and input > 0,
    do: GenServer.call(server, {:encode, :binary.encode_unsigned(input)})

  @doc """
  decode a mnemonicoded word list
  All non-alphabetic (ASCII) characters are treated as word breaks. There is
  presently no graceful handling of improperly entered words.
  """
  @spec decode(binary, term) :: binary
  def decode(input, server \\ @process) when is_binary(input),
    do: GenServer.call(server, {:decode, input})
end
|
lib/mnemonex.ex
| 0.883179 | 0.471588 |
mnemonex.ex
|
starcoder
|
defmodule Data.Event do
  @moduledoc """
  In game events that NPCs will be listening for
  Valid kinds of events:
  - "room/entered": When a character enters a room
  - "room/heard": When a character hears something in a room
  - "combat/tick": What the character will do during combat
  """

  import Data.Type
  import Ecto.Changeset

  alias Data.Effect
  alias Data.Type

  @type t :: map

  @behaviour Ecto.Type

  @impl Ecto.Type
  def type, do: :map

  @impl Ecto.Type
  def cast(stats) when is_map(stats), do: {:ok, stats}
  def cast(_), do: :error

  @doc """
  Load an event from a stored map
  Cast it properly
  """
  @impl Ecto.Type
  def load(event) do
    # NOTE(review): String.to_atom/1 on stored keys — safe only because
    # events come from trusted database rows, not user input.
    event = for {key, val} <- event, into: %{}, do: {String.to_atom(key), val}

    event =
      event
      |> load_condition()
      |> load_action()
      |> load_actions()
      # Backfill an id for events stored before ids were introduced.
      |> ensure(:id, UUID.uuid4())

    {:ok, event}
  end

  # Atomizes the keys of the optional :condition sub-map.
  defp load_condition(event = %{condition: condition}) when condition != nil do
    condition = for {key, val} <- event.condition, into: %{}, do: {String.to_atom(key), val}
    %{event | condition: condition}
  end

  defp load_condition(event), do: event

  # Casts the single :action sub-map, when present and non-nil.
  defp load_action(event) do
    case event do
      %{action: action} when action != nil ->
        %{event | action: _load_action(action)}

      _ ->
        event
    end
  end

  # Casts each entry of the optional :actions list.
  defp load_actions(event) do
    case event do
      %{actions: actions} when actions != nil ->
        actions = Enum.map(event.actions, &_load_action/1)
        %{event | actions: actions}

      _ ->
        event
    end
  end

  # Atomizes an action's keys, then applies type-specific casting.
  defp _load_action(action) do
    action = for {key, val} <- action, into: %{}, do: {String.to_atom(key), val}
    _load_action_type(action)
  end

  # target/effects: cast each effect; on a failed Effect.load the raw
  # effect map is kept as-is (best effort, no crash).
  defp _load_action_type(action = %{type: "target/effects"}) do
    effects =
      action.effects
      |> Enum.map(fn effect ->
        case Effect.load(effect) do
          {:ok, effect} -> effect
          _ -> effect
        end
      end)

    %{action | effects: effects}
  end

  # emote: atomize the optional :status sub-map's keys.
  defp _load_action_type(action = %{type: "emote"}) do
    case action do
      %{status: status} ->
        status = for {key, val} <- status, into: %{}, do: {String.to_atom(key), val}
        %{action | status: status}

      _ ->
        action
    end
  end

  defp _load_action_type(action), do: action

  @impl Ecto.Type
  def dump(stats) when is_map(stats), do: {:ok, Map.delete(stats, :__struct__)}
  def dump(_), do: :error

  @doc """
  Get a starting event, to fill out in the web interface. Just the structure,
  the values won't mean anything.
  """
  @spec starting_event(String.t()) :: t()
  def starting_event("combat/tick") do
    %{
      type: "combat/tick",
      action: %{type: "target/effects", effects: [], delay: 1.5, weight: 10, text: ""}
    }
  end

  def starting_event("room/entered") do
    %{type: "room/entered", action: %{type: "say", message: "Welcome!"}}
  end

  def starting_event("room/heard") do
    %{
      type: "room/heard",
      condition: %{regex: "hello"},
      action: %{type: "say", message: "Welcome!"}
    }
  end

  def starting_event("tick") do
    %{
      type: "tick",
      action: %{type: "move", max_distance: 3, chance: 25, wait: 10}
    }
  end

  @doc """
  Validate an event to get errors out of the validation
  """
  def validate_event(event) do
    event
    |> validate()
    |> validate_keys(
      required: required_event_keys(event.type),
      one_of: one_of_event_keys(event.type)
    )
    |> validate_action_for_type()
    |> validate_event_action()
    |> validate_event_condition()
  end

  # alphabetical
  defp required_event_keys("room/heard") do
    [:condition, :id, :type]
  end

  defp required_event_keys(_type) do
    [:action, :id, :type]
  end

  # room/heard may carry either a single :action or an :actions list.
  defp one_of_event_keys("room/heard") do
    [:action, :actions]
  end

  defp one_of_event_keys(_type) do
    []
  end

  # Adds an error when the action type is not allowed for the event type.
  defp validate_action_for_type(changeset) do
    case valid_action_for_type?(changeset.data) do
      true ->
        changeset

      false ->
        Type.Changeset.add_error(changeset, :action, "invalid type for event")
    end
  end

  # Validates the single :action, or each element of :actions, merging the
  # resulting errors into the event changeset.
  defp validate_event_action(changeset = %{data: event}) do
    case event do
      %{type: type, actions: actions} ->
        actions
        |> Enum.map(&validate_action(type, &1))
        |> merge_changesets(changeset)

      %{type: type, action: action} ->
        _validate_event_action(changeset, type, action)

      _ ->
        Type.Changeset.add_error(changeset, :action, "missing an action")
    end
  end

  # Copies one action changeset's errors onto `field` of the event changeset.
  defp merge_changeset(action_changeset, field, changeset) do
    Enum.reduce(action_changeset.errors, changeset, fn {key, val}, changeset ->
      Type.Changeset.add_error(changeset, field, "#{key}: #{Enum.join(val, ", ")}")
    end)
  end

  # Merges a list of per-action changesets; errors are keyed :action_0,
  # :action_1, ... by position.
  defp merge_changesets(changesets, changeset) do
    changesets
    |> Enum.with_index()
    |> Enum.reduce(changeset, fn {action_changeset, i}, changeset ->
      case action_changeset.valid? do
        true ->
          changeset

        false ->
          merge_changeset(action_changeset, :"action_#{i}", changeset)
      end
    end)
  end

  defp _validate_event_action(changeset, type, action) do
    case validate_action(type, action) do
      %{valid?: true} ->
        changeset

      action_changeset ->
        merge_changeset(action_changeset, :action, changeset)
    end
  end

  # Folds condition-validation errors into the event changeset under
  # the :condition key.
  defp validate_event_condition(changeset = %{data: event}) do
    case validate_condition(event) do
      %{valid?: true} ->
        changeset

      condition_changeset ->
        Enum.reduce(condition_changeset.errors, changeset, fn {key, val}, changeset ->
          Type.Changeset.add_error(changeset, :condition, "#{key}: #{Enum.join(val, ", ")}")
        end)
    end
  end

  @doc """
  Validate the action matches the type
  """
  @spec valid_action_for_type?(t()) :: boolean()
  def valid_action_for_type?(event = %{action: action}) do
    event.type
    |> valid_type_actions()
    |> Enum.member?(action.type)
  end

  def valid_action_for_type?(event = %{actions: actions}) do
    types = event.type |> valid_type_actions()

    actions
    |> Enum.all?(fn action ->
      Enum.member?(types, action.type)
    end)
  end

  def valid_action_for_type?(_), do: false

  # Whitelist of action types allowed per event type.
  defp valid_type_actions(type) do
    case type do
      "combat/tick" ->
        ["target/effects"]

      "room/entered" ->
        ["emote", "say", "say/random", "target"]

      "room/heard" ->
        ["emote", "say"]

      "tick" ->
        ["emote", "move", "say", "say/random"]

      _ ->
        []
    end
  end

  def valid_condition?(event) do
    validate_condition(event).valid?
  end

  @doc """
  Validate the arguments matches the action
  """
  def validate_condition(event) do
    case event.type do
      "room/heard" ->
        condition = Map.get(event, :condition, %{}) || %{}

        condition
        |> validate()
        |> validate_keys(required: [:regex])
        |> validate_values(&validate_condition_values/1)

      _ ->
        # Only room/heard may carry a condition; any other event type with
        # a :condition key is marked invalid.
        case !Map.has_key?(event, :condition) do
          true ->
            event
            |> validate()

          false ->
            event
            |> validate()
            |> Map.put(:valid?, false)
        end
    end
  end

  defp validate_condition_values({key, value}) do
    case key do
      :regex ->
        is_binary(value)

      _ ->
        false
    end
  end

  @doc """
  Validate the arguments matches the action
  """
  # tick events have stricter per-action requirements (chance/wait).
  def validate_action(event_type, action) do
    case event_type do
      "tick" ->
        validate_tick_action(action)

      _ ->
        validate_action(action)
    end
  end

  @doc """
  Validate tick actions
  """
  @spec validate_tick_action(map()) :: boolean()
  def validate_tick_action(action = %{type: "say"}) do
    action
    |> validate()
    |> validate_keys(required: [:chance, :message, :type, :wait])
    |> validate_values(&validate_say_action_values/1)
  end

  def validate_tick_action(action = %{type: "say/random"}) do
    action
    |> validate()
    |> validate_keys(required: [:chance, :messages, :type, :wait])
    |> validate_values(&validate_say_random_action_values/1)
  end

  def validate_tick_action(action = %{type: "emote"}) do
    action
    |> validate()
    |> validate_keys(required: [:message, :chance, :wait, :type], optional: [:status])
    |> validate_values(&validate_emote_action_values/1)
  end

  def validate_tick_action(action = %{type: "move"}) do
    validate_action(action)
  end

  # Unknown tick action type: always invalid.
  def validate_tick_action(action) do
    action
    |> validate()
    |> Map.put(:valid?, false)
  end

  @doc """
  Validate all other event type actions
  """
  @spec validate_action(map()) :: boolean()
  def validate_action(action = %{type: "emote"}) do
    action
    |> validate()
    |> validate_keys(required: [:message, :type], optional: [:delay, :status])
    |> validate_values(&validate_emote_action_values/1)
  end

  def validate_action(action = %{type: "move"}) do
    action
    |> validate()
    |> validate_keys(required: [:chance, :max_distance, :type, :wait])
    |> validate_values(&validate_move_action_values/1)
  end

  def validate_action(action = %{type: "say"}) do
    action
    |> validate()
    |> validate_keys(required: [:message, :type], optional: [:delay])
    |> validate_values(&validate_say_action_values/1)
  end

  def validate_action(action = %{type: "say/random"}) do
    action
    |> validate()
    |> validate_keys(required: [:messages, :type], optional: [:delay])
    |> validate_values(&validate_say_random_action_values/1)
  end

  def validate_action(action = %{type: "target"}) do
    action
    |> validate()
    |> validate_keys(required: [:type])
  end

  def validate_action(action = %{type: "target/effects"}) do
    action
    |> validate()
    |> validate_keys(required: [:delay, :effects, :weight, :text, :type])
    |> validate_values(&validate_target_effects_action_values/1)
  end

  # Unknown action type: always invalid.
  def validate_action(action) do
    action
    |> validate()
    |> Map.put(:valid?, false)
  end

  defp validate_emote_action_values({key, value}) do
    case key do
      :chance ->
        is_integer(value)

      :delay ->
        is_float(value)

      :message ->
        is_binary(value)

      :status ->
        valid_status?(value)

      :type ->
        value == "emote"

      :wait ->
        is_integer(value)

      _ ->
        false
    end
  end

  defp validate_move_action_values({key, value}) do
    case key do
      :max_distance ->
        is_integer(value)

      :chance ->
        is_integer(value)

      :type ->
        value == "move"

      :wait ->
        is_integer(value)

      _ ->
        false
    end
  end

  defp validate_say_action_values({key, value}) do
    case key do
      :message ->
        is_binary(value)

      :chance ->
        is_integer(value)

      :delay ->
        is_float(value)

      :type ->
        value == "say"

      :wait ->
        is_integer(value)

      _ ->
        false
    end
  end

  defp validate_say_random_action_values({key, value}) do
    case key do
      :messages ->
        is_list(value) && length(value) > 0 && Enum.all?(value, &is_binary/1)

      :delay ->
        is_float(value)

      :chance ->
        is_integer(value)

      :type ->
        value == "say/random"

      :wait ->
        is_integer(value)

      _ ->
        false
    end
  end

  defp validate_target_effects_action_values({key, value}) do
    case key do
      :delay ->
        is_float(value)

      :effects ->
        is_list(value) && Enum.all?(value, &Effect.valid?/1)

      :text ->
        is_binary(value)

      :type ->
        value == "target/effects"

      :weight ->
        is_integer(value)

      _ ->
        false
    end
  end

  @doc """
  Validate status changing attributes
  """
  @spec valid_status?(map()) :: boolean()
  def valid_status?(action) do
    case keys(action) do
      # A status of exactly %{reset: ...} is valid iff reset is truthy.
      [:reset] ->
        action.reset

      keys ->
        :key in keys && Enum.all?(action, &validate_status_key_value/1)
    end
  end

  defp validate_status_key_value({key, value}) do
    case key do
      :key ->
        is_binary(value)

      :line ->
        is_binary(value)

      :listen ->
        is_binary(value)

      _ ->
        false
    end
  end

  @doc """
  Validate events of the NPC
  """
  @spec validate_events(Ecto.Changeset.t()) :: Ecto.Changeset.t()
  def validate_events(changeset) do
    case get_change(changeset, :events) do
      nil ->
        changeset

      events ->
        _validate_events(changeset, events)
    end
  end

  defp _validate_events(changeset, events) do
    case events |> Enum.all?(&valid?/1) do
      true ->
        changeset

      false ->
        add_error(changeset, :events, "are invalid")
    end
  end

  @doc """
  Validate an event based on type
  """
  @spec valid?(t()) :: boolean
  def valid?(event) do
    validate_event(event).valid?
  end
end
|
lib/data/event.ex
| 0.848157 | 0.511656 |
event.ex
|
starcoder
|
require Utils
require Program
defmodule D9 do
@moduledoc """
--- Day 9: Sensor Boost ---
You've just said goodbye to the rebooted rover and left Mars when you receive a faint distress signal coming from the asteroid belt. It must be the Ceres monitoring station!
In order to lock on to the signal, you'll need to boost your sensors. The Elves send up the latest BOOST program - Basic Operation Of System Test.
While BOOST (your puzzle input) is capable of boosting your sensors, for tenuous safety reasons, it refuses to do so until the computer it runs on passes some checks to demonstrate it is a complete Intcode computer.
Your existing Intcode computer is missing one key feature: it needs support for parameters in relative mode.
Parameters in mode 2, relative mode, behave very similarly to parameters in position mode: the parameter is interpreted as a position. Like position mode, parameters in relative mode can be read from or written to.
The important difference is that relative mode parameters don't count from address 0. Instead, they count from a value called the relative base. The relative base starts at 0.
The address a relative mode parameter refers to is itself plus the current relative base. When the relative base is 0, relative mode parameters and position mode parameters with the same value refer to the same address.
The relative base is modified with the relative base offset instruction:
Opcode 9 adjusts the relative base by the value of its only parameter. The relative base increases (or decreases, if the value is negative) by the value of the parameter.
Your Intcode computer will also need a few other capabilities:
The computer's available memory should be much larger than the initial program. Memory beyond the initial program starts with the value 0 and can be read or written like any other memory. (It is invalid to try to access memory at a negative address, though.)
The computer should have support for large numbers. Some instructions near the beginning of the BOOST program will verify this capability.
The BOOST program will ask for a single input; run it in test mode by providing it the value 1. It will perform a series of checks on each opcode, output any opcodes (and the associated parameter modes) that seem to be functioning incorrectly, and finally output a BOOST keycode.
Once your Intcode computer is fully functional, the BOOST program should report no malfunctioning opcodes when run in test mode; it should only output a single value, the BOOST keycode. What BOOST keycode does it produce?
--- Part Two ---
You now have a complete Intcode computer.
Finally, you can lock on to the Ceres distress signal! You just need to boost your sensors using the BOOST program.
The program runs in sensor boost mode by providing the input instruction the value 2. Once run, it will boost the sensors automatically, but it might take a few seconds to complete the operation on slower hardware. In sensor boost mode, the program will output a single value: the coordinates of the distress signal.
Run the BOOST program in sensor boost mode. What are the coordinates of the distress signal?
"""
@behaviour Day
def solve(input) do
  # Parse the comma-separated Intcode program into a list of integers.
  input = input |> Utils.to_ints()

  # Part 1: run the BOOST program in test mode (input 1). Per the problem
  # statement above, a correct machine emits exactly one output: the keycode.
  %Program{output: [part_1]} = Program.run(Program.new(input, 1))

  # Part 2 is solved by patching the program rather than running it in full.
  # Entry point: the address just past the second-to-last 99 (halt) opcode in
  # the original program — presumably free space after the main routine;
  # TODO confirm against the Program implementation.
  part_2_entry =
    (input
     |> Enum.with_index()
     |> Enum.filter(fn {v, _i} -> v == 99 end)
     |> Enum.map(fn {_v, i} -> i end)
     |> Enum.at(-2)) + 1

  # Scan 6-wide sliding windows for the instruction pattern
  # [_, _, 1, 204, 1, 99] and take the larger of the window's first two
  # values — a constant embedded in the original program that the hack below
  # splices into its own instruction stream.
  part_2_static =
    input
    |> Enum.chunk_every(6, 1)
    |> Enum.find(fn
      [_, _, 1, 204, 1, 99] -> true
      _ -> false
    end)
    |> Enum.take(2)
    |> Enum.max()

  # Hand-written Intcode replacement routine (opcodes 21101/22201/1206/21201/
  # 204/109/99) with `part_2_static` interpolated as an operand. NOTE(review):
  # the exact semantics depend on this year's Intcode spec — see moduledoc.
  part_2_hack =
    "21101,0,2,1,21101,0,3,2,21101,0,5,3,21101,0,1,26,109,1,22201,0,2,3,1206,4,920,21201,3,#{
      part_2_static
    },4,204,4,99"
    |> Utils.to_ints()

  # Install the patch at the computed entry point and run in sensor-boost
  # mode (input 2); again a single output is expected.
  part_2_hacked =
    Program.new(input, 2)
    |> Program.hack(part_2_entry, part_2_hack)

  %Program{output: [part_2]} = Program.run(part_2_hacked)

  {
    part_1,
    part_2
  }
end
end
|
lib/days/09.ex
| 0.62395 | 0.761937 |
09.ex
|
starcoder
|
defmodule Xgit.Repository.Storage do
  @moduledoc ~S"""
  Represents the persistent storage for a git repository.

  Unless you are implementing an alternative storage architecture or implementing
  plumbing-level commands, this module is probably not of interest to you.

  ## Design Goals

  Xgit intends to allow repositories to be stored in multiple different mechanisms.
  While it includes built-in support for local on-disk repositories
  (see `Xgit.Repository.OnDisk`), and in-memory repositories (see `Xgit.Repository.InMemory`),
  you could envision repositories stored entirely on a remote file system or database.

  ## Implementing a Storage Architecture

  To define a new mechanism for storing a git repo, create a new module that `use`s
  this module and implements the required callbacks. Consider the information stored
  in a typical `.git` directory in a local repository. You will be building an
  alternative to that storage mechanism.
  """
  use GenServer

  import Xgit.Util.ForceCoverage

  alias Xgit.ConfigEntry
  alias Xgit.Object
  alias Xgit.ObjectId
  alias Xgit.Ref
  alias Xgit.Repository.InvalidRepositoryError
  alias Xgit.Repository.WorkingTree

  require Logger

  @typedoc ~S"""
  The process ID for an `Xgit.Repository.Storage` process.
  """
  # The `{:xgit_repo, pid}` tagged form marks a PID that has already been
  # checked by `assert_valid/1`, so later calls can skip the
  # `:valid_repository?` round trip.
  @type t :: pid | {:xgit_repo, pid}

  @doc """
  Starts an `Xgit.Repository.Storage` process linked to the current process.

  _IMPORTANT:_ You should not invoke this function directly unless you are
  implementing a new storage implementation module that implements this behaviour.

  ## Parameters

  `module` is the name of a module that implements the callbacks defined in this module.

  `init_arg` is passed to the `init/1` function of `module`.

  `options` are passed to `GenServer.start_link/3`.

  ## Return Value

  See `GenServer.start_link/3`.
  """
  @spec start_link(module :: module, init_arg :: term, GenServer.options()) ::
          GenServer.on_start()
  def start_link(module, init_arg, options) when is_atom(module) and is_list(options),
    do: GenServer.start_link(__MODULE__, {module, init_arg}, options)

  @impl true
  # Wraps the implementation module's own init; server state carries the
  # implementation module, its private state, and the default working tree.
  def init({mod, mod_init_arg}) do
    case mod.init(mod_init_arg) do
      {:ok, mod_state} -> cover {:ok, %{mod: mod, mod_state: mod_state, working_tree: nil}}
      {:stop, reason} -> cover {:stop, reason}
    end
  end

  @doc ~S"""
  Returns `true` if the argument is a PID representing a valid `Xgit.Repository.Storage` process.
  """
  @spec valid?(repository :: term) :: boolean
  def valid?(repository) when is_pid(repository) do
    Process.alive?(repository) &&
      GenServer.call(repository, :valid_repository?) == :valid_repository
  end

  # A tagged tuple was already validated once; trust it.
  def valid?({:xgit_repo, repository}) when is_pid(repository), do: cover(true)

  def valid?(_), do: cover(false)

  @doc ~S"""
  Raises `Xgit.Repository.InvalidRepositoryError` if the value provided is anything
  other than the process ID for a valid `Xgit.Repository.Storage` process.
  """
  @spec assert_valid(repository :: t) :: t | no_return
  def assert_valid({:xgit_repo, repository} = checked_repo) when is_pid(repository),
    do: cover(checked_repo)

  def assert_valid(repository) do
    if is_pid(repository) && valid?(repository) do
      cover {:xgit_repo, repository}
    else
      raise InvalidRepositoryError
    end
  end

  ## --- Working Tree ---

  @doc ~S"""
  Get the default working tree if one has been attached.

  Other working trees may also be attached to this repository, but do not have
  special status with regard to the repository.
  """
  @spec default_working_tree(repository :: t) :: WorkingTree.t() | nil
  def default_working_tree(repository) when is_pid(repository) do
    repository
    |> assert_valid()
    |> default_working_tree()
  end

  def default_working_tree({:xgit_repo, repository}) when is_pid(repository) do
    GenServer.call(repository, :default_working_tree)
  end

  @doc ~S"""
  Attach a working tree to this repository as the default working tree.

  Future plumbing and API commands that target this repository will use this
  working tree unless otherwise dictated.

  ## Return Value

  `:ok` if the working tree was successfully attached.

  `:error` if a working tree was already attached or the proposed working tree
  was not valid.
  """
  @spec set_default_working_tree(repository :: t, working_tree :: WorkingTree.t()) :: :ok | :error
  def set_default_working_tree({:xgit_repo, repository}, working_tree)
      when is_pid(repository) and is_pid(working_tree) do
    GenServer.call(repository, {:set_default_working_tree, working_tree})
  end

  def set_default_working_tree(repository, working_tree)
      when is_pid(repository) and is_pid(working_tree) do
    repository
    |> assert_valid()
    |> set_default_working_tree(working_tree)
  end

  ## --- Objects ---

  @doc ~S"""
  Returns `true` if all objects in the list are present in the object dictionary.

  This limit is not enforced, but it's recommended to query for no more than ~100 object
  IDs at a time.
  """
  @spec has_all_object_ids?(repository :: t, object_ids :: [ObjectId.t()]) :: boolean
  def has_all_object_ids?({:xgit_repo, repository}, object_ids)
      when is_pid(repository) and is_list(object_ids) do
    GenServer.call(repository, {:has_all_object_ids?, object_ids})
  end

  def has_all_object_ids?(repository, object_ids)
      when is_pid(repository) and is_list(object_ids) do
    repository
    |> assert_valid()
    |> has_all_object_ids?(object_ids)
  end

  @doc ~S"""
  Checks for presence of multiple object Ids.

  Called when `has_all_object_ids?/2` is called.

  ## Return Value

  Should return `{:ok, has_all_object_ids?, state}` where `has_all_object_ids?` is `true`
  if all object IDs can be found in the object dictionary; `false` otherwise.
  """
  @callback handle_has_all_object_ids?(state :: any, object_ids :: [ObjectId.t()]) ::
              {:ok, has_all_object_ids? :: boolean, state :: any}

  @typedoc ~S"""
  Error codes that can be returned by `get_object/2`.
  """
  @type get_object_reason :: :not_found | :invalid_object

  @doc ~S"""
  Retrieves an object from the repository.

  ## Return Value

  `{:ok, object}` if the object exists in the database.

  `{:error, :not_found}` if the object does not exist in the database.

  `{:error, :invalid_object}` if object was found, but invalid.
  """
  @spec get_object(repository :: t, object_id :: ObjectId.t()) ::
          {:ok, object :: Object.t()} | {:error, reason :: get_object_reason}
  def get_object({:xgit_repo, repository}, object_id)
      when is_pid(repository) and is_binary(object_id) do
    GenServer.call(repository, {:get_object, object_id})
  end

  def get_object(repository, object_id) when is_pid(repository) and is_binary(object_id) do
    repository
    |> assert_valid()
    |> get_object(object_id)
  end

  @doc ~S"""
  Retrieves an object from the repository.

  Called when `get_object/2` is called.

  ## Return Value

  Should return `{:ok, object, state}` if read successfully.

  Should return `{:error, :not_found, state}` if unable to find the object.

  Should return `{:error, :invalid_object, state}` if object was found, but invalid.
  """
  @callback handle_get_object(state :: any, object_id :: ObjectId.t()) ::
              {:ok, object :: Object.t(), state :: any}
              | {:error, reason :: get_object_reason, state :: any}

  @typedoc ~S"""
  Error codes that can be returned by `put_loose_object/2`.
  """
  @type put_loose_object_reason :: :cant_create_file | :object_exists

  @doc ~S"""
  Writes a loose object to the repository.

  ## Return Value

  `:ok` if written successfully.

  `{:error, :cant_create_file}` if unable to create the storage for the loose object.

  `{:error, :object_exists}` if the object already exists in the database.
  """
  @spec put_loose_object(repository :: t, object :: Object.t()) ::
          :ok | {:error, reason :: put_loose_object_reason}
  def put_loose_object({:xgit_repo, repository}, %Object{} = object) when is_pid(repository) do
    GenServer.call(repository, {:put_loose_object, object})
  end

  def put_loose_object(repository, %Object{} = object) when is_pid(repository) do
    repository
    |> assert_valid()
    |> put_loose_object(object)
  end

  @doc ~S"""
  Writes a loose object to the repository.

  Called when `put_loose_object/2` is called.

  ## Return Value

  Should return `{:ok, state}` if written successfully.

  Should return `{:error, :cant_create_file}` if unable to create the storage for
  the loose object.

  Should return `{:error, :object_exists}` if the object already exists in the database.
  """
  @callback handle_put_loose_object(state :: any, object :: Object.t()) ::
              {:ok, state :: any} | {:error, reason :: put_loose_object_reason, state :: any}

  ## --- References ---

  @typedoc ~S"""
  Error codes that can be returned by `list_refs/1`.
  """
  @type list_refs_reason :: File.posix()

  @doc ~S"""
  Lists all references in the repository.

  ## Return Value

  `{:ok, refs}` if successful. `refs` will be a list of `Xgit.Ref` structs.
  The sequence of the list is unspecified.

  `{:error, reason}` if unable. See `list_refs_reason`.
  """
  @spec list_refs(repository :: t) ::
          {:ok, refs :: [Ref.t()]} | {:error, reason :: list_refs_reason}
  def list_refs({:xgit_repo, repository}) when is_pid(repository) do
    GenServer.call(repository, :list_refs)
  end

  def list_refs(repository) when is_pid(repository) do
    repository
    |> assert_valid()
    |> list_refs()
  end

  @doc ~S"""
  Lists all references in the repository.

  Called when `list_refs/1` is called.

  ## Return Value

  Should return `{:ok, refs, state}` if read successfully. `refs` should be a list
  of `Xgit.Ref` structs.

  Should return `{:error, reason}` if unable. Currently only `File.posix` reasons
  are expected.
  """
  @callback handle_list_refs(state :: any) ::
              {:ok, refs :: [Ref], state :: any}
              | {:error, reason :: list_refs_reason, state :: any}

  @typedoc ~S"""
  Error codes that can be returned by `put_ref/3`.
  """
  @type put_ref_reason ::
          :invalid_ref
          | :cant_create_file
          | :target_not_found
          | :old_target_not_matched

  @doc ~S"""
  Writes or updates a reference in the repository.

  If any existing reference exists with this name, it will be replaced.

  ## Options

  `follow_link?`: (default: `true`) `true` to follow symbolic refs

  `old_target`: If present, a ref with this name must already exist and the `target`
  value must match the object ID provided in this option. (There is a special value `:new`
  which instead requires that the named ref must **not** exist.)

  ## TO DO

  Support for ref log. https://github.com/elixir-git/xgit/issues/224

  Support for `--no-deref` option. https://github.com/elixir-git/xgit/issues/226

  ## Return Value

  `:ok` if written successfully.

  `{:error, :invalid_ref}` if the `Xgit.Ref` structure is invalid.

  `{:error, :cant_create_file}` if unable to create the storage for the reference.

  `{:error, :target_not_found}` if the target object does not exist in the repository.

  `{:error, :old_target_not_matched}` if `old_target` was specified and the target ref points
  to a different object ID.
  """
  @spec put_ref(repository :: t, ref :: Ref.t(), follow_link?: boolean, old_target: ObjectId.t()) ::
          :ok | {:error, reason :: put_ref_reason}
  def put_ref(repository, ref, opts \\ [])

  def put_ref({:xgit_repo, repository}, %Ref{} = ref, opts)
      when is_pid(repository) and is_list(opts) do
    # Ref structure is validated here so implementations don't each have to.
    if Ref.valid?(ref) do
      GenServer.call(repository, {:put_ref, ref, opts})
    else
      cover {:error, :invalid_ref}
    end
  end

  def put_ref(repository, ref, opts) when is_pid(repository) and is_list(opts) do
    repository
    |> assert_valid()
    |> put_ref(ref, opts)
  end

  @doc ~S"""
  Writes or updates a reference in the repository.

  Called when `put_ref/3` is called.

  The implementation must validate that the referenced object exists and is of
  type `commit`. It does not need to validate that the reference is otherwise
  valid.

  ## Options

  `follow_link?`: (default: `true`) `true` to follow symbolic refs

  `old_target`: If present, a ref with this name must already exist and the `target`
  value must match the object ID provided in this option. (There is a special value `:new`
  which instead requires that the named ref must **not** exist.)

  ## Return Value

  Should return `{:ok, state}` if written successfully.

  Should return `{:error, :cant_create_file}` if unable to create the storage for
  the ref.

  Should return `{:error, :target_not_found}` if the target object does not
  exist in the repository.

  Should return `{:error, :old_target_not_matched}` if `old_target` was specified and the
  target ref points to a different object ID.
  """
  @callback handle_put_ref(state :: any, ref :: Ref.t(),
              follow_link?: boolean,
              old_target: ObjectId.t()
            ) ::
              {:ok, state :: any} | {:error, reason :: put_ref_reason, state :: any}

  @typedoc ~S"""
  Error codes that can be returned by `delete_ref/3`.
  """
  @type delete_ref_reason :: :invalid_ref | :cant_delete_file | :old_target_not_matched

  @doc ~S"""
  Deletes a reference from the repository.

  ## Options

  `follow_link?`: (default: `true`) `true` to follow symbolic refs

  `old_target`: If present, a ref with this name must already exist and the `target`
  value must match the object ID provided in this option.

  ## TO DO

  Support for ref log. https://github.com/elixir-git/xgit/issues/224

  Support for `--no-deref` option. https://github.com/elixir-git/xgit/issues/226

  ## Return Value

  `:ok` if deleted successfully or the reference did not exist.

  `{:error, :invalid_ref}` if `name` is not a valid ref name.

  `{:error, :cant_delete_file}` if unable to delete the storage for the reference.

  `{:error, :old_target_not_matched}` if `old_target` was specified and the target ref points
  to a different object ID or did not exist.
  """
  @spec delete_ref(repository :: t, name :: Ref.name(),
          follow_link?: boolean,
          old_target: ObjectId.t()
        ) ::
          :ok | {:error, reason :: delete_ref_reason}
  def delete_ref(repository, name, opts \\ [])

  def delete_ref({:xgit_repo, repository}, name, opts)
      when is_pid(repository) and is_binary(name) and is_list(opts) do
    if Ref.valid_name?(name) do
      GenServer.call(repository, {:delete_ref, name, opts})
    else
      cover {:error, :invalid_ref}
    end
  end

  def delete_ref(repository, name, opts)
      when is_pid(repository) and is_binary(name) and is_list(opts) do
    repository
    |> assert_valid()
    |> delete_ref(name, opts)
  end

  @doc ~S"""
  Deletes a reference in the repository.

  Called when `delete_ref/3` is called.

  ## Options

  `follow_link?`: `true` to follow symbolic refs

  `old_target`: If present, a ref with this name must already exist and the `target`
  value must match the object ID provided in this option.

  ## Return Value

  Should return `{:ok, state}` if deleted successfully or the ref did not exist.

  Should return `{:error, :cant_delete_file}` if unable to delete the storage for
  the ref.

  Should return `{:error, :old_target_not_matched}` if `old_target` was specified and the
  target ref points to a different object ID or the ref did not exist.
  """
  @callback handle_delete_ref(state :: any, name :: Ref.name(),
              follow_link?: boolean,
              old_target: ObjectId.t()
            ) ::
              {:ok, state :: any} | {:error, reason :: delete_ref_reason, state :: any}

  @typedoc ~S"""
  Error codes that can be returned by `get_ref/2`.
  """
  @type get_ref_reason :: File.posix() | :invalid_name | :not_found

  @doc ~S"""
  Reads a reference from the repository.

  If any existing reference exists with this name, it will be returned.

  ## Parameters

  `name` is the name of the reference to be found. It must be a valid name
  as per `Xgit.Ref.valid_name?/1`.

  ## Options

  `follow_link?`: (default: `true`) `true` to follow symbolic refs

  ## TO DO

  Dereference tags? https://github.com/elixir-git/xgit/issues/228

  ## Return Value

  `{:ok, ref}` if the reference was found successfully. `ref` will be an
  `Xgit.Ref` struct.

  `{:error, :invalid_name}` if `name` is not a valid ref name.

  `{:error, :not_found}` if no such reference exists.
  """
  @spec get_ref(repository :: t, name :: String.t(), follow_link?: boolean) ::
          {:ok, ref :: Ref.t()} | {:error, reason :: get_ref_reason}
  def get_ref(repository, name, opts \\ [])

  def get_ref({:xgit_repo, repository}, name, opts)
      when is_pid(repository) and is_binary(name) and is_list(opts) do
    if valid_ref_name?(name) do
      GenServer.call(repository, {:get_ref, name, opts})
    else
      cover {:error, :invalid_name}
    end
  end

  def get_ref(repository, name, opts)
      when is_pid(repository) and is_binary(name) and is_list(opts) do
    repository
    |> assert_valid()
    |> get_ref(name, opts)
  end

  # "HEAD" is accepted here even though it is not a valid ref name per
  # `Ref.valid_name?/1`; reads of HEAD are allowed while writes are not.
  defp valid_ref_name?("HEAD"), do: cover(true)
  defp valid_ref_name?(name), do: Ref.valid_name?(name)

  @doc ~S"""
  Reads a reference from the repository.

  Called when `get_ref/3` is called.

  ## Options

  `follow_link?`: (default: `true`) `true` to follow symbolic refs

  ## Return Value

  Should return `{:ok, ref, state}` if the reference was found successfully.
  `ref` must be an `Xgit.Ref` struct.

  Should return `{:error, :not_found, state}` if no such reference exists.
  """
  @callback handle_get_ref(state :: any, name :: String.t(), follow_link?: boolean) ::
              {:ok, ref :: Xgit.Ref.t(), state :: any}
              | {:error, reason :: get_ref_reason, state :: any}

  # TO DO: Add a `pack_refs` function. https://github.com/elixir-git/xgit/issues/223

  ## --- Config ---

  @doc ~S"""
  Return any configuration entries that match the requested search.

  The entries are not necessarily sorted; the order in which they are returned is up to
  the underlying storage mechanism.

  ## Options

  * `section:` (`String`) if provided, only returns entries in the named section
  * `subsection:` (`String`) if provided, only returns entries in the named subsection
  * `name:` (`String`) if provided, only returns entries with the given variable name

  If no options are provided, returns all entries.

  ## Return Values

  A list of `Xgit.ConfigEntry` structs that match the search parameters.
  """
  @spec get_config_entries(repository :: t,
          section: String.t(),
          subsection: String.t(),
          name: String.t()
        ) :: [Xgit.ConfigEntry.t()]
  def get_config_entries(repository, opts \\ []) when is_pid(repository) and is_list(opts) do
    {:ok, entries} = GenServer.call(repository, {:get_config_entries, opts})
    entries
  end

  @doc ~S"""
  Return any configuration entries that match the requested search.

  Called when `get_config_entries/2` is called.

  The entries need not be sorted.

  ## Options

  * `section:` (`String`) if provided, only returns entries in the named section
  * `subsection:` (`String`) if provided, only returns entries in the named subsection
  * `name:` (`String`) if provided, only returns entries with the given variable name

  If no options are provided, returns all entries.

  ## Return Value

  Should return `{:ok, entries, state}` where `entries` is a list of `Xgit.ConfigEntry`
  structs that match the search parameters.
  """
  @callback handle_get_config_entries(state :: any,
              section: String.t(),
              subsection: String.t(),
              name: String.t()
            ) ::
              {:ok, entries :: [Xgit.ConfigEntry.t()], state :: any}

  @typedoc ~S"""
  Error codes that can be returned by `add_config_entry/3`.
  """
  @type add_config_entry_reason :: File.posix()

  @doc ~S"""
  Add an entry to an existing config.

  ## Parameters

  `entry` (`Xgit.ConfigEntry`) entry to be added

  ## Options

  `add?`: if `true`, adds this entry to any that may already exist

  `replace_all?`: if `true`, removes all existing entries that match any keys provided
  before adding the existing one

  ## Return Values

  `:ok` if successful.

  `{:error, TBD}` if unable.
  """
  @spec add_config_entry(repository :: t, entry :: Xgit.ConfigEntry.t(),
          add?: boolean,
          replace_all?: boolean
        ) ::
          :ok | {:error, reason :: add_config_entry_reason}
  def add_config_entry(repository, %ConfigEntry{} = entry, opts \\ [])
      when is_pid(repository) and is_list(opts) do
    # An invalid entry is a programmer error, not a runtime condition, so this
    # raises instead of returning an error tuple.
    if ConfigEntry.valid?(entry) do
      GenServer.call(repository, {:add_config_entry, entry, opts})
    else
      raise ArgumentError,
            "Xgit.Repository.Storage.add_config_entry/3: entry is invalid"
    end
  end

  @doc ~S"""
  Add a new entry to an existing config.

  Called when `add_config_entry/3` is called.

  ## Parameters

  `entry` (`Xgit.ConfigEntry`) entry to be added

  ## Options

  `add?`: if `true`, adds this entry to any that may already exist

  `replace_all?`: if `true`, removes all existing entries that match any keys provided
  before adding this one

  ## Return Value

  Should return `{:ok, state}` if successful.

  Should return `{:error, reason, state}` if unable to complete the update.
  """
  @callback handle_add_config_entry(
              state :: any,
              entry :: Xgit.ConfigEntry.t(),
              add?: boolean,
              replace_all?: boolean
            ) ::
              {:ok, state :: any}
              | {:error, reason :: add_config_entry_reason, state :: any}

  @doc ~S"""
  Remove any configuration entries that match the requested search.

  ## Options

  * `section:` (`String`) if provided, only removes entries in the named section
  * `subsection:` (`String`) if provided, only removes entries in the named subsection
  * `name:` (`String`) if provided, only removes entries with the given variable name

  **WARNING:** If no options are provided, removes all entries.

  ## Return Values

  `:ok` regardless of whether any matching items were found to remove.
  """
  @spec remove_config_entries(repository :: t,
          section: String.t(),
          subsection: String.t(),
          name: String.t()
        ) :: :ok
  def remove_config_entries(repository, opts \\ []) when is_pid(repository) and is_list(opts) do
    GenServer.call(repository, {:remove_config_entries, opts})
  end

  @doc ~S"""
  Remove any configuration entries that match the requested search.

  Called when `remove_config_entries/2` is called.

  ## Options

  * `section:` (`String`) if provided, only removes entries in the named section
  * `subsection:` (`String`) if provided, only removes entries in the named subsection
  * `name:` (`String`) if provided, only removes entries with the given variable name

  If no options are provided, removes all entries.

  ## Return Value

  Should return `{:ok, state}` if successful. (This _could_ mean no matching items were
  found to remove.)
  """
  @callback handle_remove_config_entries(state :: any,
              section: String.t(),
              subsection: String.t(),
              name: String.t()
            ) :: {:ok, state :: any}

  ## --- Callbacks ---

  @impl true
  def handle_call(:valid_repository?, _from, state), do: {:reply, :valid_repository, state}

  def handle_call(:default_working_tree, _from, %{working_tree: working_tree} = state),
    do: {:reply, working_tree, state}

  # Only succeeds when no working tree is attached yet (working_tree: nil).
  def handle_call({:set_default_working_tree, working_tree}, _from, %{working_tree: nil} = state) do
    if WorkingTree.valid?(working_tree) do
      {:reply, :ok, %{state | working_tree: working_tree}}
    else
      {:reply, :error, state}
    end
  end

  def handle_call({:set_default_working_tree, _working_tree}, _from, state),
    do: {:reply, :error, state}

  def handle_call({:has_all_object_ids?, object_ids}, _from, state),
    do: delegate_boolean_call_to(state, :handle_has_all_object_ids?, [object_ids])

  def handle_call({:get_object, object_id}, _from, state),
    do: delegate_call_to(state, :handle_get_object, [object_id])

  def handle_call({:put_loose_object, %Object{} = object}, _from, state),
    do: delegate_call_to(state, :handle_put_loose_object, [object])

  def handle_call(:list_refs, _from, state),
    do: delegate_call_to(state, :handle_list_refs, [])

  def handle_call({:put_ref, %Ref{} = ref, opts}, _from, state),
    do: delegate_call_to(state, :handle_put_ref, [ref, opts])

  def handle_call({:delete_ref, name, opts}, _from, state),
    do: delegate_call_to(state, :handle_delete_ref, [name, opts])

  def handle_call({:get_ref, name, opts}, _from, state),
    do: delegate_call_to(state, :handle_get_ref, [name, opts])

  def handle_call({:get_config_entries, opts}, _from, state),
    do: delegate_call_to(state, :handle_get_config_entries, [opts])

  def handle_call({:add_config_entry, entry, opts}, _from, state),
    do: delegate_call_to(state, :handle_add_config_entry, [entry, opts])

  def handle_call({:remove_config_entries, opts}, _from, state),
    do: delegate_call_to(state, :handle_remove_config_entries, [opts])

  def handle_call(message, _from, state) do
    Logger.warn("Repository received unrecognized call #{inspect(message)}")
    {:reply, {:error, :unknown_message}, state}
  end

  # Invokes the implementation module's callback, threading its private state
  # through, and normalizes the callback's return shape into a GenServer reply:
  #   {:ok, mod_state}              -> :ok
  #   {:ok, response, mod_state}    -> {:ok, response}
  #   {:error, reason, mod_state}   -> {:error, reason}
  defp delegate_call_to(%{mod: mod, mod_state: mod_state} = state, function, args) do
    case apply(mod, function, [mod_state | args]) do
      {:ok, mod_state} -> {:reply, :ok, %{state | mod_state: mod_state}}
      {:ok, response, mod_state} -> {:reply, {:ok, response}, %{state | mod_state: mod_state}}
      {:error, reason, mod_state} -> {:reply, {:error, reason}, %{state | mod_state: mod_state}}
    end
  end

  # As delegate_call_to/3, but unwraps {:ok, boolean} into the bare boolean.
  defp delegate_boolean_call_to(state, function, args) do
    {:reply, {:ok, response}, state} = delegate_call_to(state, function, args)
    cover {:reply, response, state}
  end

  defmacro __using__(opts) do
    quote location: :keep, bind_quoted: [opts: opts] do
      use GenServer, opts
      alias Xgit.Repository.Storage
      @behaviour Storage
    end
  end
end
|
lib/xgit/repository/storage.ex
| 0.898707 | 0.521654 |
storage.ex
|
starcoder
|
defmodule TinyColor.RGB do
  @moduledoc """
  Represents a color in the for of red, green, blue, and optional alpha
  """
  import TinyColor.Normalize

  # Channel values are stored as floats in 0..255; alpha is 0.0..1.0.
  # NOTE(review): ranges presumed enforced by TinyColor.Normalize.cast/2 — confirm.
  defstruct red: 0.0, green: 0.0, blue: 0.0, alpha: 1.0

  @doc ~S"""
  Returns a string representation of this color. hex is only supported if alpha == 1.0

  ## Examples

      iex> TinyColor.RGB.to_string(%TinyColor.RGB{red: 128.0, green: 129.0, blue: 190.0, alpha: 1.0})
      "#8081BE"

      iex> TinyColor.RGB.to_string(%TinyColor.RGB{red: 128.0, green: 129.0, blue: 190.0})
      "#8081BE"

      iex> TinyColor.RGB.to_string(%TinyColor.RGB{red: 128.0, green: 129.0, blue: 190.0, alpha: 0.5})
      "rgba(128, 129, 190, 0.5)"

      iex> TinyColor.RGB.to_string(%TinyColor.RGB{red: 128.0, green: 129.0, blue: 190.0}, :hex)
      "#8081BE"

      iex> TinyColor.RGB.to_string(%TinyColor.RGB{red: 128.0, green: 129.0, blue: 190.0, alpha: 1.0}, :hex)
      "#8081BE"

      iex> TinyColor.RGB.to_string(%TinyColor.RGB{red: 128.0, green: 129.0, blue: 190.0, alpha: 0.5}, :hex)
      "#8081BE80"

      iex> TinyColor.RGB.to_string(%TinyColor.RGB{red: 128.0, green: 129.0, blue: 190.0}, :rgb)
      "rgb(128, 129, 190)"

      iex> TinyColor.RGB.to_string(%TinyColor.RGB{red: 128.0, green: 129.0, blue: 190.0, alpha: 1.0}, :rgb)
      "rgb(128, 129, 190)"

      iex> TinyColor.RGB.to_string(%TinyColor.RGB{red: 128.0, green: 129.0, blue: 190.0}, :rgba)
      "rgba(128, 129, 190, 1.0)"

      iex> TinyColor.RGB.to_string(%TinyColor.RGB{red: 128.0, green: 129.0, blue: 190.0, alpha: 1.0}, :rgba)
      "rgba(128, 129, 190, 1.0)"

      iex> TinyColor.RGB.to_string(%TinyColor.RGB{red: 128.0, green: 129.0, blue: 190.0, alpha: 0.5}, :rgba)
      "rgba(128, 129, 190, 0.5)"
  """
  def to_string(struct, type \\ nil)

  # No explicit format: pick :hex for fully-opaque colors, :rgba otherwise.
  def to_string(%__MODULE__{} = struct, nil) do
    type =
      case struct.alpha do
        1.0 -> :hex
        _ -> :rgba
      end

    to_string(struct, type)
  end

  def to_string(%__MODULE__{red: r, green: g, blue: b, alpha: alpha}, :rgba) do
    "rgba(#{round(r)}, #{round(g)}, #{round(b)}, #{Float.round(alpha, 4)})"
  end

  # :rgb is only defined for fully opaque colors; any other alpha falls
  # through all clauses and raises FunctionClauseError.
  def to_string(%__MODULE__{red: r, green: g, blue: b, alpha: 1.0}, :rgb) do
    "rgb(#{round(r)}, #{round(g)}, #{round(b)})"
  end

  # CLAUSE ORDER MATTERS: this catch-all for alpha == 1.0 sits *before* the
  # :hex clause below, so an opaque color asked for :hex takes this path and
  # gets a 6-digit hex string (no alpha byte appended).
  def to_string(%__MODULE__{red: r, green: g, blue: b, alpha: 1.0}, _) do
    "#" <> to_hex(r) <> to_hex(g) <> to_hex(b)
  end

  # Translucent :hex — appends the alpha channel scaled to 0..255 as a
  # fourth hex byte (8-digit hex form).
  def to_string(%__MODULE__{red: r, green: g, blue: b, alpha: alpha}, :hex) do
    "#" <> to_hex(r) <> to_hex(g) <> to_hex(b) <> to_hex(alpha * 255)
  end

  # Rounds floats to the nearest integer, then renders two uppercase hex
  # digits (zero-padded for values below 0x10).
  defp to_hex(value) when is_float(value), do: to_hex(round(value))
  defp to_hex(value) when value < 16 and value >= 0, do: "0" <> Integer.to_string(value, 16)
  defp to_hex(value) when is_integer(value), do: Integer.to_string(value, 16)

  # Builds a color, normalizing each channel via TinyColor.Normalize.cast/2.
  def new(red, green, blue, alpha \\ 1.0) do
    %__MODULE__{
      red: cast(red, :red),
      green: cast(green, :green),
      blue: cast(blue, :blue),
      alpha: cast(alpha, :alpha)
    }
  end

  # Returns {r, g, b, a} with channels scaled to 0..1; alpha is passed through.
  def percentages(%TinyColor.RGB{red: r, green: g, blue: b, alpha: a}) do
    {
      r / 255,
      g / 255,
      b / 255,
      a
    }
  end

  defimpl String.Chars do
    def to_string(struct) do
      TinyColor.RGB.to_string(struct)
    end
  end

  defimpl Jason.Encoder do
    def encode(value, opts) do
      Jason.Encode.string(TinyColor.RGB.to_string(value), opts)
    end
  end

  defimpl Phoenix.HTML.Safe do
    def to_iodata(value), do: to_string(value)
  end
end
|
lib/tiny_color/spaces/rgb.ex
| 0.881155 | 0.640313 |
rgb.ex
|
starcoder
|
defmodule TicTacToe.Game do
  @moduledoc """
  The state of a tic tac toe game.

  Create a new state with `new_game`. Once a game has been created you
  can `query` a position on the board, place new marks on the board or query
  whose turn it is. If the game has finished `winner` should return the winning
  player, otherwise it returns `:nobody`.

  ## Example

  ```
  # after creating a new game position 1, 2 is empty
  iex> new_game() |> query(1, 2)
  :empty

  # there isn't a winner either
  iex> new_game() |> winner()
  :nobody

  # place an X on the top left corner
  iex> new_game() |> update(0, 0) |> query(0, 0)
  :player_x

  # after a move it's the opposite players turn
  iex> new_game() |> update(0, 0) |> whos_next
  :player_o
  ```
  """

  defmodule State do
    @moduledoc false
    # Board: map from {x, y} (0..2 each) to :empty | :player_x | :player_o.
    # Built at compile time; Map.new/2 replaces the reduce/Map.put idiom.
    keys = for x <- 0..2, y <- 0..2, do: {x, y}
    board = Map.new(keys, fn k -> {k, :empty} end)
    defstruct next_player: :player_x, counter: 0, board: board
  end

  @doc "Creates a new game with an empty board and `:player_x` to make a move."
  def new_game do
    %State{}
  end

  @doc "Returns the board content on the specified location."
  def query(state, x, y) do
    state.board[{x, y}]
  end

  @doc "Returns the next player to make a move."
  def whos_next(state) do
    state.next_player
  end

  @doc "Returns the winner if there is one. Otherwise returns `:nobody`."
  def winner(state) do
    # Compute the rows/columns/diagonals once (was computed twice before).
    lines = lines(state)

    cond do
      List.duplicate(:player_x, 3) in lines -> :player_x
      List.duplicate(:player_o, 3) in lines -> :player_o
      true -> :nobody
    end
  end

  @doc """
  Makes a move for the player who is next.

  Raises an error if the move is not possible because the position is occupied
  or the game has ended.
  """
  def update(state, {x, y}), do: update(state, x, y)

  def update(state, x, y) do
    cond do
      state |> winner != :nobody ->
        raise "game has already ended"

      state |> query(x, y) != :empty ->
        who = state |> query(x, y)
        raise "position #{x}, #{y} is already occupied by #{who}"

      true ->
        state |> make_update!(x, y)
    end
  end

  # Places the current player's mark and advances the turn. Uses struct-update
  # syntax instead of Map.put/3 so unknown keys raise (preserves struct
  # guarantees). `state.next_player` on the right refers to the pre-move
  # player, which is the mark being placed.
  defp make_update!(%State{} = state, x, y) do
    %State{
      state
      | counter: state.counter + 1,
        next_player: opponent(state.next_player),
        board: %{state.board | {x, y} => state.next_player}
    }
  end

  @doc """
  Is the game finished

  The game is finished either becasue one of the players has won or the board
  is full.
  """
  def finished?(state) do
    state.counter == 9 || winner(state) != :nobody
  end

  @doc """
  The possible moves on a given board
  """
  def moves(state) do
    if finished?(state) do
      []
    else
      # Comprehension with a filter replaces the build-then-filter pipeline.
      for x <- 0..2, y <- 0..2, query(state, x, y) == :empty, do: {x, y}
    end
  end

  defp opponent(:player_x), do: :player_o
  defp opponent(:player_o), do: :player_x

  # One list per horizontal row (fixed y).
  defp rows(state) do
    for y <- 0..2 do
      for x <- 0..2, do: query(state, x, y)
    end
  end

  # One list per vertical column (fixed x).
  defp columns(state) do
    for x <- 0..2 do
      for y <- 0..2, do: query(state, x, y)
    end
  end

  # Main diagonal and anti-diagonal.
  defp diagonals(state) do
    a = for x <- 0..2, do: query(state, x, x)
    b = for x <- 0..2, do: query(state, x, 2 - x)
    [a, b]
  end

  # All eight winnable lines.
  defp lines(state) do
    rows(state) ++ columns(state) ++ diagonals(state)
  end
end
|
lib/tictactoe/game.ex
| 0.897006 | 0.953013 |
game.ex
|
starcoder
|
defmodule JOSEUtils.JWKS do
  @moduledoc """
  Convenience function to work with JWK sets
  """

  alias JOSEUtils.{JWA, JWK}

  @type t :: [JWK.t()]

  @doc """
  Filters the JWKS using a key selector `t:JWK.key_selector/0`
  """
  @spec filter(t(), JWK.key_selector()) :: t()
  def filter(jwks, key_selector),
    do: Enum.filter(jwks, fn jwk -> JWK.match_key_selector?(jwk, key_selector) end)

  @doc """
  Returns the keys suitable for signature from a JWK set

  Note that it does **not** return the keys suitable only for signature verification.
  MAC keys are considered signature keys, and are returned as well.
  """
  @spec signature_keys(
          t(),
          alg_or_algs :: JWA.sig_alg() | [JWA.sig_alg()] | nil
        ) :: t()
  def signature_keys(jwks, alg_or_algs \\ nil)

  def signature_keys(jwks, nil), do: filter(jwks, use: "sig", key_ops: "sign")

  def signature_keys(jwks, alg_or_algs),
    do: filter(jwks, use: "sig", key_ops: "sign", alg: alg_or_algs)

  @doc """
  Returns the keys suitable for signature **verification** from a JWK set

  MAC keys are considered verification keys, and are returned as well.
  """
  @spec verification_keys(
          t(),
          alg_or_algs :: JWA.sig_alg() | [JWA.sig_alg()] | nil
        ) :: t()
  def verification_keys(jwks, alg_or_algs \\ nil)

  # FIX: this clause previously filtered on `key_ops: "sign"` — a copy-paste
  # from signature_keys/2. RFC 7517 defines the "verify" key operation for
  # signature verification, and the alg-taking clause below already used it;
  # the two clauses of the same function must agree.
  def verification_keys(jwks, nil), do: filter(jwks, use: "sig", key_ops: "verify")

  def verification_keys(jwks, alg_or_algs),
    do: filter(jwks, use: "sig", key_ops: "verify", alg: alg_or_algs)

  @doc """
  Returns the keys suitable for encryption from a JWK set
  """
  @spec encryption_keys(
          t(),
          alg_or_algs :: JWA.enc_alg() | [JWA.enc_alg()] | nil,
          enc_or_encs :: JWA.enc_enc() | [JWA.enc_enc()] | nil
        ) :: t()
  def encryption_keys(jwks, alg_or_algs \\ nil, enc_or_encs \\ nil)

  def encryption_keys(jwks, nil, nil),
    do: filter(jwks, use: "enc", key_ops: ["encrypt", "deriveKey"])

  def encryption_keys(jwks, algs, nil),
    do: filter(jwks, use: "enc", key_ops: ["encrypt", "deriveKey"], alg: algs)

  def encryption_keys(jwks, algs, encs),
    do: filter(jwks, use: "enc", key_ops: ["encrypt", "deriveKey"], alg: algs, enc: encs)

  @doc """
  Returns the keys suitable for decryption from a JWK set
  """
  @spec decryption_keys(
          t(),
          alg_or_algs :: JWA.enc_alg() | [JWA.enc_alg()] | nil,
          enc_or_encs :: JWA.enc_enc() | [JWA.enc_enc()] | nil
        ) :: t()
  def decryption_keys(jwks, alg_or_algs \\ nil, enc_or_encs \\ nil)

  def decryption_keys(jwks, nil, nil),
    do: filter(jwks, use: "enc", key_ops: "decrypt")

  def decryption_keys(jwks, algs, nil),
    do: filter(jwks, use: "enc", key_ops: "decrypt", alg: algs)

  def decryption_keys(jwks, algs, encs),
    do: filter(jwks, use: "enc", key_ops: "decrypt", alg: algs, enc: encs)
end
|
lib/jose_utils/jwks.ex
| 0.887064 | 0.427935 |
jwks.ex
|
starcoder
|
defmodule Zap do
  @moduledoc """
  Native ZIP archive creation with chunked input and output.

  Erlang/OTP provides the powerful `:zip` and `:zlib` modules, but they can only create an archive
  all at once. That requires _all_ of the data to be kept in memory or written to disk. What if
  you don't have enough space to keep the file in memory or on disk? With Zap you can add files
  one at a time while writing chunks of data at the same time.

  ## Examples

  Create a ZIP by adding a single entry at a time:

  ```elixir
  iodata =
    Zap.new()
    |> Zap.entry("a.txt", a_binary)
    |> Zap.entry("b.txt", some_iodata)
    |> Zap.entry("c.txt", more_iodata)
    |> Zap.to_iodata()

  File.write!("archive.zip", iodata, [:binary, :raw])
  ```

  Use `into` support from the `Collectable` protocol to build a ZIP dynamically:

  ```elixir
  iodata =
    "*.*"
    |> Path.wildcard()
    |> Enum.map(fn path -> {Path.basename(path), File.read!(path)} end)
    |> Enum.into(Zap.new())
    |> Zap.to_iodata()

  File.write!("files.zip", iodata, [:binary, :raw])
  ```

  Use `Zap.into_stream/2` to incrementally build a ZIP by chunking files into an archive:

  ```elixir
  one_mb = 1024 * 1024

  write_fun = &File.write!("streamed.zip", &1, [:append, :binary, :raw])

  file_list
  |> Stream.map(fn path -> {Path.basename(path), File.read!(path)} end)
  |> Zap.into_stream(one_mb)
  |> Stream.each(write_fun)
  |> Stream.run()
  ```

  ## Glossary

  The entry and header bytes are composed based on the [ZIP specification provided by
  PKWare](https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT). Some helpful terms
  that you may encounter in the function documentation:

  * LFH (Local File Header) — Included before each file in the archive. The header contains
    details about the name and size of the entry.
  * CDH (Central Directory Header) — The final bits of an archive, this contains summary
    information about the files contained within the archive.
  """

  alias Zap.{Directory, Entry}

  @type t :: %__MODULE__{entries: [Entry.t()]}

  # NOTE(review): `compression` is stored but never read within this module —
  # presumably consumed by Zap.Entry/Zap.Directory; confirm before removing.
  # Entries are kept in insertion order (see `entry/3`).
  defstruct compression: 0, entries: []

  @doc """
  Initialize a new Zap struct.

  The struct is used to accumulate entries, which can then be flushed as parts of a zip file.
  """
  @doc since: "0.1.0"
  # NOTE(review): the spec covers only `new/0`; the optional `opts` argument is
  # unspec'd — consider `@spec new(keyword()) :: t()`.
  @spec new() :: t()
  def new(opts \\ []) do
    struct!(__MODULE__, opts)
  end

  @doc """
  Add a named entry to a zap struct.

  ## Example

      Zap.new()
      |> Zap.entry("a.jpg", jpg_data)
      |> Zap.entry("b.png", png_data)
  """
  @doc since: "0.1.0"
  @spec entry(zap :: t(), name :: binary(), data :: binary()) :: t()
  def entry(%__MODULE__{} = zap, name, data) when is_binary(name) and is_binary(data) do
    # Appends the new entry while keeping the entry list flat, preserving
    # insertion order.
    %{zap | entries: List.flatten([zap.entries | [Entry.new(name, data)]])}
  end

  @doc """
  Check the total number of un-flushed bytes available.

  ## Example

      iex> Zap.bytes(Zap.new())
      0

      iex> Zap.new()
      ...> |> Zap.entry("a.txt", "a")
      ...> |> Zap.bytes()
      52

      iex> zap = Zap.new()
      ...> zap = Zap.entry(zap, "a.txt", "a")
      ...> {zap, _bytes} = Zap.flush(zap, :all)
      ...> Zap.bytes(zap)
      0
  """
  @doc since: "0.1.0"
  @spec bytes(zap :: t()) :: non_neg_integer()
  def bytes(%__MODULE__{entries: entries}) do
    Enum.reduce(entries, 0, &(&1.size + &2))
  end

  @doc """
  Output a complete iolist of data from a Zap struct.

  This is a convenient way of combining the output from `flush/1` and `final/1`.

  Note that only the combined iodata is returned — the flushed struct is
  discarded internally, so the zap value should not be reused after this call.

  ## Example

      iex> Zap.new()
      ...> |> Zap.entry("a.txt", "aaaa")
      ...> |> Zap.entry("b.txt", "bbbb")
      ...> |> Zap.to_iodata()
      ...> |> IO.iodata_length()
      248
  """
  @doc since: "0.1.0"
  @spec to_iodata(zap :: t()) :: iolist()
  def to_iodata(%__MODULE__{} = zap) do
    {zap, flush} = flush(zap)
    {_ap, final} = final(zap)

    [flush, final]
  end

  @doc """
  Flush a fixed number of bytes from the stored entries.

  Flushing is stateful, meaning the same data won't be flushed on successive calls.

  ## Example

      iex> Zap.new()
      ...> |> Zap.entry("a.txt", "aaaa")
      ...> |> Zap.entry("b.txt", "bbbb")
      ...> |> Zap.flush()
      ...> |> elem(1)
      ...> |> IO.iodata_length()
      110
  """
  @doc since: "0.1.0"
  @spec flush(zap :: t(), bytes :: pos_integer() | :all) :: {t(), iodata()}
  def flush(%__MODULE__{entries: entries} = zap, bytes \\ :all) do
    {flushed, entries, _} =
      Enum.reduce(entries, {[], [], bytes}, fn entry, {iodata, entries, bytes} ->
        {entry, binary} = Entry.consume(entry, bytes)

        # Remaining byte budget for subsequent entries; `:all` never shrinks.
        next_bytes =
          cond do
            bytes == :all -> :all
            bytes - byte_size(binary) > 0 -> bytes - byte_size(binary)
            true -> 0
          end

        # `[iodata | binary]` nests the accumulator as iodata, so entry order
        # is preserved without reversing; the entry list itself is rebuilt in
        # reverse and flipped back below.
        {[iodata | binary], [entry | entries], next_bytes}
      end)

    {%{zap | entries: Enum.reverse(entries)}, flushed}
  end

  @doc """
  Generate the final CDH (Central Directory Header), required to complete an archive.
  """
  @doc since: "0.1.0"
  @spec final(zap :: t()) :: {t(), iodata()}
  def final(%__MODULE__{entries: entries} = zap) do
    {zap, Directory.encode(entries)}
  end

  @doc """
  Stream an enumerable of `name`/`data` pairs into a zip structure and emit chunks of zip data.

  The chunked output will be _at least_ the size of `chunk_size`, but they may be much larger. The
  last emitted chunk automatically includes the central directory header, the closing set of
  bytes.

  ## Example

      iex> %{"a.txt" => "aaaa", "b.txt" => "bbbb"}
      ...> |> Zap.into_stream(8)
      ...> |> Enum.to_list()
      ...> |> IO.iodata_to_binary()
      ...> |> :zip.table()
      ...> |> elem(0)
      :ok
  """
  @doc since: "0.1.0"
  @spec into_stream(enum :: Enumerable.t(), chunk_size :: pos_integer()) :: Enumerable.t()
  def into_stream(enum, chunk_size \\ 1024 * 1024) when is_integer(chunk_size) do
    # Accumulate entries until at least `chunk_size` bytes are buffered, then
    # emit everything buffered so far as one chunk.
    chunk_fun = fn {name, data}, zap ->
      zap = entry(zap, name, data)

      if bytes(zap) >= chunk_size do
        {zap, flushed} = flush(zap, :all)

        {:cont, flushed, zap}
      else
        {:cont, zap}
      end
    end

    # The last chunk flushes whatever remains plus the closing CDH bytes.
    after_fun = fn zap ->
      iodata = to_iodata(zap)

      {:cont, iodata, zap}
    end

    Stream.chunk_while(enum, Zap.new(), chunk_fun, after_fun)
  end

  defimpl Collectable do
    # Collects `{name, data}` pairs into the zap via `Zap.entry/3`.
    def into(original) do
      fun = fn
        zap, {:cont, {name, data}} -> Zap.entry(zap, name, data)
        zap, :done -> zap
        _zap, :halt -> :ok
      end

      {original, fun}
    end
  end

  defimpl Inspect do
    import Inspect.Algebra

    # Note: `List` here is `Inspect.List`, not the stdlib `List`.
    alias Inspect.{List, Opts}

    # Renders as #Zap<["a.txt", ...]> using the entry names.
    def inspect(zap, opts) do
      opts = %Opts{opts | charlists: :as_lists}

      concat(["#Zap<", List.inspect(names(zap), opts), ">"])
    end

    defp names(%{entries: entries}) do
      entries
      |> Enum.reverse()
      |> Enum.map(& &1.header.name)
    end
  end
end
|
lib/zap.ex
| 0.912194 | 0.878939 |
zap.ex
|
starcoder
|
defmodule FormatParser.Audio do
  alias __MODULE__

  @moduledoc """
  An Audio struct and functions.

  The Audio struct contains the fields format, sample_rate_hz,
  num_audio_channels, intrinsics and nature.
  """

  defstruct [
    :format,
    :sample_rate_hz,
    :num_audio_channels,
    nature: :audio,
    intrinsics: %{}
  ]

  @doc """
  Parses a file and extracts some information from it.

  Takes a `binary file` as argument.

  Returns a struct which contains all information that has been extracted from
  the file if the file is recognized.

  Returns the following tuple if file not recognized: `{:error, file}`.
  """
  def parse({:error, file}) when is_binary(file), do: parse_audio(file)
  def parse(file) when is_binary(file), do: parse_audio(file)
  def parse(result), do: result

  # Dispatch on the magic bytes at the start of the file; anything without a
  # known signature is returned unrecognized.
  defp parse_audio(<<"RIFF", rest::binary>>), do: parse_wav(rest)
  defp parse_audio(<<"OggS", rest::binary>>), do: parse_ogg(rest)
  defp parse_audio(<<"FORM", 0x00, rest::binary>>), do: parse_aiff(rest)
  defp parse_audio(<<"fLaC", rest::binary>>), do: parse_flac(rest)
  defp parse_audio(<<"ID3", rest::binary>>), do: parse_mp3(rest)
  defp parse_audio(file), do: {:error, file}

  # Only the format is reported for MP3; the ID3 payload is not inspected.
  defp parse_mp3(<<_rest::binary>>), do: %Audio{format: :mp3}

  defp parse_flac(<<_::size(112), sample_rate_hz::size(20), num_audio_channels::size(3), _::size(5), _::size(36), _::binary>>) do
    %Audio{
      format: :flac,
      num_audio_channels: num_audio_channels,
      sample_rate_hz: sample_rate_hz
    }
  end

  defp parse_ogg(<<_::size(280), channels::little-integer-size(8), sample_rate_hz::little-integer-size(32), _::binary>>) do
    %Audio{
      format: :ogg,
      num_audio_channels: channels,
      sample_rate_hz: sample_rate_hz
    }
  end

  defp parse_wav(<<_::size(144), channels::little-integer-size(16), sample_rate_hz::little-integer-size(32), byte_rate::little-integer-size(32), block_align::little-integer-size(16), bits_per_sample::little-integer-size(16), _::binary>>) do
    %Audio{
      format: :wav,
      num_audio_channels: channels,
      sample_rate_hz: sample_rate_hz,
      intrinsics: %{
        byte_rate: byte_rate,
        block_align: block_align,
        bits_per_sample: bits_per_sample
      }
    }
  end

  defp parse_aiff(<<_::size(56), "COMM", _::size(32), channels::size(16), frames::size(32), bits_per_sample::size(16), _sample_rate_components::size(80), _::binary>>) do
    %Audio{
      format: :aiff,
      num_audio_channels: channels,
      intrinsics: %{num_frames: frames, bits_per_sample: bits_per_sample}
    }
  end
end
|
lib/format_parser/audio.ex
| 0.862308 | 0.606702 |
audio.ex
|
starcoder
|
defmodule HashRing.Utils do
  @moduledoc false

  require Logger

  @type pattern_list :: [String.t() | Regex.t()]
  @type blacklist :: pattern_list
  @type whitelist :: pattern_list

  @doc """
  An internal function for determining if a given node should be
  included in the ring or excluded, based on a provided blacklist
  and/or whitelist. Both lists should contain either literal strings,
  literal regexes, or regex strings.

  This function only works with nodes which are atoms or strings, and
  will raise an exception if other node name types are used, such as tuples.
  """
  @spec ignore_node?(term(), blacklist, whitelist) :: boolean
  def ignore_node?(node, blacklist, whitelist)

  def ignore_node?(_node, [], []),
    do: false

  def ignore_node?(node, blacklist, whitelist) when is_atom(node),
    do: ignore_node?(Atom.to_string(node), blacklist, whitelist)

  # Blacklist only: ignore the node when it matches any blacklist pattern.
  def ignore_node?(node, blacklist, []) when is_binary(node) and is_list(blacklist),
    do: matches_any?(node, blacklist, "blacklist")

  # Whitelist only: ignore the node when it matches no whitelist pattern.
  def ignore_node?(node, [], whitelist) when is_binary(node) and is_list(whitelist),
    do: not matches_any?(node, whitelist, "whitelist")

  def ignore_node?(node, blacklist, whitelist) when is_list(whitelist) and is_list(blacklist) do
    # Criteria for ignoring nodes when both blacklisting and whitelisting is active
    blacklisted? = ignore_node?(node, blacklist, [])
    whitelisted? = not ignore_node?(node, [], whitelist)

    cond do
      # If it is blacklisted and also whitelisted, then do not ignore
      blacklisted? and whitelisted? ->
        false

      # If it is blacklisted and not also whitelisted, then ignore
      blacklisted? ->
        true

      # If it is not blacklisted and is whitelisted, then do not ignore
      whitelisted? ->
        false

      # If it is not blacklisted and not whitelisted, then ignore
      :else ->
        true
    end
  end

  # Returns true when `node` matches any pattern in `patterns`: an exact
  # string, a precompiled %Regex{}, or a regex source string (compiled on the
  # fly). Invalid regex sources are logged and treated as non-matching.
  # `kind` ("blacklist" / "whitelist") only affects the log message, keeping
  # it identical to the messages the two previously-duplicated closures emitted.
  defp matches_any?(node, patterns, kind) do
    Enum.any?(patterns, fn
      ^node ->
        true

      %Regex{} = pattern ->
        Regex.match?(pattern, node)

      pattern when is_binary(pattern) ->
        case Regex.compile(pattern) do
          {:ok, rx} ->
            Regex.match?(rx, node)

          {:error, reason} ->
            :ok =
              Logger.warn(
                "[libring] ignore_node?/3: invalid #{kind} pattern (#{inspect(pattern)}): #{inspect(reason)}"
              )

            false
        end
    end)
  end
end
|
lib/utils.ex
| 0.691914 | 0.412382 |
utils.ex
|
starcoder
|
defmodule ISOBMFFLang do
  @moduledoc """
  ISO based media file format derived formats such as Apple's MOV and MPEG's MP4 pack language information in 16bit binaries.

  How these language codes are packed varies though from format to format. Apple has a table of QuickTime language codes that
  they enforce in the MOV format while MP4 uses a technique of packing 3 character ISO 639-2/T language codes into 3 unsigned
  5 bit integers that they then shift and add.

  Sounds complicated? You would be right, this hex is meant to abstract away the complexity and get right down to business.
  At your disposal you have several convenience functions, they all do the same thing but take different types of input.
  If you have the language int packed as a 16 bit bitstring straight from a MP4 container use `#int16_to_lang`, if you've unpacked
  it to a Elixir integer use `#int_to_lang` or if you are presented with the raw 15 bit bitstring use `#int15_to_lang`.
  You get the picture the result will always be the same.

  ## Examples

      # QuickTime language code value for swedish (MOV container)
      iex> ISOBMFFLang.int_to_lang(5)
      {:ok, "swe"}

      # Packed ISO Language Code for undefined language (MP4 container)
      iex> ISOBMFFLang.int16_to_lang(<<0::1, 21956::15>>)
      {:ok, "und"}
  """

  # Quicktime language codes as per Table 5-1
  # https://developer.apple.com/library/content/documentation/QuickTime/QTFF/QTFFChap4/qtff4.html#//apple_ref/doc/uid/TP40000939-CH206-34353
  #
  # NOTE: this must be a keyword list, NOT a map. Several ISO 639-2 codes map
  # to more than one QuickTime code (zho, nld, aze, mon, msa), and duplicate
  # keys in a map literal collapse (the last entry wins). The previous map
  # version silently dropped QuickTime codes 4 (Dutch), 19 (Traditional
  # Chinese), 49, 57 and 83, causing them to decode via the packed-character
  # fallback into garbage. A keyword list preserves every {code, number} pair.
  @quicktime_language_codes [
    eng: 0,     # English
    fre: 1,     # French
    deu: 2,     # German
    ita: 3,     # Italian
    nld: 4,     # Dutch
    swe: 5,     # Swedish
    spa: 6,     # Spanish
    dan: 7,     # Danish
    por: 8,     # Portuguese
    nor: 9,     # Norwegian
    heb: 10,    # Hebrew
    jpn: 11,    # Japanese
    ara: 12,    # Arabic
    fin: 13,    # Finnish
    ell: 14,    # Greek
    isl: 15,    # Icelandic
    mlt: 16,    # Maltese
    tur: 17,    # Turkish
    hrv: 18,    # Croatian
    zho: 19,    # Traditional Chinese (ISO 639-2 can't express script differences, so zho)
    urd: 20,    # Urdu
    hin: 21,    # Hindi
    tha: 22,    # Thai
    kor: 23,    # Korean
    lit: 24,    # Lithuanian
    pol: 25,    # Polish
    hun: 26,    # Hungarian
    est: 27,    # Estonian
    lav: 28,    # Latvian / Lettish
    sme: 29,    # Lappish / Saamish (used code for Nothern Sami)
    fao: 30,    # Faeroese
    fas: 31,    # Farsi
    rus: 32,    # Russian
    zho: 33,    # Simplified Chinese (ISO 639-2 can't express script differences, so zho)
    nld: 34,    # Flemish (no ISO 639-2 code, used Dutch code)
    gle: 35,    # Irish
    sqi: 36,    # Albanian
    ron: 37,    # Romanian
    ces: 38,    # Czech
    slk: 39,    # Slovak
    slv: 40,    # Slovenian
    yid: 41,    # Yiddish
    srp: 42,    # Serbian
    mkd: 43,    # Macedonian
    bul: 44,    # Bulgarian
    ukr: 45,    # Ukrainian
    bel: 46,    # Byelorussian
    uzb: 47,    # Uzbek
    kaz: 48,    # Kazakh
    aze: 49,    # Azerbaijani
    aze: 50,    # AzerbaijanAr (presumably script difference? used aze here)
    hye: 51,    # Armenian
    kat: 52,    # Georgian
    mol: 53,    # Moldavian
    kir: 54,    # Kirghiz
    tgk: 55,    # Tajiki
    tuk: 56,    # Turkmen
    mon: 57,    # Mongolian
    mon: 58,    # MongolianCyr (presumably script difference? used mon here)
    pus: 59,    # Pashto
    kur: 60,    # Kurdish
    kas: 61,    # Kashmiri
    snd: 62,    # Sindhi
    bod: 63,    # Tibetan
    nep: 64,    # Nepali
    san: 65,    # Sanskrit
    mar: 66,    # Marathi
    ben: 67,    # Bengali
    asm: 68,    # Assamese
    guj: 69,    # Gujarati
    pan: 70,    # Punjabi
    ori: 71,    # Oriya
    mal: 72,    # Malayalam
    kan: 73,    # Kannada
    tam: 74,    # Tamil
    tel: 75,    # Telugu
    sin: 76,    # Sinhalese
    mya: 77,    # Burmese
    khm: 78,    # Khmer
    lao: 79,    # Lao
    vie: 80,    # Vietnamese
    ind: 81,    # Indonesian
    tgl: 82,    # Tagalog
    msa: 83,    # MalayRoman
    msa: 84,    # MalayArabic
    amh: 85,    # Amharic
    orm: 87,    # Galla (same as Oromo?)
    orm: 87,    # Oromo
    som: 88,    # Somali
    swa: 89,    # Swahili
    kin: 90,    # Ruanda
    run: 91,    # Rundi
    nya: 92,    # Chewa
    mlg: 93,    # Malagasy
    epo: 94,    # Esperanto
    cym: 128,   # Welsh
    eus: 129,   # Basque
    cat: 130,   # Catalan
    lat: 131,   # Latin
    que: 132,   # Quechua
    grn: 133,   # Guarani
    aym: 134,   # Aymara
    tat: 135,   # Tatar
    uig: 136,   # Uighur
    dzo: 137,   # Dzongkha
    jav: 138,   # JavaneseRom
    und: 32767, # Unspecified,
  ]

  @doc """
  Converts a 15 bit ISOBMFF integer language representation into ISO 639-2 language code.

  Returns an error if integer value is not within range for a uint15

  ## Examples

      # QuickTime language code value for swedish (MOV container)
      iex> ISOBMFFLang.int_to_lang(5)
      {:ok, "swe"}

      # Packed ISO Language Code for undefined language (MP4 container)
      iex> ISOBMFFLang.int_to_lang(21956)
      {:ok, "und"}

      iex> ISOBMFFLang.int_to_lang(-1)
      {:error, "Failed to parse integer value: Integer out of bounds."}

      iex> ISOBMFFLang.int_to_lang(32768)
      {:error, "Failed to parse integer value: Integer out of bounds."}
  """
  @max_int15 32767
  def int_to_lang(i) when i <= @max_int15 and i >= 0, do: <<i::15>> |> int15_to_lang
  def int_to_lang(_), do: {:error, "Failed to parse integer value: Integer out of bounds."}

  @doc """
  Converts a 16 bit, zero padded, uint15 ISOBMFF language representation into ISO 639-2 language code.

  Returns an error if integer value is not within range for a uint15

  ## Examples

      # QuickTime language code value for swedish (MOV container)
      iex> ISOBMFFLang.int16_to_lang(<<0::1, 5::15>>)
      {:ok, "swe"}

      # Packed ISO Language Code for undefined language (MP4 container)
      iex> ISOBMFFLang.int16_to_lang(<<0::1, 21956::15>>)
      {:ok, "und"}

      iex> ISOBMFFLang.int16_to_lang(<<-1::14>>)
      {:error, "Failed to parse 16 bit integer value: Integer out of bounds."}

      iex> ISOBMFFLang.int16_to_lang(<<32768::17>>)
      {:error, "Failed to parse 16 bit integer value: Integer out of bounds."}
  """
  def int16_to_lang(<<0::1, i::15>>), do: i |> int_to_lang
  def int16_to_lang(_), do: {:error, "Failed to parse 16 bit integer value: Integer out of bounds."}

  @doc """
  Converts a 15 bit ISOBMFF integer language representation into ISO 639-2 language code.

  Returns an error if integer value is not within range for a uint15

  ## Examples

      # QuickTime language code value for swedish (MOV container)
      iex> ISOBMFFLang.int15_to_lang(<<5::15>>)
      {:ok, "swe"}

      # Packed ISO Language Code for undefined language (MP4 container)
      iex> ISOBMFFLang.int15_to_lang(<<21956::15>>)
      {:ok, "und"}

      iex> ISOBMFFLang.int15_to_lang(<<1::14>>)
      {:error, "Failed to parse 15 bit integer value: Integer out of bounds."}

      iex> ISOBMFFLang.int15_to_lang(<<32768::17>>)
      {:error, "Failed to parse 15 bit integer value: Integer out of bounds."}
  """
  # Generate one clause per known QuickTime code. `Enum.uniq/1` drops exact
  # duplicate pairs (`orm: 87` is listed twice) so no unreachable duplicate
  # clauses are emitted.
  for {name, number} <- Enum.uniq(@quicktime_language_codes) do
    def int15_to_lang(<<unquote(number)::15>>), do: {:ok, unquote(to_string(name))}
  end

  # Otherwise treat the 15 bits as three packed 5-bit characters: each 5-bit
  # value plus 0x60 yields one lowercase ISO 639-2/T character.
  def int15_to_lang(<<a::5, b::5, c::5>>), do: {:ok, <<a + 0x60, b + 0x60, c + 0x60>>}
  def int15_to_lang(_), do: {:error, "Failed to parse 15 bit integer value: Integer out of bounds."}
end
|
lib/isobmff_lang.ex
| 0.856242 | 0.466967 |
isobmff_lang.ex
|
starcoder
|
defmodule MishkaDatabase.CRUD do
  @moduledoc """
  ## Simplified CRUD macro using Ecto

  With this module, you can easily implement CRUD-related items in your file wherever you need to build a query.
  These modules and their sub-macros were created more to create a one-piece structure, and you can implement your own custom items in umbrella projects.
  In the first step, to use the following macros, you must bind the requested information in the relevant module that you have already created as follows.

  ```elixir
  use MishkaDatabase.CRUD,
    module: YOURschemaMODULE,
    error_atom: :your_error_tag,
    repo: Your.Repo
  ```

  It should be noted that the following three parameters must be sent and also make sure you are connected to the database.

  ```elixir
  module
  error_atom
  repo
  ```
  """

  # MishkaUser.User custom Typespecs
  @type data_uuid() :: Ecto.UUID.t
  @type record_input() :: map()
  @type error_tag() :: atom()
  @type email() :: String.t()
  @type username() :: String.t()
  @type repo_data() :: Ecto.Schema.t()
  @type repo_error() :: Ecto.Changeset.t()

  @callback create(record_input()) ::
              {:error, :add, error_tag(), repo_error()} |
              {:ok, :add, error_tag(), repo_data()}

  @callback edit(record_input()) ::
              {:error, :edit, :uuid, error_tag()} |
              {:error, :edit, :get_record_by_id, error_tag()} |
              {:error, :edit, error_tag(), repo_error()} |
              {:ok, :edit, error_tag(), repo_data()}

  @callback delete(data_uuid()) ::
              {:error, :delete, :uuid, error_tag()} |
              {:error, :delete, :get_record_by_id, error_tag()} |
              {:error, :delete, :forced_to_delete, error_tag()} |
              {:error, :delete, error_tag(), repo_error()} |
              {:ok, :delete, error_tag(), repo_data()}

  @callback show_by_id(data_uuid()) ::
              {:error, :get_record_by_id, error_tag()} |
              {:ok, :get_record_by_id, error_tag(), repo_data()}

  defmacro __using__(opts) do
    quote(bind_quoted: [opts: opts]) do
      import MishkaDatabase.CRUD
      # Store the caller's configuration (module, error_atom, repo) so the
      # CRUD macros below can look it up at expansion time.
      @interface_module opts
    end
  end

  @doc """
  ### Creating a record macro

  ## Example
  ```elixir
  crud_add(map_of_info like: %{name: "trangell"})
  ```
  The input of this macro is a map and its output are a map. For example
  ```elixir
  {:ok, :add, error_atom, data}
  {:error, :add, error_atom, changeset}
  ```
  If you want only the selected parameters to be separated from the list of submitted parameters and sent to the database, use the same macro with input 2
  ### Example
  ```elixir
  crud_add(map_of_info like: %{name: "trangell"}, [:name])
  ```
  """
  defmacro crud_add(attrs) do
    quote do
      module_selected = Keyword.get(@interface_module, :module)
      error_atom = Keyword.get(@interface_module, :error_atom)
      repo = Keyword.get(@interface_module, :repo)

      add = module_selected.__struct__
      |> module_selected.changeset(unquote(attrs))
      |> repo.insert()

      case add do
        {:ok, data} -> {:ok, :add, error_atom, data}
        {:error, changeset} -> {:error, :add, error_atom, changeset}
      end
    end
  end

  defmacro crud_add(attrs, allowed_fields) do
    quote do
      module_selected = Keyword.get(@interface_module, :module)
      error_atom = Keyword.get(@interface_module, :error_atom)
      repo = Keyword.get(@interface_module, :repo)

      # Only the whitelisted fields are forwarded to the changeset.
      add = module_selected.__struct__
      |> module_selected.changeset(Map.take(unquote(attrs), unquote(allowed_fields)))
      |> repo.insert()

      case add do
        {:ok, data} -> {:ok, :add, error_atom, data}
        {:error, changeset} -> {:error, :add, error_atom, changeset}
      end
    end
  end

  @doc """
  ### Edit a record in a database Macro

  With the help of this macro, you can edit a record in the database with its ID. For this purpose, you must send the requested record ID along with the new Map parameters. Otherwise the macro returns the ID error.

  ## Example
  ```elixir
  crud_edit(map_of_info like: %{id: "6d80d5f4-781b-4fa8-9796-1821804de6ba", name: "trangell"})
  ```
  > Note that the sending ID must be of UUID type.

  The input of this macro is a map and its output are a map. For example
  ```elixir
  # If your request has been saved successfully
  {:ok, :edit, error_atom, info}
  # If your ID is not uuid type
  {:error, :edit, :uuid, error_atom}
  # If there is an error in sending the data
  {:error, :edit, error_atom, changeset}
  # If no record is found for your ID
  {:error, :edit, :get_record_by_id, error_atom}
  ```
  It should be noted that if you want only the selected fields to be separated from the submitted parameters and sent to the database, use the macro with dual input.

  ## Example
  ```elixir
  crud_edit(map_of_info like: %{id: "6d80d5f4-781b-4fa8-9796-1821804de6ba", name: "trangell"}, [:id, :name])
  ```
  """
  defmacro crud_edit(attr) do
    quote do
      module_selected = Keyword.get(@interface_module, :module)
      error_atom = Keyword.get(@interface_module, :error_atom)
      repo = Keyword.get(@interface_module, :repo)

      MishkaDatabase.CRUD.edit_record(unquote(attr), module_selected, error_atom, repo)
    end
  end

  defmacro crud_edit(attrs, allowed_fields) do
    quote do
      module_selected = Keyword.get(@interface_module, :module)
      error_atom = Keyword.get(@interface_module, :error_atom)
      repo = Keyword.get(@interface_module, :repo)

      MishkaDatabase.CRUD.edit_record(Map.take(unquote(attrs), unquote(allowed_fields)), module_selected, error_atom, repo)
    end
  end

  @doc """
  ### delete a record from the database with the help of ID Macro

  With the help of this macro, you can delete your requested record from the database.
  The input of this macro is a UUID and its output is a map

  ## Example
  ```elixir
  crud_delete("6d80d5f4-781b-4fa8-9796-1821804de6ba")
  ```

  You should note that this macro prevents the orphan data of the record requested to be deleted. So, use this macro when the other data is not dependent on the data with the ID sent by you.

  Outputs:

  ```elixir
  # This message will be returned when your data has been successfully deleted
  {:ok, :delete, error_atom, struct}
  # This error will be returned if the ID sent by you is not a UUID
  {:error, :delete, :uuid, error_atom}
  # This error is reversed when an error occurs while sending data
  {:error, :delete, error_atom, changeset}
  # This error will be reversed when there is no submitted ID in the database
  {:error, :delete, :get_record_by_id, error_atom}
  # This error is reversed when another record is associated with this record
  {:error, :delete, :forced_to_delete, error_atom}
  ```
  """
  defmacro crud_delete(id) do
    quote do
      module_selected = Keyword.get(@interface_module, :module)
      error_atom = Keyword.get(@interface_module, :error_atom)
      repo = Keyword.get(@interface_module, :repo)

      MishkaDatabase.CRUD.delete_record(unquote(id), module_selected, error_atom, repo)
    end
  end

  @doc """
  ### Macro Finding a record in a database with the help of ID

  With the help of this macro, you can send an ID that is of UUID type and call it if there is a record in the database.
  The output of this macro is map.

  # Example
  ```elixir
  crud_get_record("6d80d5f4-781b-4fa8-9796-1821804de6ba")
  ```

  Outputs:

  ```
  {:error, :get_record_by_id, error_atom}
  {:ok, :get_record_by_id, error_atom, record_info}
  ```
  """
  defmacro crud_get_record(id) do
    quote do
      module_selected = Keyword.get(@interface_module, :module)
      error_atom = Keyword.get(@interface_module, :error_atom)
      repo = Keyword.get(@interface_module, :repo)

      MishkaDatabase.CRUD.get_record_by_id(unquote(id), module_selected, error_atom, repo)
    end
  end

  @doc """
  ### Macro Find a record in the database with the help of the requested field

  With the help of this macro, you can find a field with the value you want, if it exists in the database. It should be noted that the field name must be entered as a String.

  # Example
  ```elixir
  crud_get_by_field("email", "<EMAIL>")
  ```

  Outputs:

  ```
  {:error, :get_record_by_field, error_atom}
  {:ok, :get_record_by_field, error_atom, record_info}
  ```
  """
  defmacro crud_get_by_field(field, value) do
    quote do
      module_selected = Keyword.get(@interface_module, :module)
      error_atom = Keyword.get(@interface_module, :error_atom)
      repo = Keyword.get(@interface_module, :repo)

      MishkaDatabase.CRUD.get_record_by_field(unquote(field), unquote(value), module_selected, error_atom, repo)
    end
  end

  # functions to create macro

  @doc false
  def update(changeset, attrs, module, repo) do
    module.changeset(changeset, attrs)
    |> repo.update
  end

  @doc false
  def uuid(id) do
    case Ecto.UUID.cast(id) do
      {:ok, record_id} -> {:ok, :uuid, record_id}
      _ -> {:error, :uuid}
    end
  end

  @doc false
  def get_record_by_id(id, module, error_atom, repo) do
    case repo.get(module, id) do
      nil -> {:error, :get_record_by_id, error_atom}
      record_info -> {:ok, :get_record_by_id, error_atom, record_info}
    end
  rescue
    _ -> {:error, :get_record_by_id, error_atom}
  end

  @doc false
  def get_record_by_field(field, value, module, error_atom, repo) do
    # SECURITY NOTE: `"#{field}":` creates an atom from the caller-supplied
    # field name at runtime. Atoms are never garbage collected, so `field`
    # must never come from unbounded/untrusted input.
    case repo.get_by(module, "#{field}": value) do
      nil -> {:error, :get_record_by_field, error_atom}
      record_info -> {:ok, :get_record_by_field, error_atom, record_info}
    end
  rescue
    # Bug fix: previously returned `:get_record_by_id` here, mislabeling
    # failures of the by-field lookup.
    _ -> {:error, :get_record_by_field, error_atom}
  end

  @doc false
  def edit_record(attrs, module, error_atom, repo) do
    with {:ok, :uuid, record_id} <- uuid(if Map.has_key?(attrs, :id), do: attrs.id, else: attrs["id"]),
         {:ok, :get_record_by_id, error_atom, record_info} <- get_record_by_id(record_id, module, error_atom, repo),
         {:ok, info} <- update(record_info, attrs, module, repo) do
      {:ok, :edit, error_atom, info}
    else
      {:error, :uuid} ->
        {:error, :edit, :uuid, error_atom}

      {:error, changeset} ->
        {:error, :edit, error_atom, changeset}

      _ ->
        {:error, :edit, :get_record_by_id, error_atom}
    end
  end

  @doc false
  def delete_record(id, module, error_atom, repo) do
    try do
      with {:ok, :uuid, record_id} <- uuid(id),
           {:ok, :get_record_by_id, error_atom, record_info} <- get_record_by_id(record_id, module, error_atom, repo),
           {:ok, struct} <- repo.delete(record_info) do
        {:ok, :delete, error_atom, struct}
      else
        {:error, :uuid} ->
          {:error, :delete, :uuid, error_atom}

        {:error, changeset} ->
          {:error, :delete, error_atom, changeset}

        _ ->
          {:error, :delete, :get_record_by_id, error_atom}
      end
    rescue
      # Foreign-key constraint violation: another record still references this one.
      _e in Ecto.ConstraintError -> {:error, :delete, :forced_to_delete, error_atom}
    end
  end
end
|
apps/mishka_database/lib/helpers/crud.ex
| 0.725065 | 0.748812 |
crud.ex
|
starcoder
|
defmodule ETS.Set do
@moduledoc """
Module for creating and interacting with :ets tables of the type `:set` and `:ordered_set`.
Sets contain "records" which are tuples. Sets are configured with a key position via the `keypos: integer` option.
If not specified, the default key position is 1. The element of the tuple record at the key position is that records key.
For example, setting the `keypos` to 2 means the key of an inserted record `{:a, :b}` is `:b`:
iex> {:ok, set} = Set.new(keypos: 2)
iex> Set.put!(set, {:a, :b})
iex> Set.get(set, :a)
{:ok, nil}
iex> Set.get(set, :b)
{:ok, {:a, :b}}
When a record is added to the table with `put`, it will overwrite an existing record
with the same key. `put_new` will only put the record if a matching key doesn't already exist.
## Examples
iex> {:ok, set} = Set.new(ordered: true)
iex> Set.put_new!(set, {:a, :b, :c})
iex> Set.to_list!(set)
[{:a, :b, :c}]
iex> Set.put_new!(set, {:d, :e, :f})
iex> Set.to_list!(set)
[{:a, :b, :c}, {:d, :e, :f}]
iex> Set.put_new!(set, {:a, :g, :h})
iex> Set.to_list!(set)
[{:a, :b, :c}, {:d, :e, :f}]
`put` and `put_new` take either a single tuple or a list of tuples. When inserting multiple records,
they are inserted in an atomic an isolated manner. `put_new` doesn't insert any records if any of
the new keys already exist in the set.
To make your set ordered (which maps to the `:ets` table type `:ordered_set`), specify `ordered: true`
in the options list. An ordered set will store records in term order of the key of the record. This is
helpful when using things like `first`, `last`, `previous`, `next`, and `to_list`, but comes with the penalty of
log(n) insert time vs consistent insert time of an unordered set.
## Working with named tables
The functions on `ETS.Set` require that you pass in an `ETS.Set` as the first argument. In some design patterns,
you may have the table name but an instance of an `ETS.Set` may not be available to you. If this is the case,
you should use `wrap_existing/1` to turn your table name atom into an `ETS.Set`. For example, a `GenServer` that
handles writes within the server, but reads in the client process would be implemented like this:
```
defmodule MyExampleGenServer do
use GenServer
# Client Functions
def get_token_for_user(user_id) do
:my_token_table
|> ETS.Set.wrap_existing!()
|> ETS.Set.get!(user_id)
|> elem(1)
end
def set_token_for_user(user_id, token) do
GenServer.call(__MODULE__, {:set_token_for_user, user_id, token})
end
# Server Functions
def init(_) do
{:ok, %{set: ETS.Set.new!(name: :my_token_table)}}
end
  def handle_call({:set_token_for_user, user_id, token}, _from, %{set: set} = state) do
    ETS.Set.put(set, {user_id, token})
    {:reply, :ok, state}
  end
end
```
"""
# Brings in helper macros used throughout this module
# (`take_opt/3`, `unwrap/1`, `unwrap_or_raise/1`).
use ETS.Utils

alias ETS.{
  Base,
  Set
}

# Struct wrapping an :ets `:set`/`:ordered_set` table:
#   * `:info`    - keyword list of table info cached at creation/wrap time
#   * `:ordered` - true when the underlying table type is :ordered_set
#   * `:table`   - the underlying :ets table reference
@type t :: %__MODULE__{
        info: keyword(),
        ordered: boolean(),
        table: ETS.table_reference()
      }

# Options accepted by `new/1`: every `ETS.Base` option plus `ordered: boolean`.
@type set_options :: [ETS.Base.option() | {:ordered, boolean()}]

defstruct table: nil, info: nil, ordered: nil
@doc """
Creates a new set with the specified options.

Note that the underlying :ets table will be attached to the process that calls `new`
and will be destroyed if that process dies.

Possible options:

* `name:` when specified, creates a named table with the specified name
* `ordered:` when true, records in set are ordered (default false)
* `protection:` :private, :protected, :public (default :protected)
* `heir:` :none | {heir_pid, heir_data} (default :none)
* `keypos:` integer (default 1)
* `read_concurrency:` boolean (default false)
* `write_concurrency:` boolean (default false)
* `compressed:` boolean (default false)

## Examples

    iex> {:ok, set} = Set.new(ordered: true, keypos: 3, read_concurrency: true, compressed: false)
    iex> Set.info!(set)[:read_concurrency]
    true

    # Named :ets tables via the name keyword
    iex> {:ok, set} = Set.new(name: :my_ets_table)
    iex> Set.info!(set)[:name]
    :my_ets_table

"""
@spec new(set_options) :: {:error, any()} | {:ok, Set.t()}
def new(opts \\ []) when is_list(opts) do
  {table_opts, ordered} = take_opt(opts, :ordered, false)

  if is_boolean(ordered) do
    # `with` lets any {:error, reason} from Base.new_table/2 fall through unchanged.
    with {:ok, {table, info}} <- Base.new_table(type(ordered), table_opts) do
      {:ok, %Set{table: table, info: info, ordered: ordered}}
    end
  else
    {:error, {:invalid_option, {:ordered, ordered}}}
  end
end

@doc """
Same as `new/1` but unwraps or raises on error.
"""
@spec new!(set_options) :: Set.t()
def new!(opts \\ []) do
  unwrap_or_raise(new(opts))
end

# Maps the `ordered` flag onto the matching :ets table type.
defp type(true), do: :ordered_set
defp type(false), do: :set
@doc """
Returns information on the set.

Second parameter forces updated information from ets, default (false) uses in-struct
cached information. Force should be used when requesting size and memory.

## Examples

    iex> {:ok, set} = Set.new(ordered: true, keypos: 3, read_concurrency: true, compressed: false)
    iex> {:ok, info} = Set.info(set)
    iex> info[:read_concurrency]
    true
    iex> {:ok, _} = Set.put(set, {:a, :b, :c})
    iex> {:ok, info} = Set.info(set)
    iex> info[:size]
    0
    iex> {:ok, info} = Set.info(set, true)
    iex> info[:size]
    1

"""
@spec info(Set.t(), boolean()) :: {:ok, keyword()} | {:error, any()}
def info(set, force_update \\ false)

# Forced refresh queries :ets directly; otherwise return the cached snapshot.
def info(%Set{table: table}, true), do: Base.info(table)
def info(%Set{info: cached}, false), do: {:ok, cached}

@doc """
Same as `info/1` but unwraps or raises on error.
"""
@spec info!(Set.t(), boolean()) :: keyword()
def info!(%Set{} = set, force_update \\ false) when is_boolean(force_update) do
  unwrap_or_raise(info(set, force_update))
end
@doc """
Returns underlying `:ets` table reference.

For use in functions that are not yet implemented. If you find yourself using this,
please consider submitting a PR to add the necessary function to `ETS`.

## Examples

    iex> set = Set.new!(name: :my_ets_table)
    iex> {:ok, table} = Set.get_table(set)
    iex> info = :ets.info(table)
    iex> info[:name]
    :my_ets_table

"""
@spec get_table(Set.t()) :: {:ok, ETS.table_reference()}
def get_table(%Set{} = set) do
  {:ok, set.table}
end

@doc """
Same as `get_table/1` but unwraps or raises on error
"""
@spec get_table!(Set.t()) :: ETS.table_reference()
def get_table!(%Set{} = set) do
  unwrap(get_table(set))
end
@doc """
Puts tuple record or list of tuple records into table. Overwrites records for existing key(s).

Inserts multiple records in an [atomic and isolated](http://erlang.org/doc/man/ets.html#concurrency) manner.

## Examples

    iex> {:ok, set} = Set.new(ordered: true)
    iex> {:ok, _} = Set.put(set, [{:a, :b, :c}, {:d, :e, :f}])
    iex> {:ok, _} = Set.put(set, {:g, :h, :i})
    iex> {:ok, _} = Set.put(set, {:d, :x, :y})
    iex> Set.to_list(set)
    {:ok, [{:a, :b, :c}, {:d, :x, :y}, {:g, :h, :i}]}

"""
@spec put(Set.t(), tuple() | list(tuple())) :: {:ok, Set.t()} | {:error, any()}
def put(%Set{table: table} = set, records) when is_list(records) do
  Base.insert_multi(table, records, set)
end

def put(%Set{table: table} = set, record) when is_tuple(record) do
  Base.insert(table, record, set)
end

@doc """
Same as `put/2` but unwraps or raises on error.
"""
@spec put!(Set.t(), tuple() | list(tuple())) :: Set.t()
def put!(%Set{} = set, record_or_records)
    when is_tuple(record_or_records) or is_list(record_or_records) do
  unwrap_or_raise(put(set, record_or_records))
end
@doc """
Same as `put/2` but doesn't put any records if one of the given keys already exists.

## Examples

    iex> set = Set.new!(ordered: true)
    iex> {:ok, _} = Set.put_new(set, [{:a, :b, :c}, {:d, :e, :f}])
    iex> {:ok, _} = Set.put_new(set, [{:a, :x, :y}, {:g, :h, :i}]) # skips due to duplicate :a key
    iex> {:ok, _} = Set.put_new(set, {:d, :z, :zz}) # skips due to duplicate :d key
    iex> Set.to_list!(set)
    [{:a, :b, :c}, {:d, :e, :f}]

"""
@spec put_new(Set.t(), tuple() | list(tuple())) :: {:ok, Set.t()} | {:error, any()}
def put_new(%Set{table: table} = set, records) when is_list(records) do
  Base.insert_multi_new(table, records, set)
end

def put_new(%Set{table: table} = set, record) when is_tuple(record) do
  Base.insert_new(table, record, set)
end

@doc """
Same as `put_new/2` but unwraps or raises on error.
"""
@spec put_new!(Set.t(), tuple() | list(tuple())) :: Set.t()
def put_new!(%Set{} = set, record_or_records)
    when is_tuple(record_or_records) or is_list(record_or_records) do
  unwrap_or_raise(put_new(set, record_or_records))
end
@doc """
Returns record with specified key or an error if no record found.

## Examples

    iex> Set.new!()
    iex> |> Set.put!({:a, :b, :c})
    iex> |> Set.put!({:d, :e, :f})
    iex> |> Set.fetch(:d)
    {:ok, {:d, :e, :f}}

    iex> Set.new!()
    iex> |> Set.put!({:a, :b, :c})
    iex> |> Set.put!({:d, :e, :f})
    iex> |> Set.fetch(:g)
    {:error, :key_not_found}

"""
# Spec fixed: fetch/2 never returns {:ok, nil} — a missing key is {:error, :key_not_found}.
@spec fetch(Set.t(), any()) :: {:ok, tuple()} | {:error, any()}
def fetch(%Set{table: table}, key) do
  case Base.lookup(table, key) do
    {:ok, []} -> {:error, :key_not_found}
    # A set holds at most one record per key, so exactly one match is success.
    {:ok, [record]} -> {:ok, record}
    # Multiple records for one key means the underlying table is not a set.
    {:ok, _} -> {:error, :invalid_set}
    {:error, reason} -> {:error, reason}
  end
end
@doc """
Returns record with specified key or the provided default (nil if not specified) if no record found.

## Examples

    iex> Set.new!()
    iex> |> Set.put!({:a, :b, :c})
    iex> |> Set.put!({:d, :e, :f})
    iex> |> Set.get(:d)
    {:ok, {:d, :e, :f}}

"""
@spec get(Set.t(), any(), any()) :: {:ok, tuple() | nil} | {:error, any()}
def get(%Set{table: table}, key, default \\ nil) do
  with {:ok, matches} <- Base.lookup(table, key) do
    case matches do
      [] -> {:ok, default}
      [record] -> {:ok, record}
      _ -> {:error, :invalid_set}
    end
  end
end

@doc """
Same as `get/3` but unwraps or raises on error.
"""
@spec get!(Set.t(), any(), any()) :: tuple() | nil
def get!(%Set{} = set, key, default \\ nil) do
  unwrap_or_raise(get(set, key, default))
end
@doc """
Returns element in specified position of record with specified key.

## Examples

    iex> Set.new!()
    iex> |> Set.put!({:a, :b, :c})
    iex> |> Set.put!({:d, :e, :f})
    iex> |> Set.get_element(:d, 2)
    {:ok, :e}

"""
@spec get_element(Set.t(), any(), non_neg_integer()) :: {:ok, any()} | {:error, any()}
def get_element(%Set{table: table}, key, pos) do
  Base.lookup_element(table, key, pos)
end

@doc """
Same as `get_element/3` but unwraps or raises on error.
"""
@spec get_element!(Set.t(), any(), non_neg_integer()) :: any()
def get_element!(%Set{} = set, key, pos) do
  unwrap_or_raise(get_element(set, key, pos))
end
@doc """
Returns records in the specified Set that match the specified pattern.

For more information on the match pattern, see the [erlang documentation](http://erlang.org/doc/man/ets.html#match-2)

## Examples

    iex> Set.new!(ordered: true)
    iex> |> Set.put!([{:a, :b, :c, :d}, {:e, :c, :f, :g}, {:h, :b, :i, :j}])
    iex> |> Set.match({:"$1", :b, :"$2", :_})
    {:ok, [[:a, :c], [:h, :i]]}

"""
@spec match(Set.t(), ETS.match_pattern()) :: {:ok, [tuple()]} | {:error, any()}
def match(%Set{table: table}, pattern) when is_atom(pattern) or is_tuple(pattern) do
  Base.match(table, pattern)
end

@doc """
Same as `match/2` but unwraps or raises on error.
"""
@spec match!(Set.t(), ETS.match_pattern()) :: [tuple()]
def match!(%Set{} = set, pattern) when is_atom(pattern) or is_tuple(pattern) do
  unwrap_or_raise(match(set, pattern))
end

@doc """
Same as `match/2` but limits number of results to the specified limit.

## Examples

    iex> set = Set.new!(ordered: true)
    iex> Set.put!(set, [{:a, :b, :c, :d}, {:e, :b, :f, :g}, {:h, :b, :i, :j}])
    iex> {:ok, {results, _continuation}} = Set.match(set, {:"$1", :b, :"$2", :_}, 2)
    iex> results
    [[:a, :c], [:e, :f]]

"""
@spec match(Set.t(), ETS.match_pattern(), non_neg_integer()) ::
        {:ok, {[tuple()], any() | :end_of_table}} | {:error, any()}
def match(%Set{table: table}, pattern, limit) do
  Base.match(table, pattern, limit)
end

@doc """
Same as `match/3` but unwraps or raises on error.
"""
@spec match!(Set.t(), ETS.match_pattern(), non_neg_integer()) ::
        {[tuple()], any() | :end_of_table}
def match!(%Set{} = set, pattern, limit) do
  unwrap_or_raise(match(set, pattern, limit))
end

@doc """
Matches next set of records from a match/3 or match/1 continuation.

## Examples

    iex> set = Set.new!(ordered: true)
    iex> Set.put!(set, [{:a, :b, :c, :d}, {:e, :b, :f, :g}, {:h, :b, :i, :j}])
    iex> {:ok, {results, continuation}} = Set.match(set, {:"$1", :b, :"$2", :_}, 2)
    iex> results
    [[:a, :c], [:e, :f]]
    iex> {:ok, {records2, continuation2}} = Set.match(continuation)
    iex> records2
    [[:h, :i]]
    iex> continuation2
    :end_of_table

"""
@spec match(any()) :: {:ok, {[tuple()], any() | :end_of_table}} | {:error, any()}
def match(continuation) do
  Base.match(continuation)
end

@doc """
Same as `match/1` but unwraps or raises on error.
"""
@spec match!(any()) :: {[tuple()], any() | :end_of_table}
def match!(continuation) do
  unwrap_or_raise(match(continuation))
end
@doc """
Deletes all records that match the specified pattern.

Always returns `:ok`, regardless of whether anything was deleted or not.

## Examples

    iex> set = Set.new!(ordered: true)
    iex> Set.put!(set, [{:a, :b, :c, :d}, {:e, :b, :f, :g}, {:h, :i, :j, :k}])
    iex> Set.match_delete(set, {:_, :b, :_, :_})
    {:ok, set}
    iex> Set.to_list!(set)
    [{:h, :i, :j, :k}]

"""
@spec match_delete(Set.t(), ETS.match_pattern()) :: {:ok, Set.t()} | {:error, any()}
def match_delete(%Set{table: table} = set, pattern)
    when is_atom(pattern) or is_tuple(pattern) do
  case Base.match_delete(table, pattern) do
    :ok -> {:ok, set}
    error -> error
  end
end

@doc """
Same as `match_delete/2` but unwraps or raises on error.
"""
@spec match_delete!(Set.t(), ETS.match_pattern()) :: Set.t()
def match_delete!(%Set{} = set, pattern) when is_atom(pattern) or is_tuple(pattern) do
  unwrap_or_raise(match_delete(set, pattern))
end
@doc """
Returns records in the specified Set that match the specified pattern.

For more information on the match pattern, see the [erlang documentation](http://erlang.org/doc/man/ets.html#match-2)

## Examples

    iex> Set.new!(ordered: true)
    iex> |> Set.put!([{:a, :b, :c, :d}, {:e, :c, :f, :g}, {:h, :b, :i, :j}])
    iex> |> Set.match_object({:"$1", :b, :"$2", :_})
    {:ok, [{:a, :b, :c, :d}, {:h, :b, :i, :j}]}

"""
@spec match_object(Set.t(), ETS.match_pattern()) :: {:ok, [tuple()]} | {:error, any()}
def match_object(%Set{table: table}, pattern) when is_atom(pattern) or is_tuple(pattern) do
  Base.match_object(table, pattern)
end

@doc """
Same as `match_object/2` but unwraps or raises on error.
"""
@spec match_object!(Set.t(), ETS.match_pattern()) :: [tuple()]
def match_object!(%Set{} = set, pattern) when is_atom(pattern) or is_tuple(pattern) do
  unwrap_or_raise(match_object(set, pattern))
end

@doc """
Same as `match_object/2` but limits number of results to the specified limit.

## Examples

    iex> set = Set.new!(ordered: true)
    iex> Set.put!(set, [{:a, :b, :c, :d}, {:e, :b, :f, :g}, {:h, :b, :i, :j}])
    iex> {:ok, {results, _continuation}} = Set.match_object(set, {:"$1", :b, :"$2", :_}, 2)
    iex> results
    [{:a, :b, :c, :d}, {:e, :b, :f, :g}]

"""
@spec match_object(Set.t(), ETS.match_pattern(), non_neg_integer()) ::
        {:ok, {[tuple()], any() | :end_of_table}} | {:error, any()}
def match_object(%Set{table: table}, pattern, limit) do
  Base.match_object(table, pattern, limit)
end

@doc """
Same as `match_object/3` but unwraps or raises on error.
"""
@spec match_object!(Set.t(), ETS.match_pattern(), non_neg_integer()) ::
        {[tuple()], any() | :end_of_table}
def match_object!(%Set{} = set, pattern, limit) do
  unwrap_or_raise(match_object(set, pattern, limit))
end

@doc """
Matches next set of records from a match_object/3 or match_object/1 continuation.

## Examples

    iex> set = Set.new!(ordered: true)
    iex> Set.put!(set, [{:a, :b, :c, :d}, {:e, :b, :f, :g}, {:h, :b, :i, :j}])
    iex> {:ok, {results, continuation}} = Set.match_object(set, {:"$1", :b, :"$2", :_}, 2)
    iex> results
    [{:a, :b, :c, :d}, {:e, :b, :f, :g}]
    iex> {:ok, {records2, continuation2}} = Set.match_object(continuation)
    iex> records2
    [{:h, :b, :i, :j}]
    iex> continuation2
    :end_of_table

"""
@spec match_object(any()) :: {:ok, {[tuple()], any() | :end_of_table}} | {:error, any()}
def match_object(continuation) do
  Base.match_object(continuation)
end

@doc """
Same as `match_object/1` but unwraps or raises on error.
"""
@spec match_object!(any()) :: {[tuple()], any() | :end_of_table}
def match_object!(continuation) do
  unwrap_or_raise(match_object(continuation))
end
@doc """
Returns the next batch of records from a `select/3` continuation.
"""
@spec select(ETS.continuation()) ::
        {:ok, {[tuple()], ETS.continuation()} | ETS.end_of_table()} | {:error, any()}
def select(continuation), do: Base.select(continuation)

@doc """
Same as `select/1` but unwraps or raises on error.
"""
@spec select!(ETS.continuation()) :: {[tuple()], ETS.continuation()} | ETS.end_of_table()
def select!(continuation) do
  unwrap_or_raise(select(continuation))
end
@doc """
Returns records in the specified Set that match the specified match specification.

For more information on the match specification, see the [erlang documentation](http://erlang.org/doc/man/ets.html#select-2)

## Examples

    iex> Set.new!(ordered: true)
    iex> |> Set.put!([{:a, :b, :c, :d}, {:e, :c, :f, :g}, {:h, :b, :i, :j}])
    iex> |> Set.select([{{:"$1", :b, :"$2", :_},[],[:"$$"]}])
    {:ok, [[:a, :c], [:h, :i]]}

"""
@spec select(Set.t(), ETS.match_spec()) :: {:ok, [tuple()]} | {:error, any()}
def select(%Set{table: table}, spec) when is_list(spec) do
  Base.select(table, spec)
end

@doc """
Same as `select/2` but unwraps or raises on error.
"""
@spec select!(Set.t(), ETS.match_spec()) :: [tuple()]
def select!(%Set{} = set, spec) when is_list(spec) do
  unwrap_or_raise(select(set, spec))
end

@doc """
Same as `select/2` but limits the number of results returned.
"""
@spec select(Set.t(), ETS.match_spec(), limit :: integer) ::
        {:ok, {[tuple()], ETS.continuation()} | ETS.end_of_table()} | {:error, any()}
def select(%Set{table: table}, spec, limit) when is_list(spec) do
  Base.select(table, spec, limit)
end

@doc """
Same as `select/3` but unwraps or raises on error.
"""
@spec select!(Set.t(), ETS.match_spec(), limit :: integer) ::
        {[tuple()], ETS.continuation()} | ETS.end_of_table()
def select!(%Set{} = set, spec, limit) when is_list(spec) do
  unwrap_or_raise(select(set, spec, limit))
end
@doc """
Deletes records in the specified Set that match the specified match specification.

For more information on the match specification, see the [erlang documentation](http://erlang.org/doc/man/ets.html#select_delete-2)

## Examples

    iex> set = Set.new!(ordered: true)
    iex> set
    iex> |> Set.put!([{:a, :b, :c, :d}, {:e, :c, :f, :g}, {:h, :b, :c, :h}])
    iex> |> Set.select_delete([{{:"$1", :b, :"$2", :_},[{:"==", :"$2", :c}],[true]}])
    {:ok, 2}
    iex> Set.to_list!(set)
    [{:e, :c, :f, :g}]

"""
@spec select_delete(Set.t(), ETS.match_spec()) :: {:ok, non_neg_integer()} | {:error, any()}
def select_delete(%Set{table: table}, spec) when is_list(spec) do
  Base.select_delete(table, spec)
end

@doc """
Same as `select_delete/2` but unwraps or raises on error.
"""
@spec select_delete!(Set.t(), ETS.match_spec()) :: non_neg_integer()
def select_delete!(%Set{} = set, spec) when is_list(spec) do
  unwrap_or_raise(select_delete(set, spec))
end
@doc """
Determines if specified key exists in specified set.

## Examples

    iex> set = Set.new!()
    iex> Set.has_key(set, :key)
    {:ok, false}
    iex> Set.put(set, {:key, :value})
    iex> Set.has_key(set, :key)
    {:ok, true}

"""
@spec has_key(Set.t(), any()) :: {:ok, boolean()} | {:error, any()}
def has_key(%Set{table: table}, key) do
  Base.has_key(table, key)
end

@doc """
Same as `has_key/2` but unwraps or raises on error.
"""
@spec has_key!(Set.t(), any()) :: boolean()
def has_key!(set, key) do
  unwrap_or_raise(has_key(set, key))
end
@doc """
Returns the first key in the specified Set. Set must be ordered or error is returned.

## Examples

    iex> set = Set.new!(ordered: true)
    iex> Set.first(set)
    {:error, :empty_table}
    iex> Set.put!(set, {:key1, :val})
    iex> Set.put!(set, {:key2, :val})
    iex> Set.first(set)
    {:ok, :key1}

"""
@spec first(Set.t()) :: {:ok, any()} | {:error, any()}
def first(%Set{ordered: false}) do
  {:error, :set_not_ordered}
end

def first(%Set{table: table}) do
  Base.first(table)
end

@doc """
Same as `first/1` but unwraps or raises on error
"""
@spec first!(Set.t()) :: any()
def first!(%Set{} = set) do
  unwrap_or_raise(first(set))
end

@doc """
Returns the last key in the specified Set. Set must be ordered or error is returned.

## Examples

    iex> set = Set.new!(ordered: true)
    iex> Set.last(set)
    {:error, :empty_table}
    iex> Set.put!(set, {:key1, :val})
    iex> Set.put!(set, {:key2, :val})
    iex> Set.last(set)
    {:ok, :key2}

"""
@spec last(Set.t()) :: {:ok, any()} | {:error, any()}
def last(%Set{ordered: false}) do
  {:error, :set_not_ordered}
end

def last(%Set{table: table}) do
  Base.last(table)
end

@doc """
Same as `last/1` but unwraps or raises on error
"""
@spec last!(Set.t()) :: any()
def last!(set) do
  unwrap_or_raise(last(set))
end
@doc """
Returns the next key in the specified Set.

The given key does not need to exist in the set. The key returned will be the first key
that exists in the set which is subsequent in term order to the key given.

Set must be ordered or error is returned.

## Examples

    iex> set = Set.new!(ordered: true)
    iex> Set.put!(set, {:key1, :val})
    iex> Set.put!(set, {:key2, :val})
    iex> Set.put!(set, {:key3, :val})
    iex> Set.first(set)
    {:ok, :key1}
    iex> Set.next(set, :key1)
    {:ok, :key2}
    iex> Set.next(set, :key2)
    {:ok, :key3}
    iex> Set.next(set, :key3)
    {:error, :end_of_table}
    iex> Set.next(set, :a)
    {:ok, :key1}
    iex> Set.next(set, :z)
    {:error, :end_of_table}

"""
@spec next(Set.t(), any()) :: {:ok, any()} | {:error, any()}
def next(%Set{ordered: false}, _key) do
  {:error, :set_not_ordered}
end

def next(%Set{table: table}, key) do
  Base.next(table, key)
end

@doc """
Same as `next/1` but unwraps or raises on error
"""
@spec next!(Set.t(), any()) :: any()
def next!(set, key) do
  unwrap_or_raise(next(set, key))
end

@doc """
Returns the previous key in the specified Set.

The given key does not need to exist in the set. The key returned will be the first key
that exists in the set which is previous in term order to the key given.

Set must be ordered or error is returned.

## Examples

    iex> set = Set.new!(ordered: true)
    iex> Set.put!(set, {:key1, :val})
    iex> Set.put!(set, {:key2, :val})
    iex> Set.put!(set, {:key3, :val})
    iex> Set.last(set)
    {:ok, :key3}
    iex> Set.previous(set, :key3)
    {:ok, :key2}
    iex> Set.previous(set, :key2)
    {:ok, :key1}
    iex> Set.previous(set, :key1)
    {:error, :start_of_table}
    iex> Set.previous(set, :a)
    {:error, :start_of_table}
    iex> Set.previous(set, :z)
    {:ok, :key3}

"""
@spec previous(Set.t(), any()) :: {:ok, any()} | {:error, any()}
def previous(%Set{ordered: false}, _key) do
  {:error, :set_not_ordered}
end

def previous(%Set{table: table}, key) do
  Base.previous(table, key)
end

@doc """
Same as `previous/1` but raises on :error

Returns previous key in table.
"""
@spec previous!(Set.t(), any()) :: any()
def previous!(%Set{} = set, key) do
  unwrap_or_raise(previous(set, key))
end
@doc """
Returns contents of table as a list.

## Examples

    iex> Set.new!(ordered: true)
    iex> |> Set.put!({:a, :b, :c})
    iex> |> Set.put!({:d, :e, :f})
    iex> |> Set.put!({:d, :e, :f})
    iex> |> Set.to_list()
    {:ok, [{:a, :b, :c}, {:d, :e, :f}]}

"""
@spec to_list(Set.t()) :: {:ok, [tuple()]} | {:error, any()}
def to_list(%Set{table: table}) do
  Base.to_list(table)
end

@doc """
Same as `to_list/1` but unwraps or raises on error.
"""
@spec to_list!(Set.t()) :: [tuple()]
def to_list!(%Set{} = set) do
  unwrap_or_raise(to_list(set))
end
@doc """
Deletes specified Set.

## Examples

    iex> {:ok, set} = Set.new()
    iex> {:ok, _} = Set.info(set, true)
    iex> {:ok, _} = Set.delete(set)
    iex> Set.info(set, true)
    {:error, :table_not_found}

"""
@spec delete(Set.t()) :: {:ok, Set.t()} | {:error, any()}
def delete(%Set{table: table} = set) do
  Base.delete(table, set)
end

@doc """
Same as `delete/1` but unwraps or raises on error.
"""
@spec delete!(Set.t()) :: Set.t()
def delete!(%Set{} = set) do
  unwrap_or_raise(delete(set))
end

@doc """
Deletes record with specified key in specified Set.

## Examples

    iex> set = Set.new!()
    iex> Set.put(set, {:a, :b, :c})
    iex> Set.delete(set, :a)
    iex> Set.get!(set, :a)
    nil

"""
@spec delete(Set.t(), any()) :: {:ok, Set.t()} | {:error, any()}
def delete(%Set{table: table} = set, key) do
  Base.delete_records(table, key, set)
end

@doc """
Same as `delete/2` but unwraps or raises on error.
"""
@spec delete!(Set.t(), any()) :: Set.t()
def delete!(%Set{} = set, key) do
  unwrap_or_raise(delete(set, key))
end

@doc """
Deletes all records in specified Set.

## Examples

    iex> set = Set.new!()
    iex> set
    iex> |> Set.put!({:a, :b, :c})
    iex> |> Set.put!({:b, :b, :c})
    iex> |> Set.put!({:c, :b, :c})
    iex> |> Set.to_list!()
    [{:c, :b, :c}, {:b, :b, :c}, {:a, :b, :c}]
    iex> Set.delete_all(set)
    iex> Set.to_list!(set)
    []

"""
@spec delete_all(Set.t()) :: {:ok, Set.t()} | {:error, any()}
def delete_all(%Set{table: table} = set) do
  Base.delete_all_records(table, set)
end

@doc """
Same as `delete_all/1` but unwraps or raises on error.
"""
@spec delete_all!(Set.t()) :: Set.t()
def delete_all!(%Set{} = set) do
  unwrap_or_raise(delete_all(set))
end
@doc """
Wraps an existing :ets :set or :ordered_set in a Set struct.

## Examples

    iex> :ets.new(:my_ets_table, [:set, :named_table])
    iex> {:ok, set} = Set.wrap_existing(:my_ets_table)
    iex> Set.info!(set)[:name]
    :my_ets_table

"""
@spec wrap_existing(ETS.table_identifier()) :: {:ok, Set.t()} | {:error, any()}
def wrap_existing(table_identifier) do
  # `with` passes any {:error, reason} from Base.wrap_existing/2 straight through.
  with {:ok, {table, info}} <- Base.wrap_existing(table_identifier, [:set, :ordered_set]) do
    {:ok, %Set{table: table, info: info, ordered: info[:type] == :ordered_set}}
  end
end

@doc """
Same as `wrap_existing/1` but unwraps or raises on error.
"""
@spec wrap_existing!(ETS.table_identifier()) :: Set.t()
def wrap_existing!(table_identifier) do
  unwrap_or_raise(wrap_existing(table_identifier))
end
@doc """
Transfers ownership of a Set to another process.

## Examples

    iex> set = Set.new!()
    iex> receiver_pid = spawn(fn -> Set.accept() end)
    iex> Set.give_away(set, receiver_pid)
    {:ok, set}

    iex> set = Set.new!()
    iex> dead_pid = ETS.TestUtils.dead_pid()
    iex> Set.give_away(set, dead_pid)
    {:error, :recipient_not_alive}

"""
@spec give_away(Set.t(), pid(), any()) :: {:ok, Set.t()} | {:error, any()}
def give_away(%Set{table: table} = set, pid, gift \\ []) do
  Base.give_away(table, pid, gift, set)
end

@doc """
Same as `give_away/3` but unwraps or raises on error.
"""
@spec give_away!(Set.t(), pid(), any()) :: Set.t()
def give_away!(%Set{} = set, pid, gift \\ []) do
  unwrap_or_raise(give_away(set, pid, gift))
end
@doc """
Waits to accept ownership of a table after it is given away. Successful receipt will
return `{:ok, %{set: set, from: from, gift: gift}}` where `from` is the pid of the previous
owner, and `gift` is any additional metadata sent with the table.

A timeout may be given in milliseconds, which will return `{:error, :timeout}` if reached.

See `give_away/3` for more information.
"""
# Spec fixed: accept/1 takes an optional timeout and, on success, returns the map
# shape documented above — the previous spec claimed a zero-arity 4-tuple result.
@spec accept(timeout()) ::
        {:ok, %{set: Set.t(), from: pid(), gift: any()}} | {:error, any()}
def accept(timeout \\ :infinity) do
  with {:ok, table, from, gift} <- Base.accept(timeout),
       {:ok, set} <- Set.wrap_existing(table) do
    {:ok, %{set: set, from: from, gift: gift}}
  end
end
@doc """
For processes which may receive ownership of a Set unexpectedly - either via `give_away/3` or
by being named the Set's heir (see `new/1`) - the module should include at least one `accept`
clause. For example, if we want a server to inherit Sets after their previous owner dies:

```
defmodule Receiver do
  use GenServer
  alias ETS.Set
  require ETS.Set

  ...

  Set.accept :owner_crashed, set, _from, state do
    new_state = Map.update!(state, :crashed_sets, &[set | &1])
    {:noreply, new_state}
  end
end
```

The first argument is a unique identifier which should match either the "heir_data"
in `new/1`, or the "gift" in `give_away/3`.

The other arguments declare the variables which may be used in the `do` block:
the received Set, the pid of the previous owner, and the current state of the process.

The return value should be in the form {:noreply, new_state}, or one of the similar
returns expected by `handle_info`/`handle_cast`.
"""
defmacro accept(id, table, from, state, do: contents) do
  quote do
    require Base

    # Delegates to Base.accept/5, which generates the handle_info clause that
    # matches the ETS-TRANSFER message for the given `id`.
    Base.accept unquote(id), unquote(table), unquote(from), unquote(state) do
      # Rebind the caller's `table` variable (via var!) so that inside the
      # user-supplied `do` block it holds a wrapped %Set{} rather than the raw
      # :ets table reference delivered in the transfer message.
      var!(unquote(table)) = Set.wrap_existing!(unquote(table))
      unquote(contents)
    end
  end
end
end
|
lib/ets/set.ex
| 0.920553 | 0.883739 |
set.ex
|
starcoder
|
defmodule RatError.Formatter do
  @moduledoc """
  Formats a RAT error.

  Formatter is used to retrieve the error Map result by formatting the
  parameters (error code, message, environment variables and so on) with the
  specified Structure (see 'config/*.exs' for detail).
  """

  alias RatError.Structure

  # Fields copied from the caller's `Macro.Env` into the error map.
  @env_keys [
    :file,
    :function,
    :line,
    :module
  ]

  @doc """
  Format a RAT error with the specified Structure.

  ## Examples

      iex> support_keys = %{code: :code, message: :message}
      iex> structure = %Structure{node: :err, keys: support_keys}
      iex> message = "Bad response!"
      iex> Formatter.format(structure, __ENV__, :bad_response, message)
      %{err: %{code: :bad_response, message: "Bad response!"}}

      iex> support_keys = %{code: :code, message: :message}
      iex> structure = %Structure{keys: support_keys}
      iex> message = "Out of memory!"
      iex> Formatter.format(structure, __ENV__, :no_memory, message)
      %{code: :no_memory, message: "Out of memory!"}

  """
  def format(
        %Structure{} = structure,
        %Macro.Env{} = env,
        error_code,
        error_message
      ) do
    entries =
      %{}
      |> format_code(structure, error_code)
      |> format_message(structure, error_message)
      |> format_env_values(structure, env)

    # Nest everything under the configured node key, when one is set.
    if node = structure.node do
      %{node => entries}
    else
      entries
    end
  end

  defp format_code(entries, structure, value),
    do: format_entry(entries, structure, :code, value)

  defp format_message(entries, structure, value),
    do: format_entry(entries, structure, :message, value)

  defp format_env_values(entries, structure, env) do
    Enum.reduce(@env_keys, entries, fn key, acc ->
      format_entry(acc, structure, key, Map.get(env, key))
    end)
  end

  # Adds `value` under the configured field name for `key`; a key with no
  # configured field name is silently skipped.
  defp format_entry(entries, structure, key, value) when is_atom(key) do
    case get_field_name(structure.keys, key) do
      nil -> entries
      field -> Map.put(entries, field, value)
    end
  end

  defp get_field_name(nil, _key), do: nil

  defp get_field_name(support_keys, key) when is_map(support_keys),
    do: support_keys[key]
end
|
lib/rat_error/formatter.ex
| 0.801936 | 0.413448 |
formatter.ex
|
starcoder
|
defmodule GenQueue do
  @moduledoc """
  A behaviour module for implementing queues.

  GenQueue relies on adapters to handle the specifics of how the queues
  are run. At its most simple, this can mean simple FIFO queues. At its
  most advanced, this can mean full async job queues with retries and
  backoffs. By providing a standard interface for such tools - ease in
  switching between different implementations is assured.

  ## Example

  The GenQueue behaviour abstracts the common queue interactions.
  Developers are only required to implement the callbacks and functionality
  they are interested in via adapters.

  Let's start with a simple FIFO queue:

      defmodule Queue do
        use GenQueue
      end

      # Start the queue
      Queue.start_link()

      # Push items into the :foo queue
      Queue.push(:hello)
      #=> {:ok, :hello}
      Queue.push(:world)
      #=> {:ok, :world}

      # Pop items from the :foo queue
      Queue.pop()
      #=> {:ok, :hello}
      Queue.pop()
      #=> {:ok, :world}

  We start our enqueuer by calling `start_link/1`. This call is then
  forwarded to our adapter. In this case, we dont specify an adapter
  anywhere, so it defaults to the simple FIFO queue implemented with
  the included `GenQueue.Adapters.Simple`.

  We can then add items into our simple FIFO queues with `push/2`, as
  well as remove them with `pop/1`.

  ## use GenQueue and adapters

  As we can see from above - implementing a simple queue is easy. But
  we can further extend our queues by creating our own adapters or by using
  external libraries. Simply specify the adapter name in your config.

      config :my_app, MyApp.Enqueuer, [
        adapter: GenQueue.MyAdapter
      ]

      defmodule MyApp.Enqueuer do
        use GenQueue, otp_app: :my_app
      end

  We can then create our own adapter by creating an adapter module that handles
  the callbacks specified by `GenQueue.Adapter`.

      defmodule MyApp.MyAdapter do
        use GenQueue.Adapter

        def handle_push(gen_queue, item) do
          IO.inspect(item)
          {:ok, item}
        end
      end

  ## Current adapters

  Currently, the following adapters are available:

  * [GenQueue Exq](https://github.com/nsweeting/gen_queue_exq) - Redis-backed job queue.
  * [GenQueue TaskBunny](https://github.com/nsweeting/gen_queue_task_bunny) - RabbitMQ-backed job queue.
  * [GenQueue Verk](https://github.com/nsweeting/gen_queue_verk) - Redis-backed job queue.
  * [GenQueue OPQ](https://github.com/nsweeting/gen_queue_opq) - GenStage-backed job queue.

  ## Job queues

  One of the benefits of using `GenQueue` is that it can abstract common tasks
  like job enqueueing. We can then provide a common API for the various forms
  of job enqueing we would like to implement, as well as easily swap
  implementations.

  Please refer to the documentation for each adapter for more details.
  """

  @callback start_link(opts :: Keyword.t()) ::
              {:ok, pid}
              | {:error, {:already_started, pid}}
              | {:error, term}

  @doc """
  Invoked to push an item to a queue

  ## Parameters:
    * `item` - Any valid term
    * `opts` - Any options that may be valid to an adapter

  ## Returns:
    * `{:ok, item}` if the operation was successful
    * `{:error, reason}` if there was an error
  """
  @callback push(item :: any, opts :: Keyword.t()) :: {:ok, any} | {:error, any}

  @doc """
  Same as `push/2` but returns the item or raises if an error occurs.
  """
  @callback push!(item :: any, opts :: Keyword.t()) :: any | no_return

  @doc """
  Invoked to pop an item from a queue

  Parameters:
    * `opts` - Any options that may be valid to an adapter

  ## Returns:
    * `{:ok, item}` if the operation was successful
    * `{:error, reason}` if there was an error
  """
  @callback pop(opts :: Keyword.t()) :: {:ok, any} | {:error, any}

  @doc """
  Same as `pop/1` but returns the item or raises if an error occurs.
  """
  @callback pop!(opts :: Keyword.t()) :: any | no_return

  @doc """
  Invoked to remove all items from a queue

  Parameters:
    * `opts` - Any options that may be valid to an adapter

  ## Returns:
    * `{:ok, number_of_items_removed}` if the operation was successful
    * `{:error, reason}` if there was an error
  """
  @callback flush(opts :: Keyword.t()) :: {:ok, integer} | {:error, any}

  @doc """
  Invoked to get the number of items in a queue

  Parameters:
    * `opts` - Any options that may be valid to an adapter

  ## Returns:
    * `{:ok, number_of_items}` if the operation was successful
    * `{:error, reason}` if there was an error
  """
  @callback length(opts :: Keyword.t()) :: {:ok, integer} | {:error, any}

  @doc """
  Invoked to return the adapter for a queue
  """
  @callback adapter :: GenQueue.Adapter.t()

  @type t :: module

  # Used when the caller's config declares no adapter.
  @default_adapter GenQueue.Adapters.Simple

  defmacro __using__(opts) do
    quote bind_quoted: [opts: opts] do
      @behaviour GenQueue

      # Resolved once at compile time of the using module.
      @adapter GenQueue.config_adapter(__MODULE__, opts)

      def child_spec(arg) do
        %{
          id: __MODULE__,
          start: {__MODULE__, :start_link, [arg]}
        }
      end

      defoverridable child_spec: 1

      # Every queue operation below simply delegates to the configured adapter,
      # passing the using module so the adapter can look up per-queue config.
      def start_link(opts \\ []) do
        apply(@adapter, :start_link, [__MODULE__, opts])
      end

      def push(item, opts \\ []) do
        apply(@adapter, :handle_push, [__MODULE__, item, opts])
      end

      # NOTE(review): the original {:error, reason} is discarded here; the raised
      # message does not include it.
      def push!(item, opts \\ []) do
        case push(item, opts) do
          {:ok, item} -> item
          _ -> raise GenQueue.Error, "Failed to push item."
        end
      end

      def pop(opts \\ []) do
        apply(@adapter, :handle_pop, [__MODULE__, opts])
      end

      def pop!(opts \\ []) do
        case pop(opts) do
          {:ok, item} -> item
          _ -> raise GenQueue.Error, "Failed to pop item."
        end
      end

      def flush(opts \\ []) do
        apply(@adapter, :handle_flush, [__MODULE__, opts])
      end

      def length(opts \\ []) do
        apply(@adapter, :handle_length, [__MODULE__, opts])
      end

      def adapter do
        @adapter
      end
    end
  end

  @doc """
  Get the adapter for a GenQueue module based on the options provided. If
  no adapter if specified, the default `GenQueue.Adapters.Simple` is returned.

  Parameters:
    * `gen_queue` - GenQueue module to use
  """
  @spec config_adapter(GenQueue.t(), opts :: Keyword.t()) :: GenQueue.Adapter.t()
  def config_adapter(gen_queue, opts \\ []) do
    # Reads config under the :otp_app option (e.g. config :my_app, MyQueue, adapter: ...);
    # falls back to @default_adapter when no adapter key is configured.
    opts
    |> Keyword.get(:otp_app)
    |> Application.get_env(gen_queue, [])
    |> Keyword.get(:adapter, @default_adapter)
  end
end
|
lib/gen_queue.ex
| 0.906723 | 0.538983 |
gen_queue.ex
|
starcoder
|
defmodule StarkInfra.IssuingBin do
  @moduledoc """
  Groups IssuingBin related functions
  """

  alias __MODULE__, as: IssuingBin
  alias StarkInfra.Error
  alias StarkInfra.User.{Organization, Project}
  alias StarkInfra.Utils.{Check, Rest}

  @doc """
  The IssuingBin object displays information of BINs registered to your Workspace.
  They represent a group of cards that begin with the same numbers (BIN) and offer the same product to end customers.

  ## Attributes (return-only):
  - `:id` [string]: unique BIN number registered within the card network. ex: "53810200"
  - `:network` [string]: card network flag. ex: "mastercard"
  - `:settlement` [string]: settlement type. ex: "credit"
  - `:category` [string]: purchase category. ex: "prepaid"
  - `:client` [string]: client type. ex: "business"
  - `:updated` [DateTime]: latest update DateTime for the IssuingBin. ex: ~U[2020-3-10 10:30:0:0]
  - `:created` [DateTime]: creation datetime for the IssuingBin. ex: ~U[2020-03-10 10:30:0:0]
  """
  @enforce_keys [
    :id,
    :network,
    :settlement,
    :category,
    :client,
    :updated,
    :created
  ]
  # Every field is required, so the struct keys are exactly the enforced ones.
  defstruct @enforce_keys

  @type t() :: %__MODULE__{}

  @doc """
  Receive a stream of IssuingBin structs previously registered in the Stark Infra API

  ## Options:
  - `:limit` [integer, default nil]: maximum number of structs to be retrieved. Unlimited if nil. ex: 35
  - `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.

  ## Return:
  - stream of IssuingBin structs with updated attributes
  """
  @spec query(
    limit: integer,
    user: Project.t() | Organization.t() | nil
  ) ::
    {:ok, [IssuingBin.t()]} |
    {:error, [error: Error.t()]}
  def query(opts \\ []) do
    Rest.get_list(resource(), opts)
  end

  @doc """
  Same as query(), but it will unwrap the error tuple and raise in case of errors.
  """
  @spec query!(
    limit: integer,
    user: Project.t() | Organization.t() | nil
  ) :: any
  def query!(opts \\ []) do
    Rest.get_list!(resource(), opts)
  end

  @doc """
  Receive a list of up to 100 IssuingBin structs previously registered in the Stark Infra API and the cursor to the next page.

  ## Options:
  - `:cursor` [string, default nil]: cursor returned on the previous page function call
  - `:limit` [integer, default 100]: maximum number of structs to be retrieved. Unlimited if nil. ex: 35
  - `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.

  ## Return:
  - list of IssuingBin structs with updated attributes
  - cursor to retrieve the next page of IssuingBin structs
  """
  @spec page(
    cursor: binary,
    limit: integer,
    user: Project.t() | Organization.t() | nil
  ) ::
    {:ok, {binary, [IssuingBin.t()]}} |
    {:error, [error: Error.t()]}
  def page(opts \\ []) do
    Rest.get_page(resource(), opts)
  end

  @doc """
  Same as page(), but it will unwrap the error tuple and raise in case of errors.
  """
  @spec page!(
    cursor: binary,
    limit: integer,
    user: Project.t() | Organization.t() | nil
  ) :: any
  def page!(opts \\ []) do
    Rest.get_page!(resource(), opts)
  end

  # Resource descriptor consumed by the Rest helpers: API name plus the
  # function that turns a decoded JSON map into an IssuingBin struct.
  @doc false
  def resource() do
    {
      "IssuingBin",
      &resource_maker/1
    }
  end

  @doc false
  def resource_maker(json) do
    %IssuingBin{
      id: json[:id],
      network: json[:network],
      settlement: json[:settlement],
      category: json[:category],
      client: json[:client],
      updated: Check.datetime(json[:updated]),
      created: Check.datetime(json[:created])
    }
  end
end
|
lib/issuing_bin/issuing_bin.ex
| 0.858541 | 0.595257 |
issuing_bin.ex
|
starcoder
|
defmodule Flect.Logger do
@moduledoc """
Provides logging facilities for the various Flect tools.
If the `:flect_event_pid` application configuration key is set for the
`:flect` application, log messages will be sent as `{:flect_stdout, msg}`
(where `msg` is a binary) to that PID instead of being printed to standard
output.
Note also that if `:flect_event_pid` is set, the current terminal is
not ANSI-compatible, or the `FLECT_COLORS` environment variable is set to
`0`, colored output will be disabled.
If the `FLECT_DIAGS` environment variable is not set to `0`, the various
functions in this module will output caret diagnostics when a source
location is provided.
"""
# NOTE(review): this module is written in a legacy (pre-1.0) Elixir dialect:
# `//` default arguments, the `pid <- msg` send operator, record accessor
# calls such as `loc.line()`, and variables bound inside `if` branches being
# used afterwards. It will not compile on modern Elixir; do not modernize it
# in isolation from the rest of the project.
# NOTE(review): the spec below declares 2 parameters but the function takes 4
# (two defaulted); it should cover all four string arguments.
@spec colorize(String.t(), String.t()) :: String.t()
# Wraps `str` in a bright ANSI color followed by `sep`, then resets and
# appends `term`. Colors are suppressed when output is redirected to an
# event PID, the terminal is not ANSI-capable, or FLECT_COLORS=0.
defp colorize(str, color, sep // ":", term // " ") do
emit = IO.ANSI.terminal?() && :application.get_env(:flect, :flect_event_pid) == :undefined && System.get_env("FLECT_COLORS") != "0"
IO.ANSI.escape_fragment("%{#{color}, bright}#{str}#{sep}%{reset}#{term}", emit)
end
# Emits one line: to the configured event PID as {:flect_stdout, binary}
# when :flect_event_pid is set, otherwise to standard output.
@spec output(String.t()) :: :ok
defp output(str) do
_ = case :application.get_env(:flect, :flect_event_pid) do
{:ok, pid} -> pid <- {:flect_stdout, str <> "\n"}
:undefined -> IO.puts(str)
end
:ok
end
# Prints caret diagnostics for `loc` unless diagnostics are disabled via
# FLECT_DIAGS=0, the location is nil, or the source file cannot be rendered.
@spec output_diag(Flect.Compiler.Syntax.Location.t() | nil) :: :ok
defp output_diag(loc) do
if loc && System.get_env("FLECT_DIAGS") != "0" && (diag = diagnostic(loc)) do
output(diag)
end
end
# Prints each note as a "Note:" line followed by its caret diagnostic.
@spec output_diags([{String.t(), Flect.Compiler.Syntax.Location.t() | nil}]) :: :ok
defp output_diags(notes) do
Enum.each(notes, fn({msg, loc}) ->
output(colorize("Note", "blue") <> stringize_loc(loc) <> colorize(msg, "white", "", ""))
output_diag(loc)
end)
end
# Renders a location as "file:line:col: " (via the record's stringize/0),
# or the empty string when no location is available.
@spec stringize_loc(Flect.Compiler.Syntax.Location.t() | nil) :: String.t()
defp stringize_loc(loc) do
if loc do
"#{loc.stringize()}: "
else
""
end
end
@doc """
Prints an informational message. Returns `:ok`.
`str` must be a binary containing the message.
"""
@spec info(String.t()) :: :ok
def info(str) do
output(str)
end
@doc """
Prints a warning message. Colorized as yellow and white. Returns `:ok`.
`str` must be a binary containing the message. `loc` should be either `nil`
or a `Flect.Compiler.Syntax.Location` if printing annotated source code
is desirable. `notes` is a list of extra locations to print notes at.
"""
@spec warn(String.t(), Flect.Compiler.Syntax.Location.t() | nil, [{String.t(), Flect.Compiler.Syntax.Location.t() | nil}]) :: :ok
def warn(str, loc // nil, notes // []) do
output(colorize("Warning", "yellow") <> stringize_loc(loc) <> colorize(str, "white", "", ""))
output_diag(loc)
output_diags(notes)
end
@doc """
Prints an error message. Colorized as red and white. Returns `:ok`.
`str` must be a binary containing the message. `loc` should be either `nil`
or a `Flect.Compiler.Syntax.Location` if printing annotated source code
is desirable. `notes` is a list of extra locations to print notes at.
"""
@spec error(String.t(), Flect.Compiler.Syntax.Location.t() | nil, [{String.t(), Flect.Compiler.Syntax.Location.t() | nil}]) :: :ok
def error(str, loc // nil, notes // []) do
output(colorize("Error", "red") <> stringize_loc(loc) <> colorize(str, "white", "", ""))
output_diag(loc)
output_diags(notes)
end
@doc """
Prints a log message. Colorized as cyan and white. Returns `:ok`.
`str` must be a binary containing the message.
"""
@spec log(String.t()) :: :ok
def log(str) do
output(colorize("Log", "cyan") <> colorize(str, "white", "", ""))
end
@doc """
Prints a debug message if the `FLECT_DEBUG` environment variable is set
to `1`. Colorized as magenta and white. Returns `:ok`.
`str` must be a binary containing the message.
"""
@spec debug(String.t()) :: :ok
def debug(str) do
if System.get_env("FLECT_DEBUG") == "1" do
output(colorize("Debug", "magenta") <> colorize(str, "white", "", ""))
end
end
# Builds a caret diagnostic for `loc`: up to two lines of leading context,
# the offending line, a "^" marker under the offending column, and up to two
# lines of trailing context. Returns nil when the file is unreadable or any
# involved line contains non-printable characters.
@spec diagnostic(Flect.Compiler.Syntax.Location.t()) :: String.t() | nil
defp diagnostic(loc) do
# Locations are 1-based; line indices from Enum.with_index/1 are 0-based.
loc_line = loc.line() - 1
classify = fn(i) ->
cond do
i == loc_line -> true
i > loc_line - 3 && i < loc_line -> :prev
i < loc_line + 3 && i > loc_line -> :next
true -> nil
end
end
case File.read(loc.file()) do
{:ok, data} ->
lines = data |>
String.split("\n") |>
Enum.with_index() |>
Enum.map(fn({x, i}) -> {x, classify.(i)} end) |>
Enum.filter(fn({_, t}) -> t != nil end)
# If any of the lines contain non-printable characters, bail and don't print anything.
if Enum.any?(lines, fn({x, _}) -> !String.printable?(x) end) do
nil
else
prev = lines |> Enum.filter(fn({_, t}) -> t == :prev end) |> Enum.map(fn({x, _}) -> x end)
line = lines |> Enum.filter(fn({_, t}) -> t == true end) |> Enum.first() |> elem(0)
next = lines |> Enum.filter(fn({_, t}) -> t == :next end) |> Enum.map(fn({x, _}) -> x end)
# If the leading and/or following lines are just white space, don't output them.
# (Legacy Elixir scoping: these rebinds inside `if` leak to the outer scope.)
if Enum.all?(prev, fn(x) -> String.strip(x) == "" end), do: prev = []
if Enum.all?(next, fn(x) -> String.strip(x) == "" end), do: next = []
marker = generate_marker(line, loc.column() - 1, 0, "")
result = prev ++ [line] ++ [marker] ++ next
length = length(result)
result |>
Enum.with_index() |>
Enum.map(fn({x, i}) -> if i == length - 1, do: x, else: x <> "\n" end) |> Enum.join()
end
{:error, _} -> nil
end
end
# Walks `line` codepoint by codepoint, accumulating spaces (tabs preserved
# so the marker aligns under tabbed source) and a green "^" at column `col`.
# Falls back to a bare "^" when the line is shorter than the column.
@spec generate_marker(String.t(), non_neg_integer(), non_neg_integer(), String.t()) :: String.t()
defp generate_marker(line, col, ccol, acc) do
case String.next_codepoint(line) do
{cp, rest} ->
# Legacy Elixir scoping: `c` bound inside `if`/`else` is used below.
if ccol == col do
c = colorize("^", "green", "")
else
c = if cp == "\t", do: "\t", else: " "
end
generate_marker(rest, col, ccol + 1, acc <> c)
:no_codepoint ->
if acc != "" do
acc
else
colorize("^", "green", "")
end
end
end
end
|
lib/logger.ex
| 0.857321 | 0.549882 |
logger.ex
|
starcoder
|
defmodule Prelude.Map do
  @moduledoc "Functions operating on `maps`."

  @doc """
  Group a list of maps by a list of keys.

  Provide a list of maps, and a list of keys to group by. All maps must
  have all the group_by fields, other fields can vary.

  Maps that share the same values for every group key are collected into a
  list under that nested path (most recently seen first).

  For example:

      iex> Prelude.Map.group_by(
      ...>   [%{name: "stian", group: 1, cat: 2},
      ...>    %{name: "per", group: 1, cat: 1}],
      ...>   [:group, :cat])
      %{1 =>
        %{1 => [%{cat: 1, group: 1, name: "per"}],
          2 => [%{cat: 2, group: 1, name: "stian"}]}}
  """
  def group_by(maps, groups) when is_list(maps) and is_list(groups) do
    Enum.reduce(maps, %{}, fn x, acc ->
      extract_and_put(acc, x, groups)
    end)
  end

  # Inserts `item` under the nested path built from its values at the group
  # keys. Fixed: uses the :list variation so items with an identical group
  # path accumulate; previously `deep_put(map, path, [item])` used the :map
  # variation, which made the last item silently replace earlier ones.
  defp extract_and_put(map, item, groups) do
    path = Enum.map(groups, fn group -> Map.get(item, group) end)
    deep_put(map, path, item, :list)
  end

  @doc """
  Put an arbitrarily deep key into an existing map.

  Works also with Structs.
  If you want to create lists as values, provide :list as last parameter.
  If a value already exists at that level, it is turned into a list

  For example:

      # it works as expected with empty maps
      iex> Prelude.Map.deep_put(%{}, [:a, :b, :c], "0")
      %{a: %{b: %{c: "0"}}}

      # when provided a deep path, all intermediate items are converted to maps
      # this can lead to loss of data, eg:
      # a.b.c = 1 is replaced by a map to make path a.b.c.d = 2 possible
      iex> Prelude.Map.deep_put(%{a: %{b: %{c: "1"}}}, [:a, :b, :c, :d], "2")
      %{a: %{b: %{c: %{d: "2"}}}}

      # to collect values in a list, provide :list as last parameter.
      iex> Prelude.Map.deep_put(%{a: %{b: %{c: "1"}}}, [:a, :b, :c, :d], "2", :list)
      %{a: %{b: %{c: %{d: ["2"]}}}}

      # to collect values in a list, provide :list as last parameter.
      iex> Prelude.Map.deep_put(%{a: %{b: %{c: ["1"]}}}, [:a, :b, :c], "2", :list)
      %{a: %{b: %{c: ["2", "1"]}}}
  """
  def deep_put(map, path, val, variation \\ :map)

  # Structs: round-trip through a plain map, then restore the struct tag.
  def deep_put(map = %{__struct__: struct}, path, val, variation) do
    map
    |> Map.from_struct()
    |> deep_put(path, val, variation)
    |> Map.put(:__struct__, struct)
  end

  def deep_put(map, path, val, variation) do
    state = {map, []}

    res =
      Enum.reduce(path, state, fn x, {acc, cursor} ->
        # `cursor` is the reversed prefix of `path` walked so far.
        cursor = [x | cursor]
        final = length(cursor) == length(path)
        curr_val = get_in(acc, Enum.reverse(cursor))
        # Decide what to write at this level: an intermediate map, the final
        # value, or (for :list) the value merged into a list.
        newval = new_value(variation, curr_val, val, final)
        acc = put_in(acc, Enum.reverse(cursor), newval)
        {acc, cursor}
      end)

    res |> elem(0)
  end

  # :map variation — intermediate levels keep/become maps; the final level is
  # overwritten with `val` regardless of what was there.
  defp new_value(:map, curr_val, val, final) do
    case curr_val do
      h = %{} -> if final, do: val, else: h
      _ -> if final, do: val, else: %{} # override non-map value!
    end
  end

  # :list variation — the final level accumulates values by prepending to an
  # existing list (wrapping any previous scalar/map into the list).
  defp new_value(:list, curr_val, val, final) do
    case curr_val do
      h when is_list(h) -> [val | h]
      nil -> if final, do: [val], else: %{}
      h = %{} -> if final, do: [val, h], else: h
      h -> if final, do: [val, h], else: %{} # override non-map values!
    end
  end

  @doc """
  To keep the API consistent also a way to get deep nested values.
  Works also with Structs.
  """
  def deep_get(map = %{__struct__: _type}, path) do
    map
    |> Map.from_struct()
    |> get_in(path)
  end

  def deep_get(map, path), do: get_in(map, path)

  @doc """
  Remove a map key arbitrarily deep in a structure, similar to put_in.
  Works also with Structs.

  For example:

      iex> a = %{a: %{b: %{c: %{d: 1, e: 1}}}}
      ...> Prelude.Map.del_in(a, [:a, :b, :c, :d])
      %{a: %{b: %{c: %{e: 1}}}}
  """
  def del_in(map = %{__struct__: type}, path) do
    map
    |> Map.from_struct()
    |> del_in(path)
    |> Map.put(:__struct__, type)
  end

  def del_in(map, path) do
    # Split the path into the parent path and the final key to delete.
    [item | path] = path |> Enum.reverse()
    path = path |> Enum.reverse()
    obj = get_in(map, path)
    put_in(map, path, Map.delete(obj, item))
  end

  @doc "Turns all string map keys into atoms, leaving existing atoms alone (only top level)"
  def atomify(map) do
    map
    |> Enum.map(fn {k, v} -> {Prelude.String.to_atom(k), v} end)
    |> Enum.into(%{})
  end

  @doc "Turns all atom map keys into strings, leaving existing strings alone (only top level)"
  def stringify(map) do
    map
    |> Enum.map(fn {k, v} -> {Prelude.Atom.to_string(k), v} end)
    |> Enum.into(%{})
  end

  @doc "Converts strings to atoms, but leaves existing atoms alone"
  def to_atom(x) when is_atom(x), do: x
  def to_atom(x) when is_binary(x), do: String.to_atom(x)

  @doc "Appends to an array value in a map, creating one if the key does not exist"
  def append_list(map, key, val) do
    Map.update(map, key, [val], fn x -> List.insert_at(x, 0, val) end)
  end

  @doc "Switch the keys with the values in a map"
  def switch(map) when is_map(map) do
    map
    |> Enum.map(fn {k, v} -> {v, k} end)
    |> Enum.into(%{})
  end
end
|
lib/prelude/map.ex
| 0.791539 | 0.563348 |
map.ex
|
starcoder
|
defmodule Lua do
@moduledoc false
alias Lua.{Chunk, Error, State}
# NOTE(review): this module is a thin wrapper over the :luerl Erlang library.
# Luerl threads an interpreter state through every call and returns
# {result, state} tuples; the wrappers below re-wrap that state in
# %Lua.State{} and normalize the tuple order to {state, result}.
@doc "Encodes an Elixir term as a Lua value."
# NOTE(review): the spec omits the `is_function` clause, which returns
# {:function, wrapped_fun} rather than a plain scalar.
@spec encode(nil | boolean | number | binary | atom) :: nil | boolean | float | binary
def encode(term)
def encode(nil), do: nil
def encode(false), do: false
def encode(true), do: true
# Lua has a single number type, so integers are widened to floats.
def encode(value) when is_integer(value), do: :erlang.float(value)
def encode(value) when is_float(value), do: value
def encode(value) when is_binary(value), do: value
def encode(value) when is_atom(value), do: Atom.to_string(value)
def encode(value) when is_function(value), do: {:function, wrap_function(value)}
@doc "Encodes an Elixir term as a Lua value."
@spec encode(Lua.State.t, nil | boolean | number | binary | atom | map) ::
{Lua.State.t, nil | boolean | float | binary | {:tref, integer}}
def encode(%State{luerl: state}, value) do
{state, result} = _encode(state, value)
{State.wrap(state), result}
end
# Maps become Lua tables: allocate a table ref in the interpreter state and
# set each (stringified) key to its encoded value. Operates on the raw luerl
# state, not the %State{} wrapper.
@spec _encode(tuple, map) :: {tuple, {:tref, integer}}
def _encode(state, value) when is_map(value) do
{tref, state} = :luerl_emul.alloc_table(state)
state = Enum.reduce(value, state, fn({k, v}, state) ->
k = case k do
k when is_atom(k) -> Atom.to_string(k)
k when is_binary(k) -> k
end
{state, v} = _encode(state, v)
:luerl_emul.set_table_key(tref, k, v, state)
end)
{state, tref}
end
# Non-map values need no state changes; delegate to the stateless encoder.
@spec _encode(tuple, nil | boolean | number | binary | atom) ::
{tuple, nil | boolean | float | binary}
def _encode(state, value), do: {state, encode(value)}
@doc "Decodes a Lua value as an Elixir term."
@spec decode(nil | boolean | number | binary) :: term
def decode(value)
def decode(nil), do: nil
def decode(false), do: false
def decode(true), do: true
def decode(value) when is_number(value), do: value
def decode(value) when is_binary(value), do: value
# NOTE(review): table refs and functions pass through undecoded (see FIXME).
def decode(value), do: value # FIXME
@doc "Performs garbage collection."
@spec gc(Lua.State.t) :: Lua.State.t
def gc(%State{luerl: state}) do
State.wrap(:luerl.gc(state))
end
@doc "Interprets a Lua code snippet, discarding any side effects."
@spec eval(Lua.State.t, binary) :: {:ok, any} | {:error, any}
def eval(%State{luerl: state}, code) when is_binary(code) do
:luerl.eval(code, state)
end
@doc "Interprets a Lua code snippet, discarding any side effects."
@spec eval!(Lua.State.t, binary) :: any
def eval!(%State{luerl: state}, code) when is_binary(code) do
case :luerl.eval(code, state) do
{:ok, result} -> result
{:error, reason} -> raise Error, reason: reason, message: inspect(reason)
end
end
@doc "Interprets a Lua source file, discarding any side effects."
@spec eval_file(Lua.State.t, binary) :: {:ok, any} | {:error, any}
def eval_file(%State{luerl: state}, filepath) when is_binary(filepath) do
:luerl.evalfile(filepath |> String.to_charlist, state)
end
@doc "Interprets a Lua source file, discarding any side effects."
@spec eval_file!(Lua.State.t, binary) :: any
def eval_file!(%State{luerl: state}, filepath) when is_binary(filepath) do
case :luerl.evalfile(filepath |> String.to_charlist, state) do
{:ok, result} -> result
{:error, reason} -> raise Error, reason: reason, message: inspect(reason)
end
end
@doc "Interprets a Lua code snippet, for its side effects."
@spec exec!(Lua.State.t, binary) :: Lua.State.t
def exec!(%State{luerl: state}, code) when is_binary(code) do
{_, state} = :luerl.do(code, state)
State.wrap(state)
end
@doc "Interprets a Lua source file, for its side effects."
@spec exec_file!(Lua.State.t, binary) :: Lua.State.t
def exec_file!(%State{luerl: state}, filepath) when is_binary(filepath) do
{_, state} = :luerl.dofile(filepath |> String.to_charlist, state)
State.wrap(state)
end
@doc "Compiles a Lua code snippet into a chunk."
@spec load(Lua.State.t, binary) :: {:ok, Lua.State.t, Lua.Chunk.t} | {:error, any, any}
def load(%State{luerl: state}, code) do
case :luerl.load(code, state) do
{:ok, function, state} ->
{:ok, State.wrap(state), %Chunk{luerl: function}}
error -> error
end
end
@doc "Compiles a Lua code snippet into a chunk."
@spec load!(Lua.State.t, binary) :: {Lua.State.t, Lua.Chunk.t}
def load!(%State{luerl: state}, code) do
case :luerl.load(code, state) do
{:ok, function, state} ->
{State.wrap(state), %Chunk{luerl: function}}
{:error, reason, _} ->
raise Error, reason: reason, message: inspect(reason)
end
end
@doc "Compiles a Lua source file into a chunk."
@spec load_file(Lua.State.t, binary) :: {:ok, Lua.State.t, Lua.Chunk.t} | {:error, any, any}
def load_file(%State{luerl: state}, filepath) do
case :luerl.loadfile(filepath |> String.to_charlist, state) do
{:ok, function, state} ->
{:ok, State.wrap(state), %Chunk{luerl: function}}
error -> error
end
end
@doc "Compiles a Lua source file into a chunk."
@spec load_file!(Lua.State.t, binary) :: {Lua.State.t, Lua.Chunk.t}
def load_file!(%State{luerl: state}, filepath) do
case :luerl.loadfile(filepath |> String.to_charlist, state) do
{:ok, function, state} ->
{State.wrap(state), %Chunk{luerl: function}}
{:error, reason, _} ->
raise Error, reason: reason, message: inspect(reason)
end
end
@doc "Calls a Lua compiled chunk."
@spec call_chunk!(Lua.State.t, Lua.Chunk.t, [any]) :: {Lua.State.t, [any]}
def call_chunk!(%State{luerl: state}, %Chunk{luerl: chunk}, args \\ []) when is_list(args) do
case :luerl.call_chunk(chunk, args, state) do
{result, state} -> {State.wrap(state), result}
end
end
def call_function!(state, name, args \\ [])
@doc "Calls a Lua function."
@spec call_function!(Lua.State.t, atom, [any]) :: {Lua.State.t, [any]}
def call_function!(%State{} = state, name, args) when is_atom(name) and is_list(args) do
call_function!(state, [name], args)
end
@doc "Calls a Lua function."
@spec call_function!(Lua.State.t, [atom], [any]) :: {Lua.State.t, [any]}
def call_function!(%State{luerl: state}, name, args) when is_list(name) and is_list(args) do
case :luerl.call_function(name, args, state) do
{result, state} -> {State.wrap(state), result}
end
end
@doc "Returns the value of a global variable."
@spec get_global(Lua.State.t, atom) :: {Lua.State.t, any}
def get_global(%State{} = state, name) when is_atom(name) do
get_global(state, name |> Atom.to_string)
end
@doc "Returns the value of a global variable."
@spec get_global(Lua.State.t, binary) :: {Lua.State.t, any}
def get_global(%State{luerl: state}, name) when is_binary(name) do
{result, state} = :luerl_emul.get_global_key(name, state)
{State.wrap(state), result}
end
@doc "Sets the value of a global variable."
@spec set_global(Lua.State.t, atom, any) :: Lua.State.t
def set_global(%State{} = state, name, value) when is_atom(name) do
set_global(state, name |> Atom.to_string, value)
end
@doc "Sets the value of a global variable."
@spec set_global(Lua.State.t, binary, any) :: Lua.State.t
def set_global(%State{luerl: state}, name, value) when is_binary(name) do
{state, value} = _encode(state, value)
State.wrap(:luerl_emul.set_global_key(name, value, state))
end
@doc "Returns the value of a table index."
@spec get_table(Lua.State.t, [atom]) :: {Lua.State.t, any}
def get_table(%State{luerl: state}, name) when is_list(name) do
{result, state} = :luerl.get_table(name, state)
{State.wrap(state), result}
end
@doc "Sets a table index to the given value."
@spec set_table(Lua.State.t, [atom], any) :: Lua.State.t
def set_table(%State{luerl: state}, name, value) when is_list(name) do
name = Enum.map(name, &Atom.to_string/1)
{state, value} = _encode(state, value)
State.wrap(:luerl_emul.set_table_keys(name, value, state))
end
@doc "Sets the value of the package.path global variable."
@spec set_package_path(Lua.State.t, binary) :: Lua.State.t
def set_package_path(%State{} = state, path) when is_binary(path) do
set_table(state, [:package, :path], path)
end
@doc "Attempts to load a package of the given name."
@spec require!(Lua.State.t, binary) :: {Lua.State.t, [any]}
def require!(%State{} = state, package_name) when is_binary(package_name) do
call_function!(state, :require, [package_name])
end
# Adapts a 1-arity Elixir callback (args-only, no state access) to luerl's
# calling convention: decode inputs, call, encode outputs; nil means "no
# return values". The luerl state passes through untouched.
@spec wrap_function(([term] -> nil | [term])) :: fun
defp wrap_function(function) when is_function(function, 1) do
fn inputs, state ->
inputs = inputs |> Enum.map(&decode/1)
case function.(inputs) do
nil -> {[], state}
outputs when is_list(outputs) ->
{outputs |> Enum.map(&encode/1), state}
end
end
end
# Adapts a 2-arity Elixir callback that also receives (and may update) the
# interpreter state. Accepts {state, outputs} or bare outputs as the result.
@spec wrap_function((Lua.State.t, [term] -> nil | [term] | {Lua.State.t, [term]})) :: fun
defp wrap_function(function) when is_function(function, 2) do
# ExLua's callback calling convention is effectively the reverse of Luerl's.
fn inputs, state ->
inputs = inputs |> Enum.map(&decode/1)
case function.(State.wrap(state), inputs) do
{%State{luerl: state}, nil} -> {[], state}
{%State{luerl: state}, outputs} when is_list(outputs) ->
{outputs |> Enum.map(&encode/1), state}
outputs when is_list(outputs) ->
{outputs |> Enum.map(&encode/1), state}
end
end
end
end
|
lib/lua.ex
| 0.821975 | 0.479077 |
lua.ex
|
starcoder
|
defmodule ConsulKv.Client do
  @moduledoc """
  The client for Consul KV store.

  There are several configuration options for the client:

  - consul_recv_timeout (default: 5000)
    the timeout for receiving a response from the server side
  - consul_connect_timeout (default: 5000)
    the timeout for connecting to the consul server
  - consul_kv_address
    the address of the consul KV store
  """
  use Tesla

  # NOTE: these options are read at compile time; changing the application
  # environment at runtime will not affect an already-compiled client.
  adapter Tesla.Adapter.Hackney,
    recv_timeout: Application.get_env(:consul_kv, :consul_recv_timeout, 5000),
    connect_timeout: Application.get_env(:consul_kv, :consul_connect_timeout, 5000)

  plug Tesla.Middleware.BaseUrl, Application.fetch_env!(:consul_kv, :consul_kv_address)
  plug Tesla.Middleware.JSON

  @doc """
  This interface updates the value of the specified key. If no key exists at
  the given path, the key will be created.

  The key should be a string, and value could be a string or any type which
  can be encoded to JSON. The query parameters could be a `Keyword`:

  - dc (default: "")
    Specifies the datacenter.
  - flags (default: 0)
    Specifies an unsigned value between 0 and (2^64)-1.
  - cas (default: 0)
    Specifies to use a `Check-And-Set` operation. If the index is 0, Consul will only put
    the key if it does not already exist. If the index is non-zero, the key is only set if
    the index matches the `ModifyIndex` of that key.
  - acquire (default: "")
    Supply a session ID to use in a lock acquisition operation. This is useful as it allows
    leader election to be built on top of Consul.
  - release (default: "")
    Supply a session ID to use in a release operation.
  - ns (default: "")
    Specifies the namespace to query.

  Returns `{:ok, true}` on success, `{:error, reason}` otherwise.
  """
  @spec put_kv(String.t(), any(), Keyword.t()) :: {:ok, true} | {:error, any()}
  def put_kv(key, value, query_params \\ []) do
    key
    |> put(value, query: query_params)
    |> case do
      {:ok, %{status: 200}} -> {:ok, true}
      {:ok, other_status} -> {:error, other_status}
      other -> other
    end
  end

  @doc """
  Return the specified key. If no key exists at the given path,
  `{:error, :not_found}` is returned (the server replies with a 404 instead
  of a 200 response).

  The key should be a string, the query parameters could be a `Keyword`:

  - dc (default: "")
    Specifies the datacenter.
  - recurse (default: false)
    Specifies to look up all keys that share the given prefix rather than
    only the key that matches exactly.
  - raw (default: false)
    Specifies the response is just the raw value of the key, without any encoding or metadata.
  - keys (default: false)
    Specifies to return only keys (no values or metadata).
  - separator (default: "")
    Specifies the string to use as a separator for recursive key lookups.
  - ns (default: "")
    Specifies the namespace to query.
  """
  @spec get_kv(String.t(), Keyword.t()) :: {:ok, [ConsulKv.t()]} | {:error, any()}
  def get_kv(key, query_params \\ []) do
    key
    |> get(query: query_params)
    |> case do
      {:ok, %{status: 200, body: body}} -> {:ok, parse_get_kv_body(body)}
      {:ok, %{status: 404}} -> {:error, :not_found}
      {:ok, other_status} -> {:error, other_status}
      other -> other
    end
  end

  # Fixed: removed the `@doc false` attributes that preceded the private
  # functions below — docs are always discarded for defp and each occurrence
  # triggered a compiler warning.

  # Maps every raw KV entry in the response body to a ConsulKv struct.
  defp parse_get_kv_body(body) do
    Enum.map(body, &parse_body/1)
  end

  # Converts one JSON-decoded KV entry into a ConsulKv struct; non-map items
  # (e.g. the bare value returned with `raw: true`) pass through unchanged.
  defp parse_body(item) when is_map(item) do
    %ConsulKv{
      key: Map.get(item, "Key"),
      flags: Map.get(item, "Flags"),
      value: decode_value(Map.get(item, "Value")),
      lock_index: Map.get(item, "LockIndex"),
      session: Map.get(item, "Session"),
      create_index: Map.get(item, "CreateIndex"),
      modify_index: Map.get(item, "ModifyIndex")
    }
  end

  defp parse_body(item), do: item

  # Consul returns values Base64-encoded; a missing value stays nil.
  defp decode_value(nil), do: nil
  defp decode_value(value), do: Base.decode64!(value)

  @doc """
  Delete a single key or all keys sharing a prefix.

  The key should be a string, the query parameters could be a `Keyword`:

  - dc (default: "")
    Specifies the datacenter.
  - recurse (default: false)
    Specifies to delete all keys which have the specified prefix.
    Without this, only a key with an exact match will be deleted.
  - cas (default: 0)
    Specifies to use a Check-And-Set operation
  - ns (default: "")
    Specifies the namespace to query.
  """
  @spec delete_kv(String.t(), Keyword.t()) :: {:ok, true} | {:error, any()}
  def delete_kv(key, query_params \\ []) do
    key
    |> delete(query: query_params)
    |> case do
      {:ok, %{status: 200}} -> {:ok, true}
      {:ok, other_status} -> {:error, other_status}
      other -> other
    end
  end
end
|
lib/consul_kv/client.ex
| 0.856663 | 0.433682 |
client.ex
|
starcoder
|
defmodule ChoreRunner.Chore do
@moduledoc """
Behaviour and DSL for chores.
"""
require ChoreRunner.DSL
alias ChoreRunner.{DSL, Input}
# Runtime record for a single chore execution; populated by the runner.
defstruct id: nil,
mod: nil,
logs: [],
values: %{},
task: nil,
reporter: nil,
started_at: nil,
finished_at: nil,
result: nil
# `use ChoreRunner.Chore` expands to the DSL's boilerplate (see DSL.using/0).
defmacro __using__(_args), do: DSL.using()
@type unix_timestamp :: integer()
@type t :: %__MODULE__{
id: String.t(),
mod: module(),
logs: [{unix_timestamp, String.t()}],
values: %{atom() => number()},
task: Task.t(),
reporter: pid(),
started_at: DateTime.t(),
finished_at: DateTime.t(),
result: any()
}
@doc """
An optional callback function for defining a chore restriction.
The restriction can be either :none, :self, or :global
- `:none` is no restrictions
- `:self` prevents more than one of the same chore from running simultaneously across all connected nodes
- `:global` prevents more than one of all chores with the restriction `:global` from running simultaneously across all connected nodes. This restriction does not affect non-`:global` chores.
If this callback is not defined, the default return is `:self`
"""
@callback restriction :: :none | :self | :global
@doc """
An optional callback function for defining a chore's inputs.
Expects a list of input function calls.
The input functions provided are `string`, `int`, `float`, `file`, and `bool`.
All input functions follow the same syntax.
For example:
```
def inputs do
[
string(:name),
int(:name2, [some: :option])
]
end
```
The supported options are
- `:description` — a string description of the input, for UI use
- `:validators` — a list of anonymous or captured validator functions.
Valiator functions should accept a single argument as a parameter, but can return a variety of things, including:
- an `{:ok, value}`, or `{:error, reason}` tuple
- an `:ok` or `:error` atom
- a `true` or `false`
- any erlang value, or nil
The positive values (`:ok`, `true`, non-falsey values) pass validation.
The negative values (`:error`, `false`, `nil`) fail validation
If a value is passed back as part of an {:ok, value} tuple, or by itself, that value is treated as the new value of the given input. This way, validators can also transform input if needed.
If this callback is not defined, the default return is `[]`, or no inputs.
"""
@callback inputs :: [Input.t()]
@doc """
A non-optional callback used to contain the main Chore logic.
Accepts a map of input, always atom keyed. (When calling ChoreRunner.run_chore/2, a string keyed map will be intelligently converted to an atom-keyed map automatically)
Only keys defined in the `inputs/0` callback will be present in the input map, but defined inputs are not garaunteed to be present.
The chore callback has access to several `Reporter` functions, used for live chore metrics and loggin.
These functions are:
- `log(message)` — Logs a string message with timestamp
- `set_counter(name, value)` — Sets a named counter, expects an atom for a name and a number for a value
- `inc_counter(name, inc_value)` — Increments a named counter. If the counter does not exist, it will default to 0, and then be incremented. Used negative values for decrements.
- `report_failed(reason_message)` — Fails a chore, marking it as failed.
The return value of the `run/1` callback will be stored in the chore struct and forwarded to the final chore handling function.
"""
@callback run(map()) :: {:ok, any()} | {:error, any()}
# Validates a raw input map against the chore module's declared inputs.
# Returns {:ok, validated_map} or {:error, [{input_name, reasons}]}.
# Unknown input keys are silently dropped (not treated as errors).
def validate_input(%__MODULE__{mod: mod}, input) do
expected_inputs = mod.inputs
Enum.reduce(input, {%{}, []}, fn {key, val}, {validated_inputs, errors_acc} ->
with {:ok, {type, name, opts}} <- verify_valid_input_name(expected_inputs, key),
{:ok, validated_value} <- validate_input(name, val, type, opts) do
{Map.put(validated_inputs, name, validated_value), errors_acc}
else
{:error, :invalid_input_name} ->
{validated_inputs, errors_acc}
{:error, name, errors} ->
{validated_inputs, [{name, errors} | errors_acc]}
end
end)
|> case do
{final_inputs, []} -> {:ok, final_inputs}
{_, errors} -> {:error, errors}
end
end
# Looks up `key` among the declared inputs, accepting either the atom name
# or its string form. Returns the matching {type, name, opts} declaration or
# {:error, :invalid_input_name} when the key is not declared.
defp verify_valid_input_name(expected_inputs, key) do
Enum.find_value(expected_inputs, fn {type, name, opts} ->
if name == key or "#{name}" == key do
{:ok, {type, name, opts}}
else
false
end
end)
|> case do
nil -> {:error, :invalid_input_name}
{:ok, res} -> {:ok, res}
end
end
# Runs the built-in type check (Input.validate_field/2) followed by any
# user-supplied validators, threading the (possibly transformed) value
# through each. Collects all error reasons rather than stopping early.
defp validate_input(name, value, type, opts) do
[(&Input.validate_field(type, &1)) | Keyword.get(opts, :validators, [])]
|> Enum.reduce({value, []}, fn validator, {val, errors} ->
# Normalize the many allowed validator return shapes; bare non-falsey
# values replace the current value ("other" clause).
case validator.(val) do
{:ok, validated_value} -> {validated_value, errors}
:ok -> {val, errors}
true -> {val, errors}
{:error, reason} -> {val, [reason | errors]}
false -> {val, ["invalid" | errors]}
nil -> {val, ["invalid" | errors]}
other -> {other, errors}
end
end)
|> case do
{final_value, [] = _no_errors} -> {:ok, final_value}
{_invalid, errors} -> {:error, name, errors}
end
end
end
|
lib/chore_runner/chore.ex
| 0.86053 | 0.884039 |
chore.ex
|
starcoder
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.