defmodule Erlef.Agenda do
@moduledoc false
@board_ics "https://user.fm/calendar/v1-d950fe3b2598245f424e3ddbff1a674a/Board%20Public.ics"
# 2 minutes
@check_interval 120_000
require Logger
use GenServer
alias Erlef.Agenda.Parser
# Client
@spec start_link(Keyword.t()) :: :ignore | {:error, term()} | {:ok, pid()}
def start_link(opts) do
check_interval = Keyword.get(opts, :check_interval, @check_interval)
GenServer.start_link(__MODULE__, {sys_now(), check_interval})
end
@spec get_combined() :: {:ok, String.t()} | {:error, term()}
def get_combined() do
case :ets.lookup(__MODULE__, :all) do
[{:all, ics}] ->
{:ok, ics}
[] ->
case get_calendars() do
ics when is_binary(ics) -> {:ok, ics}
other -> {:error, other}
end
end
end
# Server
@impl true
def init(opts) do
{:ok, opts, {:continue, :init}}
end
@impl true
def handle_continue(:init, state) do
_tid = :ets.new(__MODULE__, ets_acl())
:ets.insert(__MODULE__, {:all, get_calendars()})
schedule_check(state)
{:noreply, state}
end
@impl true
def handle_info(:check, state) do
:ets.insert(__MODULE__, {:all, get_calendars()})
schedule_check(state)
{:noreply, state}
end
def handle_info(_, state), do: {:noreply, state}
# Private
defp get_calendars() do
wgs = Erlef.Groups.list_working_groups()
wgs
|> Enum.filter(fn wg -> not is_nil(wg.meta.public_calendar) end)
|> Enum.map(fn wg -> wg.meta.public_calendar end)
|> get_feeds()
end
defp get_feeds(links) do
[@board_ics | links]
|> Enum.reduce([], fn l, acc ->
case Erlef.HTTP.perform(:get, l, [], "", []) do
{:ok, res} ->
[res.body | acc]
err ->
# credo:disable-for-lines:3
if Erlef.in_env?([:dev, :prod]) do
Logger.warning(fn -> "Error getting feed #{l} : #{inspect(err)}" end)
end
acc
end
end)
|> Parser.combine(name: "ErlEF Public Calendars")
end
defp schedule_check({start, interval}) do
:erlang.send_after(next_check(start, interval), self(), :check)
end
defp sys_now() do
:erlang.monotonic_time(:millisecond)
end
defp next_check(start, interval) do
interval - rem(sys_now() - start, interval)
end
defp ets_acl() do
case Erlef.is_env?(:test) do
true ->
[:named_table, :public, {:write_concurrency, true}]
false ->
[:named_table, :protected, {:write_concurrency, true}]
end
end
end
# Source: lib/erlef/agenda.ex
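# A usage sketch for Erlef.Agenda above. The supervision wiring is an
# assumption for illustration; `use GenServer` provides the default
# child_spec/1 that makes the `{module, opts}` tuple below work.
children = [
  {Erlef.Agenda, check_interval: 60_000} # override the 2-minute default
]
{:ok, _sup} = Supervisor.start_link(children, strategy: :one_for_one)
# Reads go straight to the named ETS table, so callers never block on the server:
{:ok, _ics} = Erlef.Agenda.get_combined()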
defmodule TinyEctoHelperMySQL do
@moduledoc """
Documentation for TinyEctoHelperMySQL
"""
@doc """
get columns lists in queryable
from(
q in Question,
select: {q.id, q.title, q.body}
) |> TinyEctoHelperMySQL.get_select_keys()
|
|
|
[:id, :title, :body]
"""
@error_not_queryable {:error, :not_queryable}
@error_no_select {:error, :not_found_select}
def get_select_keys(query) when is_map(query) do
case get_select(query) do
{:error, msg} ->
{:error, msg}
{:ok, select} ->
{_, _, expr} = select.expr
case expr do
[] ->
@error_no_select
expr ->
keys =
expr
|> Enum.map(fn x ->
{{_, _, [_, key]}, _, _} = x
key
end)
{:ok, keys}
end
end
end
def get_select_keys(_), do: get_select_keys()
def get_select_keys() do
@error_not_queryable
end
@doc """
## Examples
Issue a query to MySQL with the queryable AND `SELECT SQL_CALC_FOUND_ROWS`, then return the result and the count returned from `SELECT FOUND_ROWS()`, like the following:
query =
from(
q in Question,
select: {q.id, q.title, q.body, q.user_id, q.inserted_at, q.updated_at},
order_by: [asc: q.id]
)
{:ok, select_keys} = TinyEctoHelperMySQL.get_select_keys(query)
TinyEctoHelperMySQL.query_and_found_rows(query, select_keys, [Repo, %Question{}, Question])
{:ok, %{results: results, count: count}}
"""
def query_and_found_rows(query, keys, [repo, struct, model]) do
{:ok, [results, count]} =
repo.transaction(fn ->
[inject_sql, values] = build_inject_sql(repo, query)
results =
Ecto.Adapters.SQL.query!(repo, inject_sql, values).rows
|> merge_result_keys(keys, [struct, model])
[[count]] = Ecto.Adapters.SQL.query!(repo, "SELECT FOUND_ROWS()").rows
[results, count]
end)
{:ok, %{results: results, count: count}}
end
defp get_select(query) when is_map(query) do
if not Map.has_key?(query, :__struct__) do
@error_not_queryable
else
select = Map.get(query, :select)
# select exists, but it may be an empty map or nil (e.g. select: {})
if select == %{} or is_nil(select) do
@error_no_select
else
{:ok, select}
end
end
end
defp get_select(_), do: @error_not_queryable
defp build_inject_sql(repo, query) do
{sql, values} = repo.to_sql(:all, query)
inject_sql = String.replace_prefix(sql, "SELECT ", "SELECT SQL_CALC_FOUND_ROWS ")
[inject_sql, values]
end
# build maps such as {id: 1, title: "xxxx"}
defp merge_result_keys(raw_result, keys, [struct, model]) do
raw_result
|> Enum.map(fn x ->
key =
Enum.zip(keys, x)
|> Enum.reduce(%{}, fn {key, value}, acc ->
Map.put(acc, key, value)
end)
# call validation just in case.
if not model.changeset(struct, key).valid? do
raise "validation error: #{model}"
end
Map.merge(struct, key)
end)
end
end
# Source: lib/tiny_ecto_helper_mysql.ex
defmodule Niesso.Assertion do
@moduledoc """
SAML assertion returned from the IdP upon successful authentication.
"""
import SweetXml
use Timex
alias Niesso.Assertion
defstruct uid: "",
attributes: [],
success: false,
expires_at: nil
@type t :: %__MODULE__{
uid: String.t(),
attributes: [map()],
success: boolean(),
expires_at: DateTime.t()
}
@protocol "urn:oasis:names:tc:SAML:2.0:protocol"
@assertion "urn:oasis:names:tc:SAML:2.0:assertion"
@success "urn:oasis:names:tc:SAML:2.0:status:Success"
@doc """
This function takes as input raw XML comprising a SAML authorization
response, and from this data populates and returns an Assertion struct
containing this data.
"""
def from_xml(xml) do
doc = parse(xml, namespace_conformant: true)
attrs =
doc
|> xmap(
uid:
~x"//saml2p:Response/saml2:Assertion/saml2:Subject/saml2:NameID/text()"s
|> add_namespace("saml2p", @protocol)
|> add_namespace("saml2", @assertion),
attributes: [
~x"//saml2p:Response/saml2:Assertion/saml2:AttributeStatement/saml2:Attribute"l
|> add_namespace("saml2p", @protocol)
|> add_namespace("saml2", @assertion),
name: ~x"./@Name"s,
value: ~x"./saml2:AttributeValue/text()"s |> add_namespace("saml2", @assertion)
]
)
success =
doc
|> xpath(
~x"//saml2p:Response/saml2p:Status/saml2p:StatusCode/@Value"s
|> add_namespace("saml2p", @protocol)
|> add_namespace("saml2", @assertion)
) == @success
{:ok, timestamp} =
Timex.parse(
doc
|> xpath(
~x"//saml2p:Response/saml2:Assertion/saml2:Subject/saml2:SubjectConfirmation/saml2:SubjectConfirmationData/@NotOnOrAfter"s
|> add_namespace("saml2p", @protocol)
|> add_namespace("saml2", @assertion)
),
"{ISO:Extended}"
)
attrs =
Map.merge(attrs, %{
success: success,
expires_at: timestamp
})
{:ok, struct(Assertion, attrs)}
end
end
# Source: lib/assertion.ex
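# A minimal sketch of the response shape Niesso.Assertion.from_xml/1 expects,
# reconstructed from the XPath expressions above; all values (NameID,
# attribute, timestamp) are made up for illustration.
xml = """
<saml2p:Response xmlns:saml2p="urn:oasis:names:tc:SAML:2.0:protocol"
                 xmlns:saml2="urn:oasis:names:tc:SAML:2.0:assertion">
  <saml2p:Status>
    <saml2p:StatusCode Value="urn:oasis:names:tc:SAML:2.0:status:Success"/>
  </saml2p:Status>
  <saml2:Assertion>
    <saml2:Subject>
      <saml2:NameID>jdoe</saml2:NameID>
      <saml2:SubjectConfirmation>
        <saml2:SubjectConfirmationData NotOnOrAfter="2030-01-01T00:00:00Z"/>
      </saml2:SubjectConfirmation>
    </saml2:Subject>
    <saml2:AttributeStatement>
      <saml2:Attribute Name="mail">
        <saml2:AttributeValue>jdoe@example.com</saml2:AttributeValue>
      </saml2:Attribute>
    </saml2:AttributeStatement>
  </saml2:Assertion>
</saml2p:Response>
"""
{:ok, assertion} = Niesso.Assertion.from_xml(xml)
# assertion.uid == "jdoe", assertion.success == true,
# assertion.attributes == [%{name: "mail", value: "jdoe@example.com"}]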
defmodule BinaryNode do
@enforce_keys [:value]
defstruct value: nil,
right: nil,
left: nil
@type t :: %__MODULE__{
value: integer(),
right: BinaryNode.t(),
left: BinaryNode.t()
}
@doc """
Create new Node
#Example
iex> BinaryNode.new(1)
%BinaryNode{value: 1}
iex> BinaryNode.new(2)
%BinaryNode{value: 2}
"""
def new(value, left \\ nil, right \\ nil) do
struct!(__MODULE__, value: value, left: left, right: right)
end
def left(%__MODULE__{left: left}) do
left
end
def right(%__MODULE__{right: right}) do
right
end
@doc """
Returns true if node is a leaf node
#Example
iex> node = BinaryNode.new(2, BinaryNode.new(1))
iex> BinaryNode.is_leaf(node)
false
iex> node = BinaryNode.new(1)
iex> BinaryNode.is_leaf(node)
true
"""
def is_leaf(%__MODULE__{right: nil, left: nil}) do
true
end
def is_leaf(%__MODULE__{}) do
false
end
end
defmodule BinaryTree do
defstruct root: nil
@type t :: %__MODULE__{
root: BinaryNode.t()
}
@doc """
Create a new binary tree
#Example
iex> BinaryTree.new()
%BinaryTree{root: nil}
"""
def new() do
struct!(__MODULE__, root: nil)
end
@doc """
Create a binary tree from list
#Example
iex> t = BinaryTree.from_list([4,5,3,1])
iex> BinaryTree.in_order_list(t)
[1, 3, 4, 5]
"""
def from_list(list) when is_list(list) do
Enum.reduce(list, new(), fn e, t ->
insert(t, e)
end)
end
def insert(%__MODULE__{root: nil} = t, value) do
%{t | root: BinaryNode.new(value)}
end
def insert(%__MODULE__{root: root} = t, value) do
%{t | root: insert_with_node(root, value)}
end
@doc """
Find a node with the given value in the tree
#Example
iex> t = BinaryTree.from_list([4,5,3,1,2,0])
iex> BinaryTree.find(t, 5)
%BinaryNode{value: 5}
iex> t = BinaryTree.from_list([4,5,3,1,2,0])
iex> BinaryTree.find(t, 6)
nil
"""
def find(%__MODULE__{root: root}, value) do
do_find(root, value)
end
@doc """
Return nodes as inorder list
#Example
iex> t = BinaryTree.new()
iex> t = BinaryTree.insert(t, 4)
iex> t = BinaryTree.insert(t, 3)
iex> t = BinaryTree.insert(t, 5)
iex> BinaryTree.in_order_list(t)
[3, 4, 5]
"""
def in_order_list(%{root: root}) do
do_to_list_in_order(root, [])
end
@doc """
Return nodes as pre list
#Example
iex> t = BinaryTree.from_list([4, 3, 5])
iex> BinaryTree.pre_order_list(t)
[4, 3, 5]
"""
def pre_order_list(%{root: root}) do
do_pre_order_list(root, [])
end
@doc """
Return nodes as post list
#Example
iex> t = BinaryTree.from_list([4, 3, 5, 1, 0, 2])
iex> BinaryTree.post_order_list(t)
[0, 2, 1, 3, 5, 4]
"""
def post_order_list(%{root: root}) do
do_post_order_list(root, [])
end
@doc """
Returns dfs list of nodes in tree
#Example
iex> t = BinaryTree.from_list([4,3,6,5,2,8])
iex> BinaryTree.dfs_list(t)
[4, 3, 2, 6, 5, 8]
"""
def dfs_list(%{root: root}) do
dfs([root], [])
end
@doc """
Get nodes in tree as bfs list
#Example
iex> t = BinaryTree.from_list([4,3,5,1,2])
iex> BinaryTree.bfs_list(t)
[4, 3, 5, 1, 2]
"""
def bfs_list(%{root: root}) do
bfs([root], [])
end
@doc """
Delete a node with the given value from the tree
#Example
# iex> t = BinaryTree.from_list([4,3])
# iex> t = BinaryTree.delete(t, 3)
# iex> BinaryTree.in_order_list(t)
# [4]
iex> t = BinaryTree.from_list([4,3,6,8])
iex> t = BinaryTree.delete(t, 8)
iex> BinaryTree.in_order_list(t)
[3, 4, 6]
iex> t = BinaryTree.from_list([4,3,6,8])
iex> t = BinaryTree.delete(t, 6)
iex> BinaryTree.in_order_list(t)
[3, 4, 8]
iex> t = BinaryTree.from_list([4,3,6,8])
iex> t = BinaryTree.delete(t, 3)
iex> BinaryTree.in_order_list(t)
[4, 6, 8]
iex> t = BinaryTree.from_list([4,3,6,7])
iex> t = BinaryTree.delete(t, 4)
iex> BinaryTree.in_order_list(t)
[3, 6, 7]
iex> t = BinaryTree.from_list([4,3,6,1,2,7])
iex> t = BinaryTree.delete(t, 3)
iex> BinaryTree.bfs_list(t)
[4, 1, 6, 2, 7]
"""
def delete(%__MODULE__{root: nil} = t, _value) do
t
end
def delete(%__MODULE__{root: root} = t, value) do
# IO.inspect(root, label: "Old root")
new_root = find_and_delete(root, value)
# IO.inspect(new_root, label: "New root")
%{t | root: new_root}
end
# PRIVATE FUNCTIONS
defp find_min_node(%{left: nil} = node) do
node
end
defp find_min_node(%{left: left}) do
find_min_node(left)
end
defp find_and_delete(node, value) do
# IO.inspect([node, value], label: "Delet node")
cond do
is_nil(node) ->
nil
node.value > value ->
# IO.inspect([node.left, value], label: "Look in left sub tree")
%{node | left: find_and_delete(node.left, value)}
node.value < value ->
# IO.inspect([node.right, value], label: "Look in right sub tree")
%{node | right: find_and_delete(node.right, value)}
node.value == value ->
cond do
is_nil(node.left) and is_nil(node.right) ->
# IO.inspect([node, value], label: "Case 1")
nil
is_nil(node.left) ->
# IO.inspect([node, value], label: "Case 2a")
node.right
is_nil(node.right) ->
# IO.inspect([node, value], label: "Case 2b")
node.left
true ->
min_node = find_min_node(node.right)
new_node = %{node | value: min_node.value}
%{new_node | right: find_and_delete(node.right, min_node.value)}
end
end
end
defp do_find(nil, _value) do
nil
end
defp do_find(%{value: v} = node, value) do
cond do
v == value ->
node
v > value ->
do_find(node.left, value)
true ->
do_find(node.right, value)
end
end
defp do_to_list_in_order(nil, acc) do
acc
end
defp do_to_list_in_order(%{left: left, right: right} = node, acc) do
l1 = do_to_list_in_order(left, acc)
l2 = List.insert_at(l1, -1, node.value)
do_to_list_in_order(right, l2)
end
defp do_post_order_list(nil, acc) do
acc
end
defp do_post_order_list(%{left: left, right: right} = node, acc) do
l1 = do_post_order_list(left, acc)
l2 = do_post_order_list(right, l1)
List.insert_at(l2, -1, node.value)
end
defp do_pre_order_list(nil, acc) do
acc
end
defp do_pre_order_list(%{left: left, right: right} = node, acc) do
l0 = List.insert_at(acc, -1, node.value)
l1 = do_pre_order_list(left, l0)
do_pre_order_list(right, l1)
end
defp bfs([], acc) do
:lists.reverse(acc)
end
defp bfs(queue, acc) when is_list(queue) do
n = hd(queue)
# IO.inspect(n.value, label: "Current node")
child_nodes =
cond do
is_nil(n.left) and is_nil(n.right) ->
[]
is_nil(n.right) ->
[n.left]
is_nil(n.left) ->
[n.right]
true ->
[n.left, n.right]
end
new_queue = List.flatten([tl(queue), child_nodes])
bfs(new_queue, [n.value | acc])
end
defp dfs([], acc) do
:lists.reverse(acc)
end
defp dfs(stack, acc) do
n = hd(stack)
new_stack =
cond do
is_nil(n.left) and is_nil(n.right) ->
tl(stack)
is_nil(n.right) ->
[n.left | tl(stack)]
is_nil(n.left) ->
[n.right | tl(stack)]
true ->
[n.left | [n.right | tl(stack)]]
end
dfs(new_stack, [n.value | acc])
end
defp insert_with_node(%{value: node_value} = node, value)
when node_value > value do
if is_nil(node.left) do
%{node | left: BinaryNode.new(value)}
else
%{node | left: insert_with_node(node.left, value)}
end
end
defp insert_with_node(%{value: node_value} = node, value)
when node_value <= value do
if is_nil(node.right) do
%{node | right: BinaryNode.new(value)}
else
%{node | right: insert_with_node(node.right, value)}
end
end
end
# Source: lib/binary_tree.ex
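# A quick walkthrough of the BinaryTree API above (values arbitrary):
tree = BinaryTree.from_list([8, 3, 10, 1, 6])
BinaryTree.in_order_list(tree)  # => [1, 3, 6, 8, 10] (sorted)
BinaryTree.pre_order_list(tree) # => [8, 3, 1, 6, 10]
BinaryTree.bfs_list(tree)       # => [8, 3, 10, 1, 6] (level by level)
tree |> BinaryTree.find(6) |> BinaryNode.is_leaf() # => true
# Deleting a node with two children promotes the smallest value of its
# right subtree (here 6 replaces 3):
tree |> BinaryTree.delete(3) |> BinaryTree.in_order_list() # => [1, 6, 8, 10]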
defmodule Arrow.Type do
@moduledoc """
Conveniences for working with types.
A type is a two-element tuple with the name and the size.
The first element must be one of the following, with the respective
supported sizes:
* `:s` - signed integer (8, 16, 32, 64)
* `:u` - unsigned integer (8, 16, 32, 64)
* `:f` - float (32, 64)
* `:utf8` - string # TODO
"""
@type t ::
{:s, 8}
| {:s, 16}
| {:s, 32}
| {:s, 64}
# {:u, 1} -> boolean
| {:u, 1}
| {:u, 8}
| {:u, 16}
| {:u, 32}
| {:u, 64}
| {:f, 32}
| {:f, 64}
# {:utf8, 32} is for a GenericStringArray<i32> on Rust side
| {:utf8, 32}
# Timestamp(Microsecond, None)
| {:timestamp_us, 64}
# Date(Days)
| {:date, 32}
@doc """
Returns the minimum possible value for the given type.
"""
def min_value_binary(type)
def min_value_binary({:s, 8}), do: <<-128::8-signed-native>>
def min_value_binary({:s, 16}), do: <<-32768::16-signed-native>>
def min_value_binary({:s, 32}), do: <<-2_147_483_648::32-signed-native>>
def min_value_binary({:s, 64}), do: <<-9_223_372_036_854_775_808::64-signed-native>>
def min_value_binary({:u, size}), do: <<0::size(size)-native>>
def min_value_binary({:bf, 16}), do: <<0xFF80::16-native>>
def min_value_binary({:f, 32}), do: <<0xFF7FFFFF::32-native>>
def min_value_binary({:f, 64}), do: <<0xFFEFFFFFFFFFFFFF::64-native>>
@doc """
Returns the maximum possible value for the given type.
"""
def max_value_binary(type)
def max_value_binary({:s, 8}), do: <<127::8-signed-native>>
def max_value_binary({:s, 16}), do: <<32767::16-signed-native>>
def max_value_binary({:s, 32}), do: <<2_147_483_647::32-signed-native>>
def max_value_binary({:s, 64}), do: <<9_223_372_036_854_775_807::64-signed-native>>
def max_value_binary({:u, 8}), do: <<255::8-native>>
def max_value_binary({:u, 16}), do: <<65535::16-native>>
def max_value_binary({:u, 32}), do: <<4_294_967_295::32-native>>
def max_value_binary({:u, 64}), do: <<18_446_744_073_709_551_615::64-native>>
def max_value_binary({:bf, 16}), do: <<0x7F80::16-native>>
def max_value_binary({:f, 32}), do: <<0x7F7FFFFF::32-native>>
def max_value_binary({:f, 64}), do: <<0x7FEFFFFFFFFFFFFF::64-native>>
@doc """
Infers the type of the given value.
The value may be a number, boolean, or an arbitrary list with
any of the above. Integers are by default signed and of size 64.
Floats have size of 32. Booleans are unsigned integers of size 1
(also known as predicates).
In case mixed types are given, the one with highest space
requirements is used (i.e. float > brain floating > integer > boolean).
## Examples
iex> Arrow.Type.infer([1, 2, 3])
{:s, 64}
iex> Arrow.Type.infer([[1, 2], [3, 4]])
{:s, 64}
iex> Arrow.Type.infer([1.0, 2.0, 3.0])
{:f, 32}
iex> Arrow.Type.infer([1, 2.0])
{:f, 32}
iex> Arrow.Type.infer([])
{:f, 32}
iex> Arrow.Type.infer("string")
{:utf8, 32}
iex> Arrow.Type.infer(:foo)
** (ArgumentError) cannot infer the numerical type of :foo
"""
def infer(value) do
case infer(value, -1) do
-1 -> {:f, 32}
0 -> {:u, 1}
1 -> {:s, 64}
2 -> {:f, 32}
{:utf8, 32} -> {:utf8, 32}
{:timestamp_us, 64} -> {:timestamp_us, 64}
{:date, 32} -> {:date, 32}
end
end
defp infer(arg, _inferred) when is_binary(arg), do: {:utf8, 32}
defp infer(%DateTime{} = _arg, _inferred), do: {:timestamp_us, 64}
defp infer(%Date{} = _arg, _inferred), do: {:date, 32}
defp infer(arg, inferred) when is_list(arg), do: Enum.reduce(arg, inferred, &infer/2)
defp infer(arg, inferred) when is_boolean(arg), do: max(inferred, 0)
defp infer(arg, inferred) when is_integer(arg), do: max(inferred, 1)
defp infer(arg, inferred) when is_float(arg), do: max(inferred, 2)
defp infer(nil, inferred), do: max(inferred, 0)
defp infer(other, _inferred),
do: raise(ArgumentError, "cannot infer the numerical type of #{inspect(other)}")
@doc """
Validates the given type tuple.
It returns the type itself or raises.
## Examples
iex> Arrow.Type.normalize!({:u, 8})
{:u, 8}
iex> Arrow.Type.normalize!({:u, 0})
** (ArgumentError) invalid numerical type: {:u, 0} (see Arrow.Type docs for all supported types)
iex> Arrow.Type.normalize!({:k, 8})
** (ArgumentError) invalid numerical type: {:k, 8} (see Arrow.Type docs for all supported types)
"""
def normalize!(type) do
case validate(type) do
:error ->
raise ArgumentError,
"invalid numerical type: #{inspect(type)} (see Arrow.Type docs for all supported types)"
type ->
type
end
end
defp validate({:s, size} = type) when size in [8, 16, 32, 64], do: type
defp validate({:u, size} = type) when size in [1, 8, 16, 32, 64], do: type
defp validate({:f, size} = type) when size in [32, 64], do: type
defp validate({:utf8, size} = type) when size in [32, 64], do: type
defp validate({:timestamp_us, size} = type) when size in [64], do: type
defp validate({:date, size} = type) when size in [32], do: type
defp validate({:bf, size} = type) when size in [16], do: type
defp validate(_type), do: :error
end
# Source: lib/arrow/type.ex
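# A few concrete calls against Arrow.Type above (values arbitrary):
Arrow.Type.infer([1, 2, 3])          # => {:s, 64}
Arrow.Type.infer([1, 2.5])           # => {:f, 32} (float outranks integer)
Arrow.Type.infer([~D[2021-01-01]])   # => {:date, 32}
Arrow.Type.normalize!({:u, 8})       # => {:u, 8}
Arrow.Type.min_value_binary({:s, 8}) # => <<128>> (-128 in two's complement)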
defmodule Genome.Sequence do
alias Genome.Nucleotide
def from_enumerable(stream), do: stream |> Enum.map(&Nucleotide.encode/1)
def from_string(string), do: string |> to_charlist() |> from_enumerable()
def to_string(seq), do: seq |> Enum.map(&Nucleotide.decode/1) |> Kernel.to_string()
def encode(seq), do: seq |> Integer.undigits(4)
def decode(hash, k), do: hash |> Integer.digits(4) |> :string.right(k, 0)
def pattern_count(seq, pattern, acc \\ 0)
def pattern_count(seq, pattern, acc) when length(pattern) > length(seq), do: acc
def pattern_count(seq, pattern, acc) do
pattern_count(tl(seq), pattern, acc + (if Enum.take(seq, length(pattern)) == pattern, do: 1, else: 0))
end
def frequent_patterns(seq, k) do
{patterns, _max_count} =
seq
|> frequencies(k)
|> Enum.reduce({[], 0}, fn
{encoded_pattern, count}, {_, winning_count} when count > winning_count ->
{MapSet.new([decode(encoded_pattern, k)]), count}
{encoded_pattern, count}, {patterns, count} ->
{MapSet.put(patterns, decode(encoded_pattern, k)), count}
_, acc ->
acc
end)
patterns
end
def reverse_complement(seq), do: seq |> Enum.map(&Nucleotide.reverse/1) |> Enum.reverse()
def pattern_matches(seq, pattern, index \\ 0, acc \\ [])
def pattern_matches(seq, pattern, _, acc) when length(pattern) > length(seq), do: acc
def pattern_matches(seq, pattern, index, acc) do
k = length(pattern)
kmer = Enum.take(seq, k)
new_acc = if kmer == pattern, do: [index | acc], else: acc
pattern_matches(tl(seq), pattern, index + 1, new_acc)
end
def frequencies(seq, k, acc \\ %{}) do
with kmer <- Enum.take(seq, k),
^k <- Enum.count(kmer) do
frequencies(tl(seq), k, Map.update(acc, encode(kmer), 1, & &1 + 1))
else
_ -> acc
end
end
def clumps(seq, k, window_size, saturation) do
seq
|> Stream.chunk_every(window_size, 1, :discard)
|> Stream.map(& &1 |> :array.from_list())
|> Enum.reduce({MapSet.new(), nil}, fn window, {patterns, freqs_template} ->
{freqs, candidate_freqs} =
case freqs_template do
nil ->
with freqs = frequencies(window |> :array.to_list(), k),
do: {freqs, freqs}
value ->
with encoded_last_kmer = window |> array_slice(window_size - k..window_size - 1) |> encode(),
freqs = Map.update(value, encoded_last_kmer, 1, & &1 + 1),
do: {freqs, [{encoded_last_kmer, Map.get(freqs, encoded_last_kmer)}]}
end
new_patterns =
candidate_freqs
|> Enum.filter(fn {_, freq} -> freq >= saturation end)
|> Enum.map(fn {pattern, _} -> pattern end)
|> Enum.into(MapSet.new())
|> MapSet.union(patterns)
{_, next_freqs} =
freqs
|> Map.get_and_update!(window |> array_slice(0..k - 1) |> encode, &(if &1 == 1, do: :pop, else: {nil, &1 - 1}))
{new_patterns, next_freqs}
end)
|> elem(0)
end
def skews(seq) do
seq
|> Enum.reduce([0], fn
1, [prev | skews] -> [prev - 1, prev | skews]
2, [prev | skews] -> [prev + 1, prev | skews]
_, [prev | skews] -> [prev, prev | skews]
end)
|> Enum.reverse()
end
def minimum_skews(seq) do
seq
|> skews()
|> Enum.with_index()
|> Enum.sort()
|> Enum.reduce_while(nil, fn
{skew, index}, nil ->
{:cont, {skew, [index]}}
{skew, index}, {skew, acc} ->
{:cont, {skew, [index | acc]}}
_, {_, acc} ->
{:halt, Enum.reverse(acc)}
end)
end
defp array_slice(array, range) do
range |> Enum.map(&:array.get(&1, array))
end
end
# Source: lib/genome/sequence.ex
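# Genome.Nucleotide is not shown above, so this sketch sticks to sequences
# that are already encoded as base-4 digits (0..3), which every function in
# Genome.Sequence accepts directly:
seq = [0, 1, 2, 3, 0, 1]
Genome.Sequence.pattern_count(seq, [0, 1])   # => 2 (overlapping scan)
Genome.Sequence.pattern_matches(seq, [0, 1]) # => [4, 0] (positions, last match first)
Genome.Sequence.encode([0, 1, 2, 3])         # => 27 (base-4 digits to integer)
Genome.Sequence.decode(27, 4)                # => [0, 1, 2, 3] (zero-padded to length 4)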
defmodule Datamusex do
@moduledoc """
Elixir wrapper for the free [Datamuse](https://www.datamuse.com/api/) API.
## Example usage:
Datamusex.similar_meaning("computer")
|> Datamusex.triggered_by("device")
|> Datamusex.get_words
"""
defmodule ParamList do
@enforce_keys [:params]
defstruct params: []
end
defmodule Param do
@enforce_keys [:name, :value]
defstruct [:name, :value]
end
[
:similar_meaning,
:sound_like,
:spelled_similarly,
:rhyme_with,
:used_to_describe,
:often_follow,
:triggered_by,
:synonyme
]
|> Enum.each(fn name ->
def unquote(name)(acc \\ %ParamList{params: []}, words) when is_binary(words) do
%ParamList{
params: [%Param{name: unquote(name), value: words |> process_words} | acc.params]
}
end
end)
@doc """
Builds params from `%Datamusex.ParamList{}`, executes the request and returns the response
from datamuse.com.
See [https://www.datamuse.com/api/](https://www.datamuse.com/api/)
for further info.
## Example:
Datamusex.similar_meaning("computer")
|> Datamusex.triggered_by("device")
|> Datamusex.get_words
"""
def get_words(param_list = %ParamList{}, headers \\ [], options \\ []) do
params_from_param_list = param_list |> param_list_to_httpoison_params
options =
options |> Keyword.update(:params, params_from_param_list, &(&1 ++ params_from_param_list))
Datamusex.API.get(
"words",
headers,
options
)
end
@doc """
Auto complete suggestions.
See [https://www.datamuse.com/api/](https://www.datamuse.com/api/)
for further info.
## Example:
Datamusex.get_suggestions("car")
"""
def get_suggestions(words, headers \\ [], options \\ [])
when is_binary(words) and is_list(headers) and is_list(options) do
default_params = [s: process_words(words)]
options = options |> Keyword.update(:params, default_params, &(&1 ++ default_params))
Datamusex.API.get(
"sug",
headers,
options
)
end
defp process_words(words) when is_binary(words) do
words
|> String.split(" ")
|> Enum.join("+")
end
defp param_list_to_httpoison_params(%ParamList{params: params}) do
params |> Enum.map(¶m_to_param_tuple/1)
end
defp param_to_param_tuple(%Param{name: name, value: value}) do
tuple_name =
case name do
:similar_meaning -> :ml
:sound_like -> :sl
:spelled_similarly -> :sp
:rhyme_with -> :rel_rhy
:used_to_describe -> :rel_jjb
:often_follow -> :lc
:triggered_by -> :rel_trg
:synonyme -> :rel_syn
end
{tuple_name, value}
end
end
# Source: lib/datamusex.ex
defmodule Aoc2021.Day10 do
@moduledoc """
See https://adventofcode.com/2021/day/10
"""
@type token() :: char()
defmodule Reader do
@moduledoc false
@spec read_input(Path.t()) :: [[Aoc2021.Day10.token()]]
def read_input(path) do
path
|> File.stream!()
|> Stream.map(&String.trim/1)
|> Stream.reject(&empty_line?/1)
|> Stream.map(&tokens/1)
|> Enum.to_list()
end
defp empty_line?(""), do: true
defp empty_line?(_), do: false
defp tokens(line) do
String.to_charlist(line)
end
end
defmodule Parser do
@moduledoc false
@spec parse([Aoc2021.Day10.token()]) ::
:ok
| {:incomplete, [Aoc2021.Day10.token()]}
| {:error, {:unexpected_token, Aoc2021.Day10.token(), non_neg_integer()}}
def parse(line) do
result =
line
|> Enum.with_index()
|> Enum.reduce_while([], &step/2)
case result do
[] -> :ok
[_ | _] = stack -> {:incomplete, stack}
{:error, reason} -> {:error, reason}
end
end
defp step({token, _pos}, stack) when token in [?(, ?{, ?[, ?<],
do: {:cont, [token | stack]}
defp step({?), _pos}, [?( | rest]), do: {:cont, rest}
defp step({?}, _pos}, [?{ | rest]), do: {:cont, rest}
defp step({?], _pos}, [?[ | rest]), do: {:cont, rest}
defp step({?>, _pos}, [?< | rest]), do: {:cont, rest}
defp step({token, pos}, _stack), do: {:halt, {:error, {:unexpected_token, token, pos}}}
end
@spec solve_part1() :: non_neg_integer()
@spec solve_part1(Path.t()) :: non_neg_integer()
def solve_part1(path \\ "priv/day10/input.txt") do
path
|> Reader.read_input()
|> Enum.map(&Parser.parse/1)
|> Enum.map(&error_score/1)
|> Enum.sum()
end
defp error_score({:error, {:unexpected_token, token, _pos}}) do
error_token_score(token)
end
defp error_score(_), do: 0
defp error_token_score(?)), do: 3
defp error_token_score(?]), do: 57
defp error_token_score(?}), do: 1197
defp error_token_score(?>), do: 25_137
@spec solve_part2() :: non_neg_integer()
@spec solve_part2(Path.t()) :: non_neg_integer()
def solve_part2(path \\ "priv/day10/input.txt") do
path
|> Reader.read_input()
|> Enum.map(&Parser.parse/1)
|> Enum.filter(&incomplete?/1)
|> Enum.map(&autocomplete_score/1)
|> median()
end
defp median(scores) do
scores
|> Enum.sort()
|> Enum.at(div(length(scores), 2))
end
defp incomplete?({:incomplete, _}), do: true
defp incomplete?(_), do: false
defp autocomplete_score({:incomplete, stack}) do
Enum.reduce(stack, 0, &autocomplete_token_score/2)
end
defp autocomplete_token_score(token, acc) do
score = token |> closing_token() |> incomplete_token_score()
5 * acc + score
end
defp incomplete_token_score(?)), do: 1
defp incomplete_token_score(?]), do: 2
defp incomplete_token_score(?}), do: 3
defp incomplete_token_score(?>), do: 4
defp closing_token(?(), do: ?)
defp closing_token(?[), do: ?]
defp closing_token(?{), do: ?}
defp closing_token(?<), do: ?>
end
# Source: lib/aoc2021/day10.ex
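# The Parser above can be exercised directly; lines are charlists, matching
# what Reader.tokens/1 produces:
alias Aoc2021.Day10.Parser
Parser.parse('([])') # => :ok
Parser.parse('(]')   # => {:error, {:unexpected_token, ?], 1}}
Parser.parse('[({')  # => {:incomplete, '{(['} (unclosed openers, innermost first)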
defmodule DarkMatter.Inflections do
@moduledoc """
General utils for working with case conversions.
"""
@moduledoc since: "1.0.0"
alias DarkMatter.Namings.AbsintheNaming
alias DarkMatter.Namings.PhoenixNaming
@typedoc """
Available inflection conversions
"""
@type conversion() ::
:camel
| :human
| :title
| :pascal
| :plural
| :singular
| :constant
| :underscore
| :absinthe_camel
| :absinthe_pascal
@conversions [
:camel,
:human,
:title,
:pascal,
:plural,
:constant,
:singular,
:underscore,
:absinthe_camel,
:absinthe_pascal
]
@doc """
Converts either an atom or string to an atom based on the `conversion`.
## Examples
iex> atom("_foo_bar123XYZ", :absinthe_camel)
:_fooBar123XYZ
iex> atom(:fooBarz___test, :camel)
:fooBarzTest
"""
@spec atom(atom() | String.t(), conversion() | [conversion(), ...]) :: atom()
def atom(atom_or_binary, conversion_or_conversions)
when (is_atom(atom_or_binary) or is_binary(atom_or_binary)) and
(conversion_or_conversions in @conversions or is_list(conversion_or_conversions)) do
atom_or_binary
|> binary(conversion_or_conversions)
|> String.to_atom()
end
@doc """
Converts either an atom or string to a string based on the `conversion`.
## Examples
iex> binary("_foo_bar123XYZ", :absinthe_pascal)
"_FooBar123XYZ"
iex> binary("_foo_bar123XYZ", :absinthe_camel)
"_fooBar123XYZ"
iex> binary(:fooBarz___TESTPDF, :pascal)
"FooBarzTestpdf"
iex> binary(:fooBarz___TESTPDF, :camel)
"fooBarzTestpdf"
iex> binary(:HTTP_PdfTEST, :underscore)
"http_pdf_test"
iex> binary("buses", :singular)
"bus"
iex> binary("business", :plural)
"businesses"
iex> binary("MerchantBusiness", [:plural, :underscore])
"merchant_businesses"
"""
@spec binary(atom() | String.t(), conversion() | [conversion(), ...]) :: String.t()
def binary(binary, :camel) when is_binary(binary), do: camelize(binary)
def binary(binary, :human) when is_binary(binary), do: humanize(binary)
def binary(binary, :title) when is_binary(binary), do: titleize(binary)
def binary(binary, :pascal) when is_binary(binary), do: pascalize(binary)
def binary(binary, :plural) when is_binary(binary), do: pluralize(binary)
def binary(binary, :constant) when is_binary(binary), do: constantize(binary)
def binary(binary, :singular) when is_binary(binary), do: singularize(binary)
def binary(binary, :underscore) when is_binary(binary), do: underscore(binary)
def binary(binary, :absinthe_camel) when is_binary(binary), do: absinthe_camelize(binary)
def binary(binary, :absinthe_pascal) when is_binary(binary), do: absinthe_pascalize(binary)
def binary(atom_or_binary, conversion_or_conversions)
when (is_atom(atom_or_binary) or is_binary(atom_or_binary)) and
(conversion_or_conversions in @conversions or
(is_list(conversion_or_conversions) and conversion_or_conversions != [])) do
binary = if is_atom(atom_or_binary), do: Atom.to_string(atom_or_binary), else: atom_or_binary
for conversion <- List.wrap(conversion_or_conversions),
conversion in @conversions,
reduce: binary do
acc -> binary(acc, conversion)
end
end
@doc """
Returns the camel case version of the string
## Examples
iex> absinthe_camelize("foo_bar")
"fooBar"
iex> absinthe_camelize("foo")
"foo"
iex> absinthe_camelize("__foo_bar")
"__fooBar"
iex> absinthe_camelize("__foo")
"__foo"
iex> absinthe_camelize("_foo")
"_foo"
"""
@spec absinthe_camelize(String.t()) :: String.t()
def absinthe_camelize(binary) when is_binary(binary) do
AbsintheNaming.camelize(binary, lower: true)
end
@doc """
Returns the pascal case version of the string
## Examples
iex> absinthe_pascalize("foo_bar")
"FooBar"
iex> absinthe_pascalize("foo")
"Foo"
iex> absinthe_pascalize("__foo_bar")
"__FooBar"
iex> absinthe_pascalize("__foo")
"__Foo"
iex> absinthe_pascalize("_foo")
"_Foo"
"""
@spec absinthe_pascalize(String.t()) :: String.t()
def absinthe_pascalize(binary) when is_binary(binary) do
AbsintheNaming.camelize(binary, lower: false)
end
@doc """
Returns the upper case version of the string
## Examples
iex> constantize("foo_bar")
"FOO_BAR"
iex> constantize("foo")
"FOO"
iex> constantize("__foo_bar")
"FOO_BAR"
iex> constantize("__foo")
"FOO"
iex> constantize("_foo")
"FOO"
"""
@spec constantize(String.t()) :: String.t()
def constantize(binary) when is_binary(binary) do
Recase.to_constant(binary)
end
@doc """
Returns the camel case version of the string
## Examples
iex> camelize("foo_bar")
"fooBar"
iex> camelize("foo")
"foo"
iex> camelize("__foo_bar")
"fooBar"
iex> camelize("__foo")
"foo"
iex> camelize("_foo")
"foo"
"""
@spec camelize(String.t()) :: String.t()
def camelize(binary) when is_binary(binary) do
Recase.to_camel(binary)
end
@doc """
Returns the humanized case version of the string
## Examples
iex> humanize("foo_bar")
"Foo bar"
iex> humanize("foo")
"Foo"
iex> humanize("__foo_bar")
" foo bar"
iex> humanize("__foo")
" foo"
iex> humanize("_foo")
" foo"
"""
@spec humanize(String.t()) :: String.t()
def humanize(binary) when is_binary(binary) do
PhoenixNaming.humanize(binary)
end
@doc """
Returns the titleize case version of the string
## Examples
iex> titleize("foo_bar")
"Foo Bar"
iex> titleize("foo")
"Foo"
iex> titleize("__foo_bar")
"Foo Bar"
iex> titleize("__foo")
"Foo"
iex> titleize("_foo")
"Foo"
"""
@spec titleize(String.t()) :: String.t()
def titleize(binary) when is_binary(binary) do
Recase.to_title(binary)
end
@doc """
Returns the pascal case version of the string
## Examples
iex> pascalize("foo_bar")
"FooBar"
iex> pascalize("foo")
"Foo"
iex> pascalize("__foo_bar")
"FooBar"
iex> pascalize("__foo")
"Foo"
iex> pascalize("_foo")
"Foo"
"""
@spec pascalize(String.t()) :: String.t()
def pascalize(binary) when is_binary(binary) do
Recase.to_pascal(binary)
end
@doc """
Returns the singular version of the string
## Examples
iex> singularize("dogs")
"dog"
iex> singularize("people")
"person"
"""
@spec singularize(String.t()) :: String.t()
def singularize(binary) when is_binary(binary) do
Inflex.singularize(binary)
end
@doc """
Returns the plural version of the string
## Examples
iex> pluralize("dog")
"dogs"
iex> pluralize("person")
"people"
"""
@spec pluralize(String.t()) :: String.t()
def pluralize(binary) when is_binary(binary) do
Inflex.pluralize(binary)
end
@doc """
Returns the underscore version of the string
## Examples
iex> underscore("UpperCamelCase")
"upper_camel_case"
iex> underscore("pascalCase")
"pascal_case"
"""
@spec underscore(String.t()) :: String.t()
def underscore(binary) when is_binary(binary) do
Recase.to_snake(binary)
end
end
# Source: lib/dark_matter/inflections.ex
defmodule FlowAssertions.EnumA do
use FlowAssertions.Define
alias FlowAssertions.Messages
alias FlowAssertions.StructA
@moduledoc """
Assertions that apply to Enums.
"""
@doc """
Assert that an Enum has only a single element.
```
[1] |> assert_singleton # passes
[ ] |> assert_singleton # fails
%{a: 1} |> assert_singleton # passes
%{a: 1, b: 2} |> assert_singleton # fails
```
"""
defchain assert_singleton(enum),
do: singleton_or_flunk(enum)
@doc """
Returns the content element of what must be a single-element Enum.
```
[1] |> singleton_content # 1
[ ] |> singleton_content # fails
%{a: 1} |> singleton_content # the tuple {:a, 1}
5 |> singleton_content # fails
```
"""
def singleton_content(enum),
do: singleton_or_flunk(enum)
@doc """
Combines `singleton_content/1` and `FlowAssertions.StructA.assert_struct_named/2`.
```
|> VM.Animal.lift(Schema.Animal)
|> singleton_content(VM.Animal)
|> assert_fields(...)
```
In addition to checking that the value is a singleton Enumerable, it
checks that the `content` is a value of the named struct before returning it.
"""
def singleton_content(enum, struct_name) do
singleton_content(enum)
|> StructA.assert_struct_named(struct_name)
end
@doc """
Assert that an Enum has no elements.
```
[] |> assert_empty # passes
%{} |> assert_empty # passes
```
"""
defchain assert_empty(value_to_check) do
assert_enumerable(value_to_check)
elaborate_assert(Enum.empty?(value_to_check),
Messages.expected_no_element,
left: value_to_check)
end
@doc """
If the value doesn't implement `Enumerable` produces an assertion exception.
The output is more friendly than a `Protocol.UndefinedError`. So, for example,
the other assertions in this module start with this assertion.
```
defchain assert_empty(value_to_check) do
assert_enumerable(value_to_check)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
elaborate_assert(Enum.empty?(value_to_check),
Messages.expected_no_element,
left: value_to_check)
end
```
"""
defchain assert_enumerable(value_to_check) do
elaborate_assert(Enumerable.impl_for(value_to_check),
Messages.not_enumerable, left: value_to_check)
end
# ----------------------------------------------------------------------------
defp singleton_or_flunk(enum) do
assert_enumerable(enum)
case Enum.into(enum, []) do
[x] -> x
_ -> elaborate_flunk(Messages.expected_1_element, left: enum)
end
end
end
# Source: lib/enum_a.ex
defmodule Scenic.Component do
@moduledoc """
A Component is a Scene that is optimized to be used as a child of another scene.
These are typically controls that you want to define once and use in multiple places.
## Standard Components
Scenic includes several standard components that you can use in your
scenes. These were chosen to be in the main library because:
* They are used frequently
* Their use promotes a certain amount of "common" look and feel
All of these components are typically added/modified via the helper functions in the
[`Scenic.Components`](Scenic.Components.html) module.
| Helper | Component Module | Description |
|---|---|---|
| [`button/3`](Scenic.Components.html#button/3) | `Scenic.Component.Button` | A simple button |
| [`checkbox/3`](Scenic.Components.html#checkbox/3) | `Scenic.Component.Input.Checkbox` | A boolean checkbox control |
| [`dropdown/3`](Scenic.Components.html#dropdown/3) | `Scenic.Component.Input.Dropdown` | A menu-like dropdown control |
| [`radio_group/3`](Scenic.Components.html#radio_group/3) | `Scenic.Component.Input.RadioGroup` | A group of radio controls |
| [`slider/3`](Scenic.Components.html#slider/3) | `Scenic.Component.Input.Slider` | A slider ranging from one value to another |
| [`text_field/3`](Scenic.Components.html#text_field/3) | `Scenic.Component.Input.TextField` | A text input field. |
| [`toggle/3`](Scenic.Components.html#toggle/3) | `Scenic.Component.Input.Toggle` | A boolean toggle control. |
```elixir
defmodule MyApp.Scene.MyScene do
use Scenic.Scene
import Scenic.Components
@impl Scenic.Scene
def init(scene, text, opts) do
graph =
Scenic.Graph.build()
|> button( "Press Me", id: :press_me )
|> slider( {{0,100}, 0}, id: :slide_me )
{ :ok, push_graph(scene, graph) }
end
end
```
## Creating Custom Components
Creating a custom component that you can use in your scenes is just like creating a scene
with an extra validation function. This validation function is used when the graph that
uses your component is built in order to make sure it uses data that conforms to what your
component expects.
```elixir
defmodule MyApp.Component.Fancy do
use Scenic.Component
@impl Scenic.Component
def validate(data) when is_bitstring(data), do: {:ok, data}
def validate(_), do: {:error, "Descriptive error message goes here."}
@impl Scenic.Scene
def init(scene, data, opts) do
{ :ok, scene }
end
end
```
## Generating/Sending Events
Communication from a component to its parent is usually done via event messages. Scenic knows how
to route events to a component's parent. If that parent doesn't handle it, then it is automatically
routed to the parent's parent. If it gets all the way to the ViewPort itself, then it is ignored.
```elixir
defmodule MyApp.Component.Fancy do
# ... validate, and other setup ...
@impl Scenic.Scene
def init(scene, data, opts) do
# setup and push a graph here...
{ :ok, assign(scene, id: opts[:id]) }
end
@impl Scenic.Scene
def handle_input( {:cursor_button, {0, :release, _, _}}, :btn,
%Scene{assigns: %{id: id}} = scene
) do
:ok = send_parent_event( scene, {:click, id} )
{ :noreply, scene }
end
end
```
Notice how the component saved the original `id` that was passed in to the `init` function via
the `opts` list. This is then used to identify the click to the parent. This is a common pattern.
## Optional: Fetch/Put Handlers
If you would like the parent scene to be able to query your component's state without waiting
for the component to send events, you can optionally implement the following handle_call functions.
This is an "informal" spec... You don't have to implement it, but it is nice when you do.
```elixir
defmodule MyApp.Component.Fancy do
use Scenic.Component
# ... init, validate, and other functions ...
def handle_call(:fetch, _, %{assigns: %{value: value}} = scene) do
{ :reply, {:ok, value}, scene }
end
def handle_call({:put, value}, _, scene) when is_bitstring(value) do
{ :reply, :ok, assign(scene, value: value) }
end
def handle_call({:put, _}, _, scene) do
{:reply, {:error, :invalid}, scene}
end
end
```
To make the above example more practical, you would probably also modify and push a graph when
handling the `:put` message. See the code for the standard input components for deeper examples.
## Optional: `has_children: false`
If you know for certain that your component will not itself use any components, you can
set `:has_children` to `false` like this.
```elixir
defmodule MyApp.Component.Fancy do
use Scenic.Component, has_children: false
# ...
end
```
When `:has_children` is set to `false`, no `DynamicSupervisor` is started to manage the
scene's children, overall resource use is improved, and startup time is faster. You will not,
however, be able to nest components in any scene where `:has_children` is `false`.
For example, the `Scenic.Component.Button` component sets `:has_children` to `false`.
This option is available for any Scene, not just components.
"""
alias Scenic.Primitive
@doc """
Add this component to a Graph.
A standard `add_to_graph/3` is automatically added to your component. Override this
callback if you want to customize it.
"""
@callback add_to_graph(graph :: Scenic.Graph.t(), data :: any, opts :: Keyword.t()) ::
Scenic.Graph.t()
@doc """
Validate that the data for a component is correctly formed.
This callback is required.
"""
@callback validate(data :: any) :: {:ok, data :: any} | {:error, String.t()}
@doc """
Compute the bounding box of the component.
This function can be called outside of the context of a running component. The box
should be computed as if it was running with the given data and styles.
"""
@callback bounds(data :: any, styles :: map) :: Scenic.Graph.bounds()
@doc """
Provide a default pin for this component.
If this callback is not implemented, then the default pin will be {0,0}.
"""
@callback default_pin(data :: any, styles :: map) :: Scenic.Math.vector_2()
@optional_callbacks bounds: 2,
default_pin: 2
# ===========================================================================
defmodule Error do
@moduledoc false
defexception message: nil, error: nil, data: nil
end
# ===========================================================================
defmacro __using__(opts) do
quote do
@behaviour Scenic.Component
use Scenic.Scene, unquote(opts)
def add_to_graph(graph, data, opts \\ [])
def add_to_graph(%Scenic.Graph{} = graph, data, opts) do
Primitive.Component.add_to_graph(graph, {__MODULE__, data}, opts)
end
# --------------------------------------------------------
defoverridable add_to_graph: 3
end
# quote
end
# defmacro
@filter_out [
:cap,
:fill,
:font,
:font_size,
:hidden,
:input,
:join,
:line_height,
:miter_limit,
:scissor,
:stroke,
:text_align,
:text_base,
:translate,
:scale,
:rotate,
:pin,
:matrix
]
# prepare the list of opts to send to a component as it is being started up
# the main task is to remove styles that have already been consumed or don't make
# sense, while leaving any opts/styles that are intended for the component itself.
# also, add the viewport as an option.
@doc false
@spec filter_opts(opts :: Keyword.t()) :: Keyword.t()
def filter_opts(opts) when is_list(opts) do
Enum.reject(opts, fn {key, _} -> Enum.member?(@filter_out, key) end)
end
end
# Source: lib/scenic/component.ex
defmodule LcovEx.Stats do
@moduledoc """
Output parser for `:cover.analyse/3`
"""
@type cover_analyze_function_output :: [{{module(), atom(), integer()}, integer()}, ...]
@type cover_analyze_line_output :: [{{module(), integer()}, integer()}, ...]
@type coverage_info :: {binary(), integer()}
@doc """
Function coverage data parser. Discards BEAM file `:__info__/1` function data.
## Examples
iex> LcovEx.Stats.function_coverage_data([{{MyModule, :__info__, 1}, 3}, {{MyModule, :foo, 2}, 0}])
{[{"foo/2", 0}], %{fnf: 1, fnh: 0}}
"""
@spec function_coverage_data(cover_analyze_function_output()) ::
{[coverage_info(), ...], %{fnf: integer(), fnh: integer()}}
def function_coverage_data(fun_data) do
Enum.reduce_while(fun_data, {[], %{fnf: 0, fnh: 0}}, fn data,
acc = {list, %{fnf: fnf, fnh: fnh}} ->
# TODO get FN + line by inspecting file
case data do
{{_, :__info__, _1}, _} ->
{:cont, acc}
{{_mod, name, arity}, count} ->
{:cont,
{list ++ [{"#{name}/#{arity}", count}],
%{fnf: fnf + 1, fnh: fnh + ((count > 0 && 1) || 0)}}}
end
end)
end
@doc """
Line coverage data parser. Discards BEAM file line `0` data.
## Examples
iex> LcovEx.Stats.line_coverage_data([{{MyModule, 0}, 3}, {{MyModule, 0}, 0}, {{MyModule, 8}, 0}])
{[{8, 0}], %{lf: 1, lh: 0}}
iex> LcovEx.Stats.line_coverage_data([{{MyModule, 1}, 12}, {{MyModule, 1}, 0}, {{MyModule, 2}, 0}])
{[{1, 12}, {2, 0}], %{lf: 2, lh: 1}}
"""
@spec line_coverage_data(cover_analyze_line_output()) ::
{[coverage_info(), ...], %{lf: integer(), lh: integer()}}
def line_coverage_data(lines_data) do
{list_reversed, _previous_line, lf, lh} =
Enum.reduce(lines_data, {[], nil, 0, 0}, fn data, acc = {list, previous_line, lf, lh} ->
case data do
{{_, 0}, _} ->
acc
{^previous_line, count} ->
[{line, previous_count} | rest] = list
count = max(count, previous_count)
lh = increment_line_hit(lh, count, previous_count)
{[{line, count} | rest], previous_line, lf, lh}
{{_mod, line} = previous_line, count} ->
list = [{line, count} | list]
lf = lf + 1
lh = increment_line_hit(lh, count, 0)
{list, previous_line, lf, lh}
end
end)
{Enum.reverse(list_reversed), %{lf: lf, lh: lh}}
end
defp increment_line_hit(lh, count, previous_count)
defp increment_line_hit(lh, 0, _), do: lh
defp increment_line_hit(lh, _count, 0), do: lh + 1
defp increment_line_hit(lh, _, _), do: lh
end
# Source: lib/lcov_ex/stats.ex
defmodule Pie.Pipeline.Step do
@moduledoc """
Pipeline step handling
"""
defstruct context: nil,
callback: nil,
input: nil,
output: nil,
error: nil,
executed?: false,
failed?: false,
label: nil
alias Pie.State
@typedoc """
A struct to hold data about a pipeline step
"""
@type t :: %__MODULE__{
context: any(),
callback: function(),
input: any(),
error: any(),
output: any(),
executed?: boolean(),
failed?: boolean(),
label: any()
}
@doc """
Creates a new step
"""
@spec new(function(), any(), Keyword.t()) :: t()
def new(fun, context \\ nil, options \\ []) when is_function(fun) do
%__MODULE__{
callback: fun,
context: context,
label: options[:label]
}
end
@doc """
Executes the step and track the data about its input and output
"""
@spec execute(t(), State.t(), boolean()) :: {t(), State.t()}
def execute(step = %__MODULE__{executed?: false}, state, capture_error? \\ false) do
case run_step(step, state, capture_error?) do
{:ok, updated_state} ->
failed? = state.valid? && !updated_state.valid?
updated_step = %__MODULE__{
step
| input: state.current_value,
output: if(failed?, do: nil, else: updated_state.current_value),
executed?: state.valid?,
failed?: failed?
}
{updated_step, updated_state}
{:error, error} ->
updated_state = State.invalidate(state, :error)
updated_step = %__MODULE__{
step
| input: state.current_value,
output: nil,
executed?: true,
failed?: true,
error: error
}
{updated_step, updated_state}
end
end
defp run_step(step, state, _capture_error = true) do
try do
run_step(step, state, false)
rescue
error -> {:error, error}
end
end
defp run_step(step, state, _capture_error) do
{:ok, step.callback.(state, step.context)}
end
end
# Source: lib/pie/pipeline/step.ex
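# A minimal sketch of driving a step by hand. Pie.State is not shown in this
# file; execute/3 only reads valid? and current_value on the happy path, so a
# bare map with those keys stands in for a real %Pie.State{} below:
step =
  Pie.Pipeline.Step.new(
    fn state, _ctx -> %{state | current_value: state.current_value + 1} end,
    nil,
    label: :increment
  )
{executed_step, new_state} = Pie.Pipeline.Step.execute(step, %{valid?: true, current_value: 41})
executed_step.input     # => 41
executed_step.output    # => 42
executed_step.executed? # => true
new_state.current_value # => 42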
defmodule Part1 do
def run() do
AOCHelper.read_input()
|> find_closest_intersection()
end
def find_closest_intersection(wires) do
wires
|> GridHelper.draw_wires(%{})
|> find_smallest_distance()
end
defp find_smallest_distance(grid) do
distances =
GridHelper.crossings(grid)
|> Enum.map(fn {{x, y}, _} ->
abs(x) + abs(y)
end)
distances
|> Enum.sort()
|> hd()
end
end
defmodule Part2 do
def run() do
AOCHelper.read_input()
|> draw_and_find_intersection()
end
def draw_and_find_intersection(wires) do
wires
|> GridHelper.draw_wires(%{})
|> find_minimal_wire_length_intersection()
end
def find_minimal_wire_length_intersection(grid) do
distances =
GridHelper.crossings(grid)
|> Enum.map(fn {_, entry} ->
entry.lengths
|> Map.values
|> Enum.sum
end)
distances
|> Enum.sort()
|> hd()
end
end
defmodule GridHelper do
def draw_wires(wires, grid) do
wires
|> Enum.reduce(grid, fn instructions, grid ->
draw_wire(instructions, grid)
end)
end
defp draw_wire(instructions, grid) do
start_point = %{x: 0, y: 0}
line_id = AOCHelper.random_string(3)
instructions
|> Enum.reduce(%{point: start_point, grid: grid, wire_length: 1}, fn instruction, acc ->
{end_point, updated_grid, updated_wire_length} =
draw_line(
acc.grid,
acc.point,
instruction,
line_id,
acc.wire_length
)
%{
point: end_point,
grid: updated_grid,
wire_length: updated_wire_length
}
end)
|> Map.fetch!(:grid)
end
defp draw_line(grid, from, instruction, line_id, wire_length) do
points =
case instruction do
"U" <> l ->
l = String.to_integer(l)
for y <- from.y-1..from.y-l, do: {from.x, y}
"R" <> l ->
l = String.to_integer(l)
for x <- from.x+1..from.x+l, do: {x, from.y}
"D" <> l ->
l = String.to_integer(l)
for y <- from.y+1..from.y+l, do: {from.x, y}
"L" <> l ->
l = String.to_integer(l)
for x <- from.x-1..from.x-l, do: {x, from.y}
end
updated =
points
|> Enum.reduce(%{grid: grid, wire_length: wire_length}, fn p, acc ->
{updated_grid, updated_wire_length} = add_point(acc.grid, p, line_id, acc.wire_length)
%{grid: updated_grid, wire_length: updated_wire_length}
end)
{x, y} = points |> Enum.reverse |> hd
{%{x: x, y: y}, updated.grid, updated.wire_length}
end
defp add_point(grid, point, line_id, wire_length) do
grid_entry = Map.get(grid, point, %{wires: MapSet.new(), lengths: %{}})
updated_wires =
grid_entry.wires
|> MapSet.put(line_id)
updated_length =
grid_entry.lengths
|> Map.put(line_id, wire_length)
grid = Map.put(grid, point, %{wires: updated_wires, lengths: updated_length})
{grid, wire_length + 1}
end
def crossings(grid) do
grid
|> Enum.filter(fn {_, entry} ->
length(MapSet.to_list(entry.wires)) == 2
end)
end
end
defmodule AOCHelper do
def read_input() do
"input.txt"
|> File.read!()
|> String.split("\n")
|> Enum.map(&(String.split(&1, ",")))
end
def random_string(length) do
:crypto.strong_rand_bytes(length) |> Base.url_encode64 |> binary_part(0, length)
end
end
# Source: aoc-2019/day3/lib/part1.ex
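# The day-3 helpers compose without the input file; a tiny two-wire example
# (paths made up; note "U" decreases y in this grid):
wires = [
  ["R2", "U1"], # first wire: right 2, up 1
  ["U1", "R2"]  # second wire: up 1, right 2
]
Part1.find_closest_intersection(wires)  # => 3 (crossing at {2, -1})
Part2.draw_and_find_intersection(wires) # => 6 (combined wire length to reach it)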
defmodule Paramsx do
@moduledoc """
Paramsx provides functionality to whitelist and validate parameters
"""
@doc """
Filter params based on your required and optional keywords.
Important: you have to allow every param explicitly. By default only string or number
values are allowed for a simple key; to allow nested params, specify a keyword list with the allowed keys, as in the last example.
## Examples
iex> Paramsx.filter(%{"foo" => "bar", "foo2" => "bar2"}, required: [:foo])
{:ok, %{foo: "bar"}}
iex> Paramsx.filter(%{"foo" => "bar", "foo2" => "bar2"}, required: [:foo3])
{:error, %{missing_keys: [:foo3]}}
iex> Paramsx.filter(%{"foo" => "bar", "foo2" => "bar2"}, required: [:foo], optional: [:foo3])
{:ok, %{foo: "bar"}}
iex> Paramsx.filter(%{"foo" => %{"bar" => "value_bar"}}, required: [:foo])
{:error, %{missing_keys: [:foo]}}
iex> Paramsx.filter(%{"foo" => %{"bar" => "value_bar"}}, required: [foo: [:bar]])
{:ok, %{foo: %{bar: "value_bar"}}}
iex> Paramsx.filter(%{"foo" => [%{"bar" => "value_bar"}]}, required: [foo_list: [:bar]])
{:ok, %{foo: [%{bar: "value_bar"}]}}
"""
@types ["list"]
def filter(params, filters) when params == %{},
do: {:error, %{missing_keys: Keyword.get(filters, :required, [])}}
def filter(params, filters) when is_map(params) and is_list(filters) do
required = Keyword.get(filters, :required, [])
optional = Keyword.get(filters, :optional, [])
params
|> filter_required(required)
|> filter_optional(params, optional)
end
defp filter_required(params, filters),
do: reduce_filters(filters, %{}, params, :required)
defp filter_optional(%{} = acc, params, filters),
do: {:ok, reduce_filters(filters, acc, params, :optional)}
defp filter_optional(missing, _params, _filters), do: {:error, %{missing_keys: missing}}
defp reduce_filters(filters, acc, params, mode),
do: Enum.reduce(filters, acc, &reduce_fun(&1, &2, params, mode))
defp reduce_fun([{key, filters}], acc, params, mode) when is_list(filters) do
{:ok, %{key: key}} =
key
|> split_word_by_dash()
|> key_type()
reduce_fun_for_nested(key, filters, acc, params, mode)
end
defp reduce_fun({key, filters}, acc, params, mode) when is_list(filters) do
key
|> split_word_by_dash()
|> key_type()
|> verify_list_of_atoms(filters)
|> call_for(filters, acc, params, mode)
end
defp reduce_fun(_key, %{} = acc, [], _mode), do: acc
defp reduce_fun(key, %{} = acc, %{} = params, mode) do
case fetch(params, key) do
{:ok, value} when is_binary(value) or is_number(value) -> Map.put(acc, key, value)
{:ok, _value} -> handle_missing_key(mode, acc, key)
_not_found -> handle_missing_key(mode, acc, key)
end
end
defp reduce_fun(key, missing, params, _mode) when is_list(missing) do
if Map.has_key?(params, to_string(key)) do
missing
else
[key | missing]
end
end
defp reduce_fun_for_nested(key, filters, acc, params, mode) do
case fetch(params, key) do
{:ok, value} -> handle_partial(mode, reduce_filters(filters, %{}, value, mode), key, acc)
_not_found -> handle_partial(mode, filters, key, acc)
end
end
defp handle_partial(_mode, value, _key, acc) when is_list(acc) and is_map(value),
do: acc
defp handle_partial(_mode, value, key, acc) when is_map(acc) and is_map(value),
do: Map.put(acc, key, value)
defp handle_partial(:required, missing, key, %{}) when is_list(missing), do: [{key, missing}]
defp handle_partial(:required, missing, key, acc)
when is_list(missing) and is_list(acc) and is_atom(key),
do: [[{key, missing}] | acc]
defp handle_partial(:optional, missing, _k, acc) when is_list(missing) and is_list(acc), do: acc
defp handle_partial(_mode, value, _key, acc) when is_map(acc) and is_list(value),
do: acc
defp generate_list_of_params(_keys, acc, _params, _key, _mode) when is_list(acc), do: acc
defp generate_list_of_params(keys, %{} = acc, params, key, mode) do
case fetch(params, key) do
{:ok, nested_params} ->
nested_params
|> Enum.reduce([], &create_nested_map(&1, &2, keys, mode))
|> update_acc_with_generated_list(acc, key, mode)
_not_found ->
handle_missing_key(mode, acc, key)
end
end
defp create_nested_map(params, list_acc, keys, _mode) do
case create_params_map(keys, params) do
%{} = acc -> Enum.concat([acc], list_acc)
keys_not_found -> keys_not_found
end
end
defp create_params_map(keys, params) do
Enum.reduce(keys, %{}, fn key, map_acc ->
case fetch(params, key) do
{:ok, value} -> set_new_value(map_acc, key, value)
_not_found -> set_new_value(map_acc, key, [key])
end
end)
end
defp set_new_value(acc, key, value) when is_list(acc) and is_list(value),
do: [key | acc]
defp set_new_value(_acc, _key, value) when is_list(value), do: value
defp set_new_value(acc, key, value) when is_map(acc), do: Map.put(acc, key, value)
defp update_acc_with_generated_list(generated_list, acc, key, mode) do
case list_of_atoms?(generated_list) do
true -> handle_missing_key(mode, acc, {key, generated_list})
false -> Map.put(acc, key, generated_list)
end
end
defp handle_missing_key(:required, _acc, key), do: [key]
defp handle_missing_key(:optional, acc, _key), do: acc
defp call_for({:ok, key}, filters, acc, params, mode),
do: generate_list_of_params(filters, acc, params, key, mode)
defp call_for({:error, key}, filters, acc, params, mode),
do: reduce_fun_for_nested(key, filters, acc, params, mode)
defp split_word_by_dash(key), do: key |> to_string() |> String.split("_")
defp key_type([key]), do: {:ok, %{key: String.to_atom(key), type: "default"}}
defp key_type(key) when is_list(key) do
{value, list} = List.pop_at(key, -1)
if value in @types do
{:ok, %{key: join_to_atom(list), type: "list"}}
else
{:ok, %{key: join_to_atom(key), type: "default"}}
end
end
defp join_to_atom(list), do: list |> Enum.join("_") |> String.to_atom()
defp verify_list_of_atoms({:ok, %{type: "default", key: [key]}}, _list),
do: {:error, key}
defp verify_list_of_atoms({:ok, %{type: "default", key: key}}, _list),
do: {:error, key}
defp verify_list_of_atoms({:ok, %{type: "list", key: key}}, list) do
if list_of_atoms?(list) do
{:ok, key}
else
{:error, key}
end
end
defp list_of_atoms?(list), do: Enum.all?(list, &is_atom/1)
defp fetch(map, key) when is_atom(key) and is_map(map), do: Map.fetch(map, to_string(key))
end
|
lib/paramsx.ex
| 0.725649 | 0.488893 |
paramsx.ex
|
starcoder
|
defmodule Adventofcode.Day23ExperimentalEmergencyTeleportation do
use Adventofcode
defmodule Part1 do
alias Adventofcode.Day23ExperimentalEmergencyTeleportation.Nanobots
def solve(input) do
input
|> Nanobots.parse()
|> nanobots_within_range_of_strongest_one()
|> Enum.count()
end
defp nanobots_within_range_of_strongest_one(nanobots) do
nanobots
|> strongest_nanobot
|> Nanobots.nanobots_within_range_of(nanobots)
end
defp strongest_nanobot(nanobots) do
Enum.max_by(nanobots, fn {_x, _y, _z, radius} -> radius end)
end
end
defmodule Part2 do
alias Adventofcode.Day23ExperimentalEmergencyTeleportation.Nanobots
def solve(input) do
input
|> coordinate_in_range_of_most_nanobots()
|> distance_to_zero_for_closest_nanobot()
end
def coordinate_in_range_of_most_nanobots(input) do
input
|> Nanobots.parse()
|> nanobots_within_range_of_coordinates()
|> sort_coordinates_by_range()
|> hd()
end
defp distance_to_zero_for_closest_nanobot({_coordinate, nanobots}) do
nanobots
|> Enum.map(&distance_to_zero/1)
|> Enum.min()
end
defp sort_coordinates_by_range(coordinates) do
Enum.sort_by(coordinates, &build_range/1, &sort_by_range/2)
end
defp build_range({coordinate, nanobots}) do
{length(nanobots), distance_to_zero(coordinate)}
end
defp sort_by_range({length1, distance1}, {length2, distance2}) do
length1 > length2 && distance1 <= distance2
end
def distance_to_zero(coordinate) do
Nanobots.manhattan_distance(coordinate, {0, 0, 0, nil})
end
defp nanobots_within_range_of_coordinates(nanobots) do
nanobots
|> determine_axis_ranges()
|> build_coordinates()
|> do_determine_nanobots_within_range(nanobots)
end
defp do_determine_nanobots_within_range(coordinates, nanobots) do
coordinates
|> Enum.map(&{&1, determine_nanobots_within_range(&1, nanobots)})
|> Enum.sort_by(fn {_coordinate, nanobots} -> length(nanobots) end)
end
defp determine_nanobots_within_range(coordinate, nanobots) do
Enum.filter(nanobots, &Nanobots.within_range_of?(&1, coordinate))
end
defp determine_axis_ranges(nanobots) do
nanobots
|> Enum.map(fn {x, y, z, _radius} -> [x, y, z] end)
|> transpose()
|> Enum.map(&build_min_max_range/1)
|> List.to_tuple()
end
defp build_coordinates({x_range, y_range, z_range}) do
for x <- x_range, y <- y_range, z <- z_range, do: {x, y, z, nil}
end
defp transpose(list_of_lists) do
list_of_lists
|> List.zip()
|> Enum.map(&Tuple.to_list/1)
end
defp build_min_max_range(values) do
Enum.min(values)..Enum.max(values)
end
end
defmodule Nanobots do
def nanobots_within_range_of(nanobot, nanobots) do
Enum.filter(nanobots, &within_range_of?(nanobot, &1))
end
def within_range_of?({_, _, _, radius} = nanobot, other) do
manhattan_distance(nanobot, other) <= radius
end
def manhattan_distance({x1, y1, z1, _}, {x2, y2, z2, _}) do
abs(x1 - x2) + abs(y1 - y2) + abs(z1 - z2)
end
def parse(input) do
input
|> String.trim_trailing("\n")
|> String.split("\n")
|> Enum.map(&parse_line/1)
end
defp parse_line(line) do
~r/-?\d+/
|> Regex.scan(line)
|> List.flatten()
|> Enum.map(&String.to_integer/1)
|> List.to_tuple()
end
end
end
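A worked example using the puzzle's published sample input (nine nanobots; the strongest has radius 4 and covers seven of them):

```elixir
input = """
pos=<0,0,0>, r=4
pos=<1,0,0>, r=1
pos=<4,0,0>, r=3
pos=<0,2,0>, r=1
pos=<0,5,0>, r=3
pos=<0,0,3>, r=1
pos=<1,1,1>, r=1
pos=<1,1,2>, r=1
pos=<1,3,1>, r=1
"""

Adventofcode.Day23ExperimentalEmergencyTeleportation.Part1.solve(input)
#=> 7
```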
|
lib/day_23_experimental_emergency_teleportation.ex
| 0.670393 | 0.652619 |
day_23_experimental_emergency_teleportation.ex
|
starcoder
|
defmodule ExGherkin.Scanner.LanguageSupport do
@gherkin_languages_source Application.get_env(:my_ex_gherkin, :file).source
@gherkin_languages_resource Application.get_env(:my_ex_gherkin, :file).resource
@homonyms Application.get_env(:my_ex_gherkin, :homonyms)
@moduledoc_homonyms @homonyms |> Enum.map(&" * '#{&1}'") |> Enum.join("\n")
@moduledoc """
The main purpose of this module is to facilitate full international
language support by normalizing each entry under
'#{@gherkin_languages_source}' to the following format:
```elixir
%{
# Top Level Gherkin Keywords
feature: ["Feature", "Business Need", "Ability"],
rule: ["Rule"],
background: ["Background"],
scenario_outline: ["Scenario Outline", "Scenario Template"],
example: ["Example", "Scenario"],
examples: ["Examples", "Scenarios"],
# Step Level Gherkin Keywords
given: ["Given "],
when: ["When "],
then: ["Then "],
and: ["And "],
but: ["But "],
# Meta
name: "English",
native: "English",
direction: :ltr,
homonyms: %{
"* " => %{
given: :when,
when: :then,
then: :and,
and: :and,
but: :but,
default: :given,
}
},
}
```
and persisting the same as '#{@gherkin_languages_resource}'.
The `# Meta` section comprises the keys `:name` and `:native`, which
are part and parcel of the #{@gherkin_languages_source} standard. The
other keys are newly introduced:
* `:direction` is to designate if it pertains a `:ltr`(Left to Right)
or `:rtl` (Right to Left) language. This can be derived thanks to
the contents under `:native`.
* `:homonyms` represent the various keywords that are the same
across languages, such as "* " to mean any of the Step Level Gherkin
Keywords, or within a language, such as `"Tha "` in Old English to
mean `When` and `Then`. The homonyms that currently exist are:
#{@moduledoc_homonyms}
Each homonym has a sequence of keywords that it can logically resolve
to. For the sample above, this would mean that the
following feature:
```cucumber
Feature: Some Feature
Scenario: Some Scenario
* A
* B
* C
* D
```
could be interpreted as:
```cucumber
Feature: Some Feature
Scenario: Some Scenario
Given A
When B
Then C
And D
```
"""
def gherkin_languages_source, do: @gherkin_languages_source
def gherkin_languages_resource, do: @gherkin_languages_resource
@doc """
Convenience function that provides the contents under the resource:
'#{@gherkin_languages_resource}'
"""
def all, do: load()
@doc """
Saves parsed content to: '#{@gherkin_languages_resource}' in `binary`
format.
"""
def unload(
source \\ @gherkin_languages_source,
resource \\ @gherkin_languages_resource,
homonyms \\ @homonyms,
languages \\ :all
) do
content =
source
|> parse(homonyms, languages)
|> :erlang.term_to_binary()
File.write!(resource, content)
end
@doc """
Loads: '#{@gherkin_languages_resource}' as Erlang compatible `terms`.
"""
def load(resource \\ @gherkin_languages_resource) do
resource
|> File.read!()
|> :erlang.binary_to_term()
end
@doc """
Parses the content of '#{@gherkin_languages_source}' into the desired
format.
"""
def parse(source \\ @gherkin_languages_source, all_homonyms \\ @homonyms, languages \\ :all) do
source
|> File.read!()
|> :jiffy.decode([:return_maps, :copy_strings])
|> filter_languages_to_use(languages)
|> Enum.reduce(%{}, fn {language, translations}, a ->
{%{homonyms: homonyms}, remainder} =
Enum.reduce(translations, %{}, fn
{"name", val}, a ->
Map.put(a, :name, val)
{"native", val}, a ->
Map.put(a, :native, val)
{key, vals}, a ->
normalized_key = handle_key(key)
{homonyms, remainder} =
vals
|> Enum.uniq()
|> separate_out_homonyms(all_homonyms)
a
|> Map.put(normalized_key, remainder)
|> put_in([Access.key(:homonyms, %{}), normalized_key], homonyms)
end)
|> Map.put(:direction, :ltr)
|> Map.split([:homonyms])
normalized_homonyms =
homonyms
|> Enum.reduce(%{}, fn
{_, :none}, a ->
a
{keyword, homonyms_for_keyword}, a ->
Enum.reduce(homonyms_for_keyword, a, fn homonym, a ->
put_in(
a,
[Access.key(homonym, %{}), keyword],
next_keyword(keyword, homonym, homonyms)
)
end)
end)
|> Enum.reduce(%{}, fn {homonym, keywords_sequence}, a ->
default_homonym =
cond do
keywords_sequence[:given] -> :given
keywords_sequence[:when] -> :when
keywords_sequence[:then] -> :then
keywords_sequence[:and] -> :and
keywords_sequence[:but] -> :but
true -> raise "Developer Error. Keywords Sequence Has No Members"
end
put_in(a, [Access.key(homonym, keywords_sequence), :default], default_homonym)
end)
normalized_translations = Map.put(remainder, :homonyms, normalized_homonyms)
Map.put(a, language, normalized_translations)
end)
end
defp filter_languages_to_use(all_json_entries, :all), do: all_json_entries
defp filter_languages_to_use(all_json_entries, languages),
do: Enum.filter(all_json_entries, fn {k, _} -> k in languages end)
defp separate_out_homonyms(words, homonyms) do
words
|> Enum.split_with(fn e -> e in homonyms end)
|> case do
{[], ^words} -> {:none, words}
partitioned_result -> partitioned_result
end
end
defp next_keyword(:given, homonym, homonyms) do
cond do
homonym in (homonyms[:when] || []) -> :when
homonym in (homonyms[:then] || []) -> :then
homonym in (homonyms[:and] || []) -> :and
true -> :given
end
end
defp next_keyword(:when, homonym, homonyms) do
cond do
homonym in (homonyms[:then] || []) -> :then
homonym in (homonyms[:and] || []) -> :and
true -> :given
end
end
defp next_keyword(:then, homonym, homonyms) do
cond do
homonym in (homonyms[:and] || []) -> :and
true -> :given
end
end
defp next_keyword(:and, homonym, homonyms) do
cond do
homonym in (homonyms[:and] || []) -> :and
true -> :given
end
end
defp next_keyword(:but, homonym, homonyms) do
cond do
homonym in (homonyms[:but] || []) -> :but
homonym in (homonyms[:and] || []) -> :and
true -> :given
end
end
defp handle_key("scenarioOutline"), do: :scenario_outline
defp handle_key("scenario"), do: :example
defp handle_key(key), do: String.to_atom(key)
end
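A hedged sketch of how a consumer might walk the normalized homonym table (`resolve_step` is illustrative and not part of this module; `translations` is assumed to be one language entry from `all/0`, shaped like the moduledoc example):

```elixir
resolve_step = fn translations, raw_keyword, previous ->
  case translations.homonyms[raw_keyword] do
    nil -> raw_keyword
    sequence -> Map.get(sequence, previous, sequence.default)
  end
end

# With the "* " entry from the moduledoc sample:
# resolve_step.(translations, "* ", nil)    #=> :given (the default)
# resolve_step.(translations, "* ", :given) #=> :when
# resolve_step.(translations, "* ", :when)  #=> :then
```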
|
lib/scanner/lib/language_support.ex
| 0.804981 | 0.736424 |
language_support.ex
|
starcoder
|
defmodule AWS.Snowball do
@moduledoc """
AWS Snow Family is a petabyte-scale data transport solution that uses secure
devices to transfer large amounts of data between your on-premises data centers
and Amazon Simple Storage Service (Amazon S3).
The Snow commands described here provide access to the same functionality that
is available in the AWS Snow Family Management Console, which enables you to
create and manage jobs for a Snow device. To transfer data locally with a Snow
device, you'll need to use the Snowball Edge client or the Amazon S3 API
Interface for Snowball or AWS OpsHub for Snow Family. For more information, see
the [User Guide](https://docs.aws.amazon.com/AWSImportExport/latest/ug/api-reference.html).
"""
@doc """
Cancels a cluster job.
You can only cancel a cluster job while it's in the `AwaitingQuorum` status.
You'll have at least an hour after creating a cluster job to cancel it.
"""
def cancel_cluster(client, input, options \\ []) do
request(client, "CancelCluster", input, options)
end
@doc """
Cancels the specified job.
You can only cancel a job before its `JobState` value changes to
`PreparingAppliance`. Requesting the `ListJobs` or `DescribeJob` action returns
a job's `JobState` as part of the response element data returned.
"""
def cancel_job(client, input, options \\ []) do
request(client, "CancelJob", input, options)
end
@doc """
Creates an address for a Snow device to be shipped to.
In most regions, addresses are validated at the time of creation. The address
you provide must be located within the serviceable area of your region. If the
address is invalid or unsupported, then an exception is thrown.
"""
def create_address(client, input, options \\ []) do
request(client, "CreateAddress", input, options)
end
@doc """
Creates an empty cluster.
Each cluster supports five nodes. You use the `CreateJob` action separately to
create the jobs for each of these nodes. The cluster does not ship until these
five node jobs have been created.
"""
def create_cluster(client, input, options \\ []) do
request(client, "CreateCluster", input, options)
end
@doc """
Creates a job to import or export data between Amazon S3 and your on-premises
data center.
Your AWS account must have the right trust policies and permissions in place to
create a job for a Snow device. If you're creating a job for a node in a
cluster, you only need to provide the `clusterId` value; the other job
attributes are inherited from the cluster.
"""
def create_job(client, input, options \\ []) do
request(client, "CreateJob", input, options)
end
@doc """
Creates a shipping label that will be used to return the Snow device to AWS.
"""
def create_return_shipping_label(client, input, options \\ []) do
request(client, "CreateReturnShippingLabel", input, options)
end
@doc """
Takes an `AddressId` and returns specific details about that address in the form
of an `Address` object.
"""
def describe_address(client, input, options \\ []) do
request(client, "DescribeAddress", input, options)
end
@doc """
Returns a specified number of `ADDRESS` objects.
Calling this API in one of the US regions will return addresses from the list of
all addresses associated with this account in all US regions.
"""
def describe_addresses(client, input, options \\ []) do
request(client, "DescribeAddresses", input, options)
end
@doc """
Returns information about a specific cluster including shipping information,
cluster status, and other important metadata.
"""
def describe_cluster(client, input, options \\ []) do
request(client, "DescribeCluster", input, options)
end
@doc """
Returns information about a specific job including shipping information, job
status, and other important metadata.
"""
def describe_job(client, input, options \\ []) do
request(client, "DescribeJob", input, options)
end
@doc """
Information on the shipping label of a Snow device that is being returned to
AWS.
"""
def describe_return_shipping_label(client, input, options \\ []) do
request(client, "DescribeReturnShippingLabel", input, options)
end
@doc """
Returns a link to an Amazon S3 presigned URL for the manifest file associated
with the specified `JobId` value.
You can access the manifest file for up to 60 minutes after this request has
been made. To access the manifest file after 60 minutes have passed, you'll have
to make another call to the `GetJobManifest` action.
The manifest is an encrypted file that you can download after your job enters
the `WithCustomer` status. The manifest is decrypted by using the `UnlockCode`
code value, when you pass both values to the Snow device through the Snowball
client when the client is started for the first time.
As a best practice, we recommend that you don't save a copy of an `UnlockCode`
value in the same location as the manifest file for that job. Saving these
separately helps prevent unauthorized parties from gaining access to the Snow
device associated with that job.
The credentials of a given job, including its manifest file and unlock code,
expire 90 days after the job is created.
"""
def get_job_manifest(client, input, options \\ []) do
request(client, "GetJobManifest", input, options)
end
@doc """
Returns the `UnlockCode` code value for the specified job.
A particular `UnlockCode` value can be accessed for up to 90 days after the
associated job has been created.
The `UnlockCode` value is a 29-character code with 25 alphanumeric characters
and 4 hyphens. This code is used to decrypt the manifest file when it is passed
along with the manifest to the Snow device through the Snowball client when the
client is started for the first time.
As a best practice, we recommend that you don't save a copy of the `UnlockCode`
in the same location as the manifest file for that job. Saving these separately
helps prevent unauthorized parties from gaining access to the Snow device
associated with that job.
"""
def get_job_unlock_code(client, input, options \\ []) do
request(client, "GetJobUnlockCode", input, options)
end
@doc """
Returns information about the Snow Family service limit for your account, and
also the number of Snow devices your account has in use.
The default service limit for the number of Snow devices that you can have at
one time is 1. If you want to increase your service limit, contact AWS Support.
"""
def get_snowball_usage(client, input, options \\ []) do
request(client, "GetSnowballUsage", input, options)
end
@doc """
Returns an Amazon S3 presigned URL for an update file associated with a
specified `JobId`.
"""
def get_software_updates(client, input, options \\ []) do
request(client, "GetSoftwareUpdates", input, options)
end
@doc """
Returns an array of `JobListEntry` objects of the specified length.
Each `JobListEntry` object is for a job in the specified cluster and contains a
job's state, a job's ID, and other information.
"""
def list_cluster_jobs(client, input, options \\ []) do
request(client, "ListClusterJobs", input, options)
end
@doc """
Returns an array of `ClusterListEntry` objects of the specified length.
Each `ClusterListEntry` object contains a cluster's state, a cluster's ID, and
other important status information.
"""
def list_clusters(client, input, options \\ []) do
request(client, "ListClusters", input, options)
end
@doc """
This action returns a list of the different Amazon EC2 Amazon Machine Images
(AMIs) that are owned by your AWS account that would be supported for use on a
Snow device.
Currently, supported AMIs are based on the CentOS 7 (x86_64) - with Updates HVM,
Ubuntu Server 14.04 LTS (HVM), and Ubuntu 16.04 LTS - Xenial (HVM) images,
available on the AWS Marketplace.
"""
def list_compatible_images(client, input, options \\ []) do
request(client, "ListCompatibleImages", input, options)
end
@doc """
Returns an array of `JobListEntry` objects of the specified length.
Each `JobListEntry` object contains a job's state, a job's ID, and a value that
indicates whether the job is a job part, in the case of export jobs. Calling
this API action in one of the US regions will return jobs from the list of all
jobs associated with this account in all US regions.
"""
def list_jobs(client, input, options \\ []) do
request(client, "ListJobs", input, options)
end
@doc """
While a cluster's `ClusterState` value is in the `AwaitingQuorum` state, you can
update some of the information associated with a cluster.
Once the cluster changes to a different job state, usually 60 minutes after the
cluster being created, this action is no longer available.
"""
def update_cluster(client, input, options \\ []) do
request(client, "UpdateCluster", input, options)
end
@doc """
While a job's `JobState` value is `New`, you can update some of the information
associated with a job.
Once the job changes to a different job state, usually within 60 minutes of the
job being created, this action is no longer available.
"""
def update_job(client, input, options \\ []) do
request(client, "UpdateJob", input, options)
end
@doc """
Updates the state when the shipment state changes to a different state.
"""
def update_job_shipment_state(client, input, options \\ []) do
request(client, "UpdateJobShipmentState", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "snowball"}
host = build_host("snowball", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSIESnowballJobManagementService.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
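A minimal usage sketch, assuming an `%AWS.Client{}` struct that carries the fields `request/4` reads (`region`, `endpoint`, `proto`, `port`); exact client construction varies by aws-elixir version:

```elixir
client = %AWS.Client{
  access_key_id: System.get_env("AWS_ACCESS_KEY_ID"),
  secret_access_key: System.get_env("AWS_SECRET_ACCESS_KEY"),
  region: "us-west-2"
}

# `body` is the decoded JSON response on success.
{:ok, body, _response} = AWS.Snowball.list_jobs(client, %{"MaxResults" => 25})
```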
|
lib/aws/generated/snowball.ex
| 0.894222 | 0.694685 |
snowball.ex
|
starcoder
|
defmodule RedisGraph.Graph do
@moduledoc """
A Graph consisting of `RedisGraph.Node`s and `RedisGraph.Edge`s.
A name is required for each graph.
Construct graphs by adding `RedisGraph.Node`s followed
by `RedisGraph.Edge`s which relate existing nodes.
If a node does not have an alias, a random alias will
be created for it when adding to a `RedisGraph.Graph`.
Edges cannot be added unless both the source node and
destination node aliases already exist in the graph.
"""
alias RedisGraph.Edge
alias RedisGraph.Node
@type t() :: %__MODULE__{
name: String.t(),
nodes: %{optional(String.t()) => Node.t()},
edges: list(Edge.t())
}
@enforce_keys [:name]
defstruct [
:name,
nodes: %{},
edges: []
]
@doc """
Create a graph from a map.
## Example
```
alias RedisGraph.{Node, Edge, Graph, QueryResult}
# Create a graph
graph = Graph.new(%{
name: "social"
})
# Create a node
john = Node.new(%{
label: "person",
properties: %{
name: "<NAME>",
age: 33,
gender: "male",
status: "single"
}
})
# Add the node to the graph
# The graph and node are returned
# The node may be modified if no alias has been set
# For this reason, nodes should always be added to the graph
# before creating edges between them.
{graph, john} = Graph.add_node(graph, john)
# Create a second node
japan = Node.new(%{
label: "country",
properties: %{
name: "Japan"
}
})
# Add the second node
{graph, japan} = Graph.add_node(graph, japan)
# Create an edge connecting the two nodes
edge = Edge.new(%{
src_node: john,
dest_node: japan,
relation: "visited"
})
# Add the edge to the graph
# If the nodes are not present, an {:error, error} is returned
{:ok, graph} = Graph.add_edge(graph, edge)
```
"""
@spec new(map()) :: t()
def new(map) do
struct(__MODULE__, map)
end
@doc """
Add a `RedisGraph.Node` to a graph.
Creates a random string alias for the Node
if the Node has no alias.
"""
@spec add_node(t(), Node.t()) :: {t(), Node.t()}
def add_node(graph, node) do
node = Node.set_alias_if_nil(node)
{%{graph | nodes: Map.put(graph.nodes, node.alias, node)}, node}
end
@doc """
Add a `RedisGraph.Edge` to a graph.
If the source node or destination node are not part of the
graph, then the edge cannot be added. Uses node aliases
to check graph membership.
"""
@spec add_edge(t(), Edge.t()) :: {:ok, t()} | {:error, any()}
def add_edge(graph, edge) do
cond do
not node_in_graph?(graph, edge.src_node) -> {:error, "source node not in graph"}
not node_in_graph?(graph, edge.dest_node) -> {:error, "destination node not in graph"}
true -> {:ok, %{graph | edges: graph.edges ++ [edge]}}
end
end
defp node_in_graph?(graph, node) do
Map.has_key?(graph.nodes, node.alias)
end
end
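A short sketch of the membership guard: edges whose endpoints were never added are rejected, because aliases are only assigned by `add_node/2`.

```elixir
alias RedisGraph.{Edge, Graph, Node}

{graph, alice} = Graph.add_node(Graph.new(%{name: "g"}), Node.new(%{label: "person"}))
bob = Node.new(%{label: "person"})

# `bob` was never added, so its alias is unknown to the graph.
{:error, "destination node not in graph"} =
  Graph.add_edge(graph, Edge.new(%{src_node: alice, dest_node: bob, relation: "knows"}))
```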
|
lib/redis_graph/graph.ex
| 0.918968 | 0.907681 |
graph.ex
|
starcoder
|
defprotocol SimpleMarkdown.Renderer.HTML.AST do
@moduledoc """
A renderer protocol for HTML AST.
Individual rule renderers can be overridden or new ones may be
added. Rule types follow the format of structs defined under
`SimpleMarkdown.Attribute.*`. e.g. If there is a rule with the
name `:header`, to provide a rendering implementation for that
rule, you would specify `for: SimpleMarkdown.Attribute.Header`.
Rules then consist of a Map with an `input` field, and an optional
`option` field. See `t:SimpleMarkdown.attribute/0`.
HTML vs AST
-----------
The AST format (`t:SimpleMarkdown.Renderer.HTML.Utilities.ast/0`) provides a
more flexible, general-purpose way of structuring HTML, while plain HTML provides a
more cumbersome but explicit way of structuring the rendered output.
When there is no implementation for a certain rule it will fallback to the
`SimpleMarkdown.Renderer.HTML` renderer (if one exists) and will convert that
HTML to AST using `SimpleMarkdown.Renderer.HTML.Utilities.html_to_ast/2`. So
you only need to maintain one set of implementations to cover all HTML renderers.
Example
-------
defimpl SimpleMarkdown.Renderer.HTML.AST, for: SimpleMarkdown.Attribute.Header do
def render(%{ input: input, option: size }), do: { "h\#{size}", [], SimpleMarkdown.Renderer.HTML.AST.render(input) }
end
"""
@fallback_to_any true
@doc """
Render the parsed markdown as HTML AST.
"""
@spec render(Stream.t | [SimpleMarkdown.attribute | String.t] | SimpleMarkdown.attribute | String.t) :: SimpleMarkdown.Renderer.HTML.Utilities.ast
def render(ast)
end
defimpl SimpleMarkdown.Renderer.HTML.AST, for: Any do
def render(ast) do
case SimpleMarkdown.Renderer.HTML.impl_for(ast) do
SimpleMarkdown.Renderer.HTML.Any -> raise Protocol.UndefinedError, protocol: SimpleMarkdown.Renderer.HTML.AST, value: ast
_ -> SimpleMarkdown.Renderer.HTML.render(ast) |> SimpleMarkdown.Renderer.HTML.Utilities.html_to_ast
end
end
end
defimpl SimpleMarkdown.Renderer.HTML.AST, for: [List, Stream] do
def render(ast) do
Enum.map(ast, fn attribute ->
SimpleMarkdown.Renderer.HTML.AST.render(attribute)
end)
end
end
defimpl SimpleMarkdown.Renderer.HTML.AST, for: BitString do
def render(string), do: string
end
defimpl SimpleMarkdown.Renderer.HTML.AST, for: SimpleMarkdown.Attribute.LineBreak do
def render(_), do: { :br, [], [] }
end
defimpl SimpleMarkdown.Renderer.HTML.AST, for: SimpleMarkdown.Attribute.Header do
def render(%{ input: input, option: 1 }), do: { :h1, [], SimpleMarkdown.Renderer.HTML.AST.render(input) }
def render(%{ input: input, option: 2 }), do: { :h2, [], SimpleMarkdown.Renderer.HTML.AST.render(input) }
def render(%{ input: input, option: 3 }), do: { :h3, [], SimpleMarkdown.Renderer.HTML.AST.render(input) }
def render(%{ input: input, option: 4 }), do: { :h4, [], SimpleMarkdown.Renderer.HTML.AST.render(input) }
def render(%{ input: input, option: 5 }), do: { :h5, [], SimpleMarkdown.Renderer.HTML.AST.render(input) }
def render(%{ input: input, option: 6 }), do: { :h6, [], SimpleMarkdown.Renderer.HTML.AST.render(input) }
end
defimpl SimpleMarkdown.Renderer.HTML.AST, for: SimpleMarkdown.Attribute.Emphasis do
def render(%{ input: input, option: :regular }), do: { :em, [], SimpleMarkdown.Renderer.HTML.AST.render(input) }
def render(%{ input: input, option: :strong }), do: { :strong, [], SimpleMarkdown.Renderer.HTML.AST.render(input) }
end
defimpl SimpleMarkdown.Renderer.HTML.AST, for: SimpleMarkdown.Attribute.HorizontalRule do
def render(_), do: { :hr, [], [] }
end
defimpl SimpleMarkdown.Renderer.HTML.AST, for: SimpleMarkdown.Attribute.Table do
def render(%{ input: input, option: heading = [{_, _}|_] }) do
{ titles, aligns } = Enum.unzip(heading)
input = Enum.map(input, fn
%{ __struct__: SimpleMarkdown.Attribute.Row, input: elements } -> %{ __struct__: SimpleMarkdown.Attribute.Row, input: elements, option: aligns }
end)
{
:table,
[],
[
{ :thead, [], { :tr, [], Enum.map(titles, &({ :th, [], SimpleMarkdown.Renderer.HTML.AST.render(&1) })) } },
{ :tbody, [], SimpleMarkdown.Renderer.HTML.AST.render(input) }
]
}
end
def render(%{ input: input, option: aligns }) do
input = Enum.map(input, fn
%{ __struct__: SimpleMarkdown.Attribute.Row, input: elements } -> %{ __struct__: SimpleMarkdown.Attribute.Row, input: elements, option: aligns }
end)
{
:table,
[],
[
{ :tbody, [], SimpleMarkdown.Renderer.HTML.AST.render(input) }
]
}
end
end
defimpl SimpleMarkdown.Renderer.HTML.AST, for: SimpleMarkdown.Attribute.Row do
def render(%{ input: input, option: align }) do
{
:tr,
[],
Enum.zip(input, align)
|> Enum.map(fn
{ input, :default } -> { :td, [], SimpleMarkdown.Renderer.HTML.AST.render(input) }
{ input, align } -> { :td, [style: ["text-align: ", to_string(align), ";"]], SimpleMarkdown.Renderer.HTML.AST.render(input) }
end)
}
end
def render(%{ input: input }), do: { :tr, [], Enum.map(input, &({ :td, [], SimpleMarkdown.Renderer.HTML.AST.render(&1) })) }
end
defimpl SimpleMarkdown.Renderer.HTML.AST, for: SimpleMarkdown.Attribute.TaskList do
def render(%{ input: input }), do: { :ul, [], SimpleMarkdown.Renderer.HTML.AST.render(input) }
end
defimpl SimpleMarkdown.Renderer.HTML.AST, for: SimpleMarkdown.Attribute.Task do
def render(%{ input: input, option: :deselected }), do: { :li, [], [{ :input, [type: :checkbox, disabled: ""], [] }, SimpleMarkdown.Renderer.HTML.AST.render(input)] }
def render(%{ input: input, option: :selected }), do: { :li, [], [{ :input, [type: :checkbox, checked: "", disabled: ""], [] }, SimpleMarkdown.Renderer.HTML.AST.render(input)] }
end
defimpl SimpleMarkdown.Renderer.HTML.AST, for: SimpleMarkdown.Attribute.List do
def render(%{ input: input, option: :unordered }), do: { :ul, [], SimpleMarkdown.Renderer.HTML.AST.render(input) }
def render(%{ input: input, option: :ordered }), do: { :ol, [], SimpleMarkdown.Renderer.HTML.AST.render(input) }
end
defimpl SimpleMarkdown.Renderer.HTML.AST, for: SimpleMarkdown.Attribute.Item do
def render(%{ input: input }), do: { :li, [], SimpleMarkdown.Renderer.HTML.AST.render(input) }
end
defimpl SimpleMarkdown.Renderer.HTML.AST, for: SimpleMarkdown.Attribute.PreformattedCode do
def render(%{ input: input, option: syntax }) do
try do
module = SimpleMarkdown.child_module!(SimpleMarkdown.Attribute.PreformattedCode, syntax)
:ok = Protocol.assert_impl!(SimpleMarkdown.Renderer.HTML.AST, module)
module
rescue
ArgumentError ->
try do
module = SimpleMarkdown.child_module!(SimpleMarkdown.Attribute.PreformattedCode, syntax)
:ok = Protocol.assert_impl!(SimpleMarkdown.Renderer.HTML, module)
module
rescue
ArgumentError -> SimpleMarkdown.Renderer.HTML.AST.render(%{ __struct__: SimpleMarkdown.Attribute.PreformattedCode, input: input })
else
module -> SimpleMarkdown.Renderer.HTML.render(%{ __struct__: module, input: input }) |> SimpleMarkdown.Renderer.HTML.Utilities.html_to_ast
end
else
module -> SimpleMarkdown.Renderer.HTML.AST.render(%{ __struct__: module, input: input })
end
end
def render(%{ input: input }), do: { :pre, [], { :code, [], SimpleMarkdown.Renderer.HTML.AST.render(input) } }
end
defimpl SimpleMarkdown.Renderer.HTML.AST, for: SimpleMarkdown.Attribute.Paragraph do
def render(%{ input: input }), do: { :p, [], SimpleMarkdown.Renderer.HTML.AST.render(input) }
end
defimpl SimpleMarkdown.Renderer.HTML.AST, for: SimpleMarkdown.Attribute.Blockquote do
def render(%{ input: input }), do: { :blockquote, [], SimpleMarkdown.Renderer.HTML.AST.render(input) }
end
defimpl SimpleMarkdown.Renderer.HTML.AST, for: SimpleMarkdown.Attribute.Link do
def render(%{ input: input, option: url }), do: { :a, [href: url], SimpleMarkdown.Renderer.HTML.AST.render(input) }
end
defimpl SimpleMarkdown.Renderer.HTML.AST, for: SimpleMarkdown.Attribute.Image do
def render(%{ input: input, option: url }), do: { :img, [src: url, alt: SimpleMarkdown.Renderer.HTML.render(input)], [] }
end
defimpl SimpleMarkdown.Renderer.HTML.AST, for: SimpleMarkdown.Attribute.Code do
def render(%{ input: input }), do: { :code, [], SimpleMarkdown.Renderer.HTML.AST.render(input) }
end
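A quick illustration of rendering pre-parsed attributes straight to AST (the attribute maps are built literally here, shaped the way the parser produces them):

```elixir
SimpleMarkdown.Renderer.HTML.AST.render([
  %{__struct__: SimpleMarkdown.Attribute.Header, input: ["Hello"], option: 1},
  %{__struct__: SimpleMarkdown.Attribute.Paragraph, input: ["world"]}
])
#=> [{:h1, [], ["Hello"]}, {:p, [], ["world"]}]
```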
|
lib/simple_markdown/Renderer/html/ast.ex
| 0.915034 | 0.440229 |
ast.ex
|
starcoder
|
defmodule Miss.String do
@moduledoc """
Functions to extend the Elixir `String` module.
"""
@doc ~S"""
Builds a string with the given values using IO lists.
In Erlang and Elixir concatenating binaries will copy the concatenated binaries into a new
binary. Every time you concatenate binaries (`<>`) or use interpolation (`#{}`) you are making
copies of those binaries.
To build a string, it is cheaper and more efficient to use IO lists to build the binary just
once instead of concatenating along the way.
See the [Elixir IO Data documentation](https://hexdocs.pm/elixir/IO.html#module-io-data) for
more information.
All elements in the list must be a binary or convertible to a binary, otherwise an error is
raised.
## Examples
iex> Miss.String.build(["akira", "hamasaki", "123", "pim", "2010-09-01", "99.99"])
"akirahamasaki123pim2010-09-0199.99"
iex> Miss.String.build([:akira, 'hamasaki', 123, [112, 105, 109], ~D[2010-09-01], 99.99])
"akirahamasaki123pim2010-09-0199.99"
"""
@spec build(list()) :: String.t()
def build(values) when is_list(values) do
values
|> Enum.map(&value_to_string/1)
|> IO.iodata_to_binary()
end
@spec value_to_string(term()) :: String.t()
defp value_to_string(value) when is_binary(value), do: value
defp value_to_string(value), do: String.Chars.to_string(value)
@doc """
Builds a string with the given two values using IO lists similar to `Miss.String.build/1`.
When both values are binary, `#{inspect(__MODULE__)}.build/2` is more efficient than
`Miss.String.build/1` because it avoids checking whether each value is a binary.
## Examples
iex> Miss.String.build("akira", "hamasaki")
"akirahamasaki"
iex> Miss.String.build(:akira, 'hamasaki')
"akirahamasaki"
"""
@spec build(term(), term()) :: String.t()
def build(value1, value2)
when is_binary(value1) and
is_binary(value2),
do: IO.iodata_to_binary([value1, value2])
def build(value1, value2), do: build([value1, value2])
@doc """
Builds a string with the given three values using IO lists similar to `Miss.String.build/1`.
When all the values are binary, `#{inspect(__MODULE__)}.build/3` is more efficient than
`Miss.String.build/1` because it avoids checking whether each value is a binary.
## Examples
iex> Miss.String.build("akira", "hamasaki", "123")
"akirahamasaki123"
iex> Miss.String.build(:akira, 'hamasaki', 123)
"akirahamasaki123"
"""
@spec build(term(), term(), term()) :: String.t()
def build(value1, value2, value3)
when is_binary(value1) and
is_binary(value2) and
is_binary(value3),
do: IO.iodata_to_binary([value1, value2, value3])
def build(value1, value2, value3), do: build([value1, value2, value3])
@doc """
Builds a string with the given four values using IO lists similar to `Miss.String.build/1`.
When all the values are binary, `#{inspect(__MODULE__)}.build/4` is more efficient than
`Miss.String.build/1` because it avoids checking whether each value is a binary.
## Examples
iex> Miss.String.build("akira", "hamasaki", "123", "pim")
"akirahamasaki123pim"
iex> Miss.String.build(:akira, 'hamasaki', 123, [112, 105, 109])
"akirahamasaki123pim"
"""
@spec build(term(), term(), term(), term()) :: String.t()
def build(value1, value2, value3, value4)
when is_binary(value1) and
is_binary(value2) and
is_binary(value3) and
is_binary(value4),
do: IO.iodata_to_binary([value1, value2, value3, value4])
def build(value1, value2, value3, value4), do: build([value1, value2, value3, value4])
@doc """
Builds a string with the given five values using IO lists similar to `Miss.String.build/1`.
When all the values are binary, `#{inspect(__MODULE__)}.build/5` is more efficient than
`Miss.String.build/1` because it avoids checking whether each value is a binary.
## Examples
iex> Miss.String.build("akira", "hamasaki", "123", "pim", "2010-09-01")
"akirahamasaki123pim2010-09-01"
iex> Miss.String.build(:akira, 'hamasaki', 123, [112, 105, 109], ~D[2010-09-01])
"akirahamasaki123pim2010-09-01"
"""
@spec build(term(), term(), term(), term(), term()) :: String.t()
def build(value1, value2, value3, value4, value5)
when is_binary(value1) and
is_binary(value2) and
is_binary(value3) and
is_binary(value4) and
is_binary(value5),
do: IO.iodata_to_binary([value1, value2, value3, value4, value5])
def build(value1, value2, value3, value4, value5),
do: build([value1, value2, value3, value4, value5])
end
|
lib/miss/string.ex
| 0.902451 | 0.714952 |
string.ex
|
starcoder
|
defmodule WordsWithEnemies.Letters do
@moduledoc """
Provides a set of functions for intelligently creating
sets of letters. Uses <NAME>'s letter frequencies
and <NAME>'s common pairs.
"""
import Enum, only: [random: 1, take_random: 2]
import WordsWithEnemies.WordFinder, only: [word_list: 0, using: 2]
@pairs [
{"t", "h"}, {"h", "e"}, {"a", "n"}, {"r", "e"}, {"o", "n"},
{"a", "t"}, {"n", "d"}, {"s", "t"}, {"n", "g"}, {"o", "f"},
{"t", "e"}, {"e", "d"}, {"o", "r"}, {"t", "i"}, {"a", "s"},
{"t", "o"}, {"i", "n"}, {"e", "n"}, {"h", "i"}
]
@high_freq ["e", "t", "a", "o", "i", "n", "s", "h", "r", "d"]
@med_freq ["l", "c", "u", "m", "w", "f", "g", "y", "p", "b"]
@low_freq ["v", "k", "j", "x", "q", "z"]
@alphabet Enum.sort(@high_freq ++ @med_freq ++ @low_freq)
@doc """
Generates a list of letters based on the difficulty.
"""
@spec generate_set(:player | :ai, String.t) :: list
def generate_set(:player, "easy") do
frequencies(%{high: 5, med: 4, low: 2, pairs: 2}) # 15 (each pair adds two letters)
end
def generate_set(:player, "medium") do
frequencies(%{high: 5, med: 4, low: 4, pairs: 2}) # 17
end
def generate_set(:player, "hard") do
frequencies(%{high: 3, med: 3, low: 4, pairs: 1}) # 12
end
def generate_set(:ai, "easy") do
frequencies(%{high: 3, med: 3, low: 4, pairs: 1}) # 12
end
def generate_set(:ai, "medium") do
frequencies(%{high: 5, med: 4, low: 4, pairs: 1}) # 15
end
def generate_set(:ai, "hard") do
frequencies(%{high: 7, med: 6, low: 5, pairs: 2}) # 22
end
@spec frequencies(map) :: [String.t]
defp frequencies(%{high: h, med: m, low: l, pairs: p}) do
highs = @high_freq |> take_random(h)
meds = @med_freq |> take_random(m)
lows = @low_freq |> take_random(l)
pairs = @pairs |> take_random(p) |> get_pairs
Enum.shuffle(highs ++ meds ++ lows ++ pairs)
end
@spec get_pairs([{String.t, String.t}]) :: [String.t]
defp get_pairs(pairs) do
pairs
|> Enum.map(&Tuple.to_list/1)
|> List.flatten
end
@doc """
Appends a new letter to `letters`, based on the frequency
of letters currently in the list.
"""
@spec add_letter([String.t]) :: [String.t]
def add_letter(letters) do
case prevailing_freq(letters) do
:high -> letters ++ [random(@low_freq)]
:med -> letters ++ [random(@low_freq)]
:low -> letters ++ [random(@high_freq)]
end
end
@spec prevailing_freq([String.t]) :: :high | :med | :low
defp prevailing_freq(letters) do
freqs = %{high: count_highs(letters),
med: count_meds(letters),
low: count_lows(letters)}
{freq, _count} = Enum.max_by(freqs, fn {_freq, count} -> count end)
freq
end
defp count_highs(letters) do
Enum.count(letters, &(&1 in @high_freq))
end
defp count_meds(letters) do
Enum.count(letters, &(&1 in @med_freq))
end
defp count_lows(letters) do
Enum.count(letters, &(&1 in @low_freq))
end
@doc """
Returns the most common item in `list`, or `nil` if
there isn't one.
"""
@spec most_common(list) :: any
def most_common([n]), do: n
def most_common(list) do
list
|> frequency_table
|> Enum.max_by(&elem(&1, 1))
|> do_most_common
end
defp do_most_common({_item, count}) when count <= 1, do: nil
defp do_most_common({item, _count}), do: item
@doc """
Constructs a map containing each item in `list`,
and the number of times each one appears.
"""
@spec frequency_table(String.t) :: map
@spec frequency_table(list) :: map
def frequency_table(string) when is_bitstring(string) do
string
|> String.codepoints
|> frequency_table
end
def frequency_table(list) when is_list(list) do
Enum.reduce(list, %{}, fn (item, freqs) ->
Map.update(freqs, item, 1, &(&1 + 1))
end)
end
end
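The frequency helpers are deterministic and easy to check by hand:

```elixir
WordsWithEnemies.Letters.frequency_table("banana")
#=> %{"a" => 3, "b" => 1, "n" => 2}

WordsWithEnemies.Letters.most_common(["a", "b", "a"])
#=> "a"

WordsWithEnemies.Letters.most_common(["a", "b"])
#=> nil (no item appears more than once)
```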
|
lib/words_with_enemies/language/letters.ex
| 0.822403 | 0.691458 |
letters.ex
|
starcoder
|
defmodule Rajska.FieldAuthorization do
@moduledoc """
Absinthe middleware to ensure field permissions.
Authorizes Absinthe's object [field](https://hexdocs.pm/absinthe/Absinthe.Schema.Notation.html#field/4) according to the result of the `c:Rajska.Authorization.is_field_authorized?/3` function, which receives the user role, the meta `scope_by` atom defined in the object schema and the `source` object that is resolving the field.
## Usage
[Create your Authorization module and add it and FieldAuthorization to your Absinthe.Schema](https://hexdocs.pm/rajska/Rajska.html#module-usage). Then add the meta `scope_by` to an object and meta `private` to your sensitive fields:
```elixir
object :user do
meta :scope_by, :id
field :name, :string
field :is_email_public, :boolean
field :phone, :string, meta: [private: true]
field :email, :string, meta: [private: & !&1.is_email_public]
end
```
As seen in the example above, a function can also be passed as value to the meta `:private` key, in order to check if a field is private dynamically, depending of the value of another field.
"""
@behaviour Absinthe.Middleware
alias Absinthe.{
Resolution,
Type
}
def call(resolution, [object: %Type.Object{fields: fields} = object, field: field]) do
is_field_private? = fields[field] |> Type.meta(:private) |> is_field_private?(resolution.source)
scope_by = get_scope_by_field!(object, is_field_private?)
resolution
|> Map.get(:context)
|> authorized?(is_field_private?, scope_by, resolution.source)
|> put_result(resolution, field)
end
defp is_field_private?(true, _source), do: true
defp is_field_private?(private, source) when is_function(private), do: private.(source)
defp is_field_private?(_private, _source), do: false
defp get_scope_by_field!(_object, false), do: :ok
defp get_scope_by_field!(object, _private) do
case Type.meta(object, :scope_by) do
nil -> raise "No scope_by meta defined for object returned from query #{object.identifier}"
scope_by_field when is_atom(scope_by_field) -> scope_by_field
end
end
defp authorized?(_context, false, _scope_by, _source), do: true
defp authorized?(context, true, scope_by, source) do
case Rajska.apply_auth_mod(context, :is_super_user?, [context]) do
true -> true
false -> Rajska.apply_auth_mod(context, :is_context_field_authorized?, [context, scope_by, source])
end
end
defp put_result(true, resolution, _field), do: resolution
defp put_result(false, resolution, field) do
Resolution.put_result(resolution, {:error, "Not authorized to access field #{field}"})
end
end
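A hedged sketch of attaching the middleware in an Absinthe schema; the exact hook is an assumption here (consult Rajska's docs for the canonical wiring), but `call/2` expects exactly the `[object: ..., field: ...]` options shown:

```elixir
# In your Absinthe schema module (hypothetical wiring):
def middleware(middleware, field, %Absinthe.Type.Object{identifier: identifier} = object)
    when identifier not in [:query, :mutation, :subscription] do
  middleware ++ [{Rajska.FieldAuthorization, object: object, field: field.identifier}]
end

def middleware(middleware, _field, _object), do: middleware
```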
|
lib/middlewares/field_authorization.ex
| 0.876463 | 0.860838 |
field_authorization.ex
|
starcoder
|
defmodule Square.CashDrawers do
@moduledoc """
Documentation for `Square.CashDrawers`.
"""
@doc """
Provides the details for all of the cash drawer shifts for a location
in a date range.
```
def list_cash_drawer_shifts(client, [
location_id: "", # required
sort_order: nil,
begin_time: nil,
end_time: nil,
limit: nil,
cursor: nil
])
```
### Parameters
| Parameter | Type | Tags | Description |
| --- | --- | --- | --- |
| `location_id` | `String` | Query, Required | The ID of the location to query for a list of cash drawer shifts. |
| `sort_order` | [`String (Sort Order)`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/sort-order.md) | Query, Optional | The order in which cash drawer shifts are listed in the response,<br>based on their opened_at field. Default value: ASC |
| `begin_time` | `String` | Query, Optional | The inclusive start time of the query on opened_at, in ISO 8601 format. |
| `end_time` | `String` | Query, Optional | The exclusive end date of the query on opened_at, in ISO 8601 format. |
| `limit` | `Integer` | Query, Optional | Number of cash drawer shift events in a page of results (200 by<br>default, 1000 max). |
| `cursor` | `String` | Query, Optional | Opaque cursor for fetching the next page of results. |
### Response Type
[`List Cash Drawer Shifts Response Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/list-cash-drawer-shifts-response.md)
### Example Usage
iex> location_id = "location_id"
iex> Square.client |> Square.CashDrawers.list_cash_drawer_shifts([location_id: "..."])
"""
@spec list_cash_drawer_shifts(Tesla.Client.t(), list) :: {:error, any} | {:ok, Tesla.Env.t()}
def list_cash_drawer_shifts(client, params \\ []),
do: Tesla.get(client, "cash-drawers/shifts", query: params)
@doc """
Provides the summary details for a single cash drawer shift. See
RetrieveCashDrawerShiftEvents for a list of cash drawer shift events.
```
def retrieve_cash_drawer_shift(client, shift_id, [
location_id: "" # required
])
```
### Parameters
| Parameter | Type | Tags | Description |
| --- | --- | --- | --- |
| `location_id` | `String` | Query, Required | The ID of the location to retrieve cash drawer shifts from. |
| `shift_id` | `String` | Template, Required | The shift ID. |
### Response Type
[`Retrieve Cash Drawer Shift Response Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/doc/models/retrieve-cash-drawer-shift-response.md)
### Example Usage
iex> location_id = "location_id4"
iex> shift_id = "shift_id0"
iex> Square.client |> Square.CashDrawers.retrieve_cash_drawer_shift(shift_id, [location_id: "..."])
"""
@spec retrieve_cash_drawer_shift(Tesla.Client.t(), binary, list) ::
{:error, any} | {:ok, Tesla.Env.t()}
def retrieve_cash_drawer_shift(client, shift_id, params \\ []),
do: Tesla.get(client, "cash-drawers/shifts/#{shift_id}", query: params)
@doc """
Provides a paginated list of events for a single cash drawer shift.
```
def list_cash_drawer_shift_events(client, shift_id, [
location_id: "", # required
limit: nil,
cursor: nil
])
```
### Parameters
| Parameter | Type | Tags | Description |
| --- | --- | --- | --- |
| `location_id` | `String` | Query, Required | The ID of the location to list cash drawer shifts for. |
| `shift_id` | `String` | Template, Required | The shift ID. |
| `limit` | `Integer` | Query, Optional | Number of resources to be returned in a page of results (200 by<br>default, 1000 max). |
| `cursor` | `String` | Query, Optional | Opaque cursor for fetching the next page of results. |
### Response Type
[`List Cash Drawer Shift Events Response Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/list-cash-drawer-shift-events-response.md)
### Example Usage
iex> location_id = "location_id4"
iex> shift_id = "shift_id0"
iex> Square.client |> Square.CashDrawers.list_cash_drawer_shift_events(shift_id, [location_id: "..."])
"""
@spec list_cash_drawer_shift_events(Tesla.Client.t(), binary, list) ::
{:error, any} | {:ok, Tesla.Env.t()}
def list_cash_drawer_shift_events(client, shift_id, params \\ []),
do: Tesla.get(client, "cash-drawers/shifts/#{shift_id}/events", query: params)
end
|
lib/api/cash_drawers_api.ex
| 0.935006 | 0.822439 |
cash_drawers_api.ex
|
starcoder
|
defmodule Scope do
@moduledoc """
Scope is a small module that provides two macros to facilitate
function overload and local import/aliases execution.
## Overload functions
```
import Scope
overload [+: 2, -: 2], from: Kernel, with: Test
1 + 3 - 2 # gives [2, [1, 3]]
```
## Local importation
You can just import one or more module :
```
import Scope
x = local System do
user_home <> " !"
end
# Or multiple module
y = local Elixir.{System, Path} do
absname(user_home())
end
# Or specific functions from a module
z = local [user_home: 0, user_home!: 0] in System do
user_home <> " !"
end
```
You can also directly use an expression:
```
import Scope
a = local (overload [+: 2, -: 2], from: Kernel, with: Test) do
1 + 2 - 3
end
b = 1 + 2 - 3
# a == [3, [1, 2]]
# b == 0
c = local (import Test) do
a = 1 + 2
b = 1 - 2
a - b
end
```
"""
@doc false
defmacro __using__(_opts) do
quote do: (import Scope)
end
@doc """
Import module `from` except the gived functions and
import module `with` with only the gived functions.
This is mainly useful for overloading operators used in `Kernel`.
(Arithmetics operators for example)
"""
defmacro overload(methods, from: a, with: b) do
quote do
import unquote(a), except: unquote(methods)
import unquote(b), only: unquote(methods)
end
end
@doc """
Generates the execution of a lambda, importing
the given module expression first.
"""
defmacro local(module_expr, body)
defmacro local({:in, _, [kw, module]}, do: expr) do
quote do
local(
(import unquote(module), only: unquote(kw)),
do: unquote(expr)
)
end
end
defmacro local({:__aliases__, _, [_]} = module, do: expr) do
quote do
local(
(import unquote(module)),
do: unquote(expr))
end
end
defmacro local(module_expr, do: expr) do
quote do
fn() ->
unquote(module_expr)
unquote(expr)
end.()
end
end
end
|
lib/scope.ex
| 0.612773 | 0.905239 |
scope.ex
|
starcoder
|
defmodule Sebex.ElixirAnalyzer.Span do
@type t :: %__MODULE__{
start_line: non_neg_integer(),
start_column: non_neg_integer(),
end_line: non_neg_integer(),
end_column: non_neg_integer()
}
@derive Jason.Encoder
@enforce_keys [:start_line, :start_column, :end_line, :end_column]
defstruct @enforce_keys
@spec new(
start_line :: non_neg_integer(),
start_column :: non_neg_integer(),
end_line :: non_neg_integer(),
end_column :: non_neg_integer()
) :: t()
def new(
start_line,
start_column,
end_line,
end_column
) do
%__MODULE__{
start_line: start_line,
start_column: start_column,
end_line: end_line,
end_column: end_column
}
end
@spec zero :: t()
def zero() do
new(0, 0, 0, 0)
end
@spec set(span :: t(), kv :: Keyword.t()) :: t()
def set(span, kv), do: set(span, kv, :start)
@spec set(span :: t(), kv :: Keyword.t(), where :: :start | :end) :: t()
def set(span, kv, :start) do
%{span | start_line: Keyword.fetch!(kv, :line), start_column: Keyword.fetch!(kv, :column)}
end
def set(span, kv, :end) do
%{span | end_line: Keyword.fetch!(kv, :line), end_column: Keyword.fetch!(kv, :column)}
end
@spec literal(literal :: {:literal, Keyword.t(), [term]}) :: t()
def literal({:literal, meta, [str]}) when is_binary(str) do
if String.contains?(str, "\n") do
raise "finding spans for multiline strings is not implemented"
end
start_line = Keyword.fetch!(meta, :line)
start_column = Keyword.fetch!(meta, :column)
delimiter = Keyword.fetch!(meta, :delimiter)
end_column = start_column + String.length(str) + 2 * String.length(delimiter)
new(start_line, start_column, start_line, end_column)
end
def literal({:literal, meta, [atom]}) when is_atom(atom) do
start_line = Keyword.fetch!(meta, :line)
start_column = Keyword.fetch!(meta, :column)
end_column = start_column + String.length(Macro.to_string(atom))
new(start_line, start_column, start_line, end_column)
end
end
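For instance, with token metadata shaped like the Elixir tokenizer's output:

```elixir
alias Sebex.ElixirAnalyzer.Span

Span.literal({:literal, [line: 3, column: 5, delimiter: "\""], ["abc"]})
#=> %Span{start_line: 3, start_column: 5, end_line: 3, end_column: 10}

Span.zero() |> Span.set(line: 1, column: 2) |> Span.set([line: 1, column: 8], :end)
#=> %Span{start_line: 1, start_column: 2, end_line: 1, end_column: 8}
```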
|
sebex_elixir_analyzer/lib/sebex_elixir_analyzer/span.ex
| 0.755817 | 0.413596 |
span.ex
|
starcoder
|
defmodule AWS.CostExplorer do
@moduledoc """
The Cost Explorer API enables you to programmatically query your cost and
usage data. You can query for aggregated data such as total monthly costs
or total daily usage. You can also query for granular data, such as the
number of daily write operations for Amazon DynamoDB database tables in
your production environment.
Service Endpoint
The Cost Explorer API provides the following endpoint:
  * `https://ce.us-east-1.amazonaws.com`

For information about costs associated with the Cost Explorer
API, see [AWS Cost Management
Pricing](http://aws.amazon.com/aws-cost-management/pricing/).
"""
@doc """
Creates a new Cost Category with the requested name and rules.
"""
def create_cost_category_definition(client, input, options \\ []) do
request(client, "CreateCostCategoryDefinition", input, options)
end
@doc """
Deletes a Cost Category. Expenses from this month going forward will no
longer be categorized with this Cost Category.
"""
def delete_cost_category_definition(client, input, options \\ []) do
request(client, "DeleteCostCategoryDefinition", input, options)
end
@doc """
Returns the name, ARN, rules, definition, and effective dates of a Cost
Category that's defined in the account.
You have the option to use `EffectiveOn` to return a Cost Category that is
active on a specific date. If there is no `EffectiveOn` specified, you’ll
see a Cost Category that is effective on the current date. If the Cost Category
is still effective, `EffectiveEnd` is omitted in the response.
"""
def describe_cost_category_definition(client, input, options \\ []) do
request(client, "DescribeCostCategoryDefinition", input, options)
end
@doc """
Retrieves cost and usage metrics for your account. You can specify which
cost and usage-related metric, such as `BlendedCosts` or `UsageQuantity`,
that you want the request to return. You can also filter and group your
data by various dimensions, such as `SERVICE` or `AZ`, in a specific time
range. For a complete list of valid dimensions, see the
[GetDimensionValues](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_GetDimensionValues.html)
operation. Master accounts in an organization in AWS Organizations have
access to all member accounts.
"""
def get_cost_and_usage(client, input, options \\ []) do
request(client, "GetCostAndUsage", input, options)
end
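# Example request shape for GetCostAndUsage, following the public API
# reference (a sketch; field names are not validated against this client):
#
#     input = %{
#       "TimePeriod" => %{"Start" => "2020-01-01", "End" => "2020-02-01"},
#       "Granularity" => "MONTHLY",
#       "Metrics" => ["BlendedCost"]
#     }
#     AWS.CostExplorer.get_cost_and_usage(client, input)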
@doc """
Retrieves cost and usage metrics with resources for your account. You can
specify which cost and usage-related metric, such as `BlendedCosts` or
`UsageQuantity`, that you want the request to return. You can also filter
and group your data by various dimensions, such as `SERVICE` or `AZ`, in a
specific time range. For a complete list of valid dimensions, see the
[GetDimensionValues](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_GetDimensionValues.html)
operation. Master accounts in an organization in AWS Organizations have
access to all member accounts. This API is currently available for the
Amazon Elastic Compute Cloud – Compute service only.
Note: this is an opt-in only feature. You can enable it from the
Cost Explorer Settings page. For information on how to access the Settings
page, see [Controlling Access for Cost
Explorer](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/ce-access.html)
in the *AWS Billing and Cost Management User Guide*.
"""
def get_cost_and_usage_with_resources(client, input, options \\ []) do
request(client, "GetCostAndUsageWithResources", input, options)
end
@doc """
Retrieves a forecast for how much Amazon Web Services predicts that you
will spend over the forecast time period that you select, based on your
past costs.
"""
def get_cost_forecast(client, input, options \\ []) do
request(client, "GetCostForecast", input, options)
end
@doc """
Retrieves all available filter values for a specified filter over a period
of time. You can search the dimension values for an arbitrary string.
"""
def get_dimension_values(client, input, options \\ []) do
request(client, "GetDimensionValues", input, options)
end
@doc """
Retrieves the reservation coverage for your account. This enables you to
see how much of your Amazon Elastic Compute Cloud, Amazon ElastiCache,
Amazon Relational Database Service, or Amazon Redshift usage is covered by
a reservation. An organization's master account can see the coverage of the
associated member accounts. This supports dimensions, Cost Categories, and
nested expressions. For any time period, you can filter data about
reservation usage by the following dimensions:
  * AZ
  * CACHE_ENGINE
  * DATABASE_ENGINE
  * DEPLOYMENT_OPTION
  * INSTANCE_TYPE
  * LINKED_ACCOUNT
  * OPERATING_SYSTEM
  * PLATFORM
  * REGION
  * SERVICE
  * TAG
  * TENANCY

To determine valid values for a dimension, use the
`GetDimensionValues` operation.
"""
def get_reservation_coverage(client, input, options \\ []) do
request(client, "GetReservationCoverage", input, options)
end
@doc """
Gets recommendations for which reservations to purchase. These
recommendations could help you reduce your costs. Reservations provide a
discounted hourly rate (up to 75%) compared to On-Demand pricing.
AWS generates your recommendations by identifying your On-Demand usage
during a specific time period and collecting your usage into categories
that are eligible for a reservation. After AWS has these categories, it
simulates every combination of reservations in each category of usage to
identify the best number of each type of RI to purchase to maximize your
estimated savings.
For example, AWS automatically aggregates your Amazon EC2 Linux, shared
tenancy, and c4 family usage in the US West (Oregon) Region and recommends
that you buy size-flexible regional reservations to apply to the c4 family
usage. AWS recommends the smallest size instance in an instance family.
This makes it easier to purchase a size-flexible RI. AWS also shows the
equal number of normalized units so that you can purchase any instance size
that you want. For this example, your RI recommendation would be for
`c4.large` because that is the smallest size instance in the c4 instance
family.
"""
def get_reservation_purchase_recommendation(client, input, options \\ []) do
request(client, "GetReservationPurchaseRecommendation", input, options)
end
@doc """
Retrieves the reservation utilization for your account. Master accounts in
an organization have access to member accounts. You can filter data by
dimensions in a time period. You can use `GetDimensionValues` to determine
the possible dimension values. Currently, you can group only by
`SUBSCRIPTION_ID`.
"""
def get_reservation_utilization(client, input, options \\ []) do
request(client, "GetReservationUtilization", input, options)
end
@doc """
Creates recommendations that help you save cost by identifying idle and
underutilized Amazon EC2 instances.
Recommendations are generated to either downsize or terminate instances,
along with providing savings detail and metrics. For details on calculation
and function, see [Optimizing Your Cost with Rightsizing
Recommendations](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/ce-rightsizing.html)
in the *AWS Billing and Cost Management User Guide*.
"""
def get_rightsizing_recommendation(client, input, options \\ []) do
request(client, "GetRightsizingRecommendation", input, options)
end
@doc """
Retrieves the Savings Plans covered for your account. This enables you to
see how much of your cost is covered by a Savings Plan. An organization’s
master account can see the coverage of the associated member accounts. This
supports dimensions, Cost Categories, and nested expressions. For any time
period, you can filter data for Savings Plans usage with the following
dimensions:
  * `LINKED_ACCOUNT`
  * `REGION`
  * `SERVICE`
  * `INSTANCE_FAMILY`

To determine valid values for a dimension, use the
`GetDimensionValues` operation.
"""
def get_savings_plans_coverage(client, input, options \\ []) do
request(client, "GetSavingsPlansCoverage", input, options)
end
@doc """
Retrieves your request parameters, Savings Plan Recommendations Summary and
Details.
"""
def get_savings_plans_purchase_recommendation(client, input, options \\ []) do
request(client, "GetSavingsPlansPurchaseRecommendation", input, options)
end
@doc """
Retrieves the Savings Plans utilization for your account across date ranges
with daily or monthly granularity. Master accounts in an organization have
access to member accounts. You can use `GetDimensionValues` in
`SAVINGS_PLANS` to determine the possible dimension values.
<note> You cannot group by any dimension values for
`GetSavingsPlansUtilization`.
</note>
"""
def get_savings_plans_utilization(client, input, options \\ []) do
request(client, "GetSavingsPlansUtilization", input, options)
end
@doc """
Retrieves attribute data along with aggregate utilization and savings data
for a given time period. This doesn't support granular or grouped data
(daily/monthly) in response. You can't retrieve data by dates in a single
response similar to `GetSavingsPlansUtilization`, but you have the option to
make multiple calls to `GetSavingsPlansUtilizationDetails` by providing
individual dates. You can use `GetDimensionValues` in `SAVINGS_PLANS` to
determine the possible dimension values.
<note> `GetSavingsPlansUtilizationDetails` internally groups data by
`SavingsPlansArn`.
</note>
"""
def get_savings_plans_utilization_details(client, input, options \\ []) do
request(client, "GetSavingsPlansUtilizationDetails", input, options)
end
@doc """
Queries for available tag keys and tag values for a specified period. You
can search the tag values for an arbitrary string.
"""
def get_tags(client, input, options \\ []) do
request(client, "GetTags", input, options)
end
@doc """
Retrieves a forecast for how much Amazon Web Services predicts that you
will use over the forecast time period that you select, based on your past
usage.
"""
def get_usage_forecast(client, input, options \\ []) do
request(client, "GetUsageForecast", input, options)
end
@doc """
Returns the name, ARN, `NumberOfRules` and effective dates of all Cost
Categories defined in the account. You have the option to use `EffectiveOn`
to return a list of Cost Categories that were active on a specific date. If
there is no `EffectiveOn` specified, you’ll see Cost Categories that are
effective on the current date. If Cost Category is still effective,
`EffectiveEnd` is omitted in the response. `ListCostCategoryDefinitions`
supports pagination. The request can have a `MaxResults` range up to 100.
"""
def list_cost_category_definitions(client, input, options \\ []) do
request(client, "ListCostCategoryDefinitions", input, options)
end
@doc """
Updates an existing Cost Category. Changes made to the Cost Category rules
will be used to categorize the current month’s expenses and future
expenses. This won’t change categorization for the previous months.
"""
def update_cost_category_definition(client, input, options \\ []) do
request(client, "UpdateCostCategoryDefinition", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, Poison.Parser.t() | nil, Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, action, input, options) do
client = %{client | service: "ce",
region: "us-east-1"}
host = build_host("ce", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSInsightsIndexService.#{action}"}
]
payload = Poison.Encoder.encode(input, %{})
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, nil, response}
{:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{endpoint: endpoint}) do
"#{endpoint_prefix}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/cost_explorer.ex
| 0.928124 | 0.661684 |
cost_explorer.ex
|
starcoder
|
defmodule PaymentStream do
@moduledoc """
The PaymentStream module provides utilities for dealing with streams
of payments.
## Payment Streams
A *payment* is a tuple `{amount, date}` consisting of an amount (in
whatever currency) and a date. The amount can be positive or
negative.
For example, `{-2000, ~D[2015-01-01]}` represents an amount of -2000
transferred at Jan 01, 2015.
A *payment stream* is a list of payments.
## Relative Payment Streams
Let `[{a_1, t_1}, ..., {a_n, t_n}]` be a payment stream and let
`{a_f, t_f}` be the earliest payment in this stream. A *relative
payment stream* is a list `[{a_1, r_1}, ..., {a_n, r_n}]` where
`r_k` is the difference of `t_k` and `t_f` "expressed in years".
More precisely, `r_k` is computed as follows: Let `t_f` be the
`d_f`th day in a year `y_f` and let `t_k` be the `d_k`th day in year
`y_k`. (Days are indexed starting at `0`. Jan 01 is day `0`.) Let
`D(y)` denote the number of days in a year `y`. For a leap year `y`,
`D(y)` is 366. Otherwise, `D(y)` is 365. Then
```
r_k = (y_k - y_f) + (d_k / D(y_k) - d_f / D(y_f)).
```
## The Net Present Value Function
A relative payment stream `[{a_1, r_1}, ..., {a_n, r_n}]` gives rise
to the definition of the net present value function
```
npv(x) = a_1 * (1 + x)^(-r_1) + ... + a_n * (1 + x)^(-r_n)
```
of a single real variable `x`. The internal interest rate of the
original payment stream is the root of the `npv` function.
In general, there is no closed formula for the computation of the
roots of `npv`. However, given a "reasonable" start value, Newton's
method converges very fast to the wanted root.
Newton's method requires the computation of the derivative `npv'` of
`npv`. Fortunately, `npv'` can be easily written in a closed form:
```
npv' = a_1 * (-r_1) * (1 + x)^(-r_1 - 1) + ... + a_n * (-r_n) * (1 + x)^(-r_n - 1)
```
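For illustration, a minimal Newton iteration built from the two functions
defined below could look like this (a sketch, not part of the public API;
the start value `0.1` and the fixed 20 iterations are arbitrary choices
that usually suffice for convergence):
```
relative_stream = PaymentStream.to_relative_payment_stream(payment_stream)
npv = PaymentStream.net_present_value(relative_stream)
npv_prime = PaymentStream.net_present_value_derivative(relative_stream)

irr = Enum.reduce(1..20, 0.1, fn _, x -> x - npv.(x) / npv_prime.(x) end)
```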
"""
@doc ~S"""
Finds the earliest payment in a payment stream
## Examples
```
iex> PaymentStream.earliest_payment([{-1000, ~D[2021-01-01]}, {1000, ~D[2020-01-01]}])
{1000, ~D[2020-01-01]}
```
"""
def earliest_payment(payment_stream) do
# Date structs must be compared with Date.compare/2, not term order,
# so pass the Date module as the sorter (Elixir >= 1.10)
Enum.min_by(payment_stream, fn {_amount, date} -> date end, Date)
end
@doc ~S"""
Converts a payment stream to a relative payment stream
## Examples
```
iex> PaymentStream.to_relative_payment_stream([{1000, ~D[2020-01-01]}, {1000, ~D[2021-01-01]}])
[{1000, 0.0}, {1000, 1.0}]
```
"""
def to_relative_payment_stream(payment_stream) do
t_f = earliest_payment(payment_stream)
Enum.map(payment_stream, fn payment -> to_relative_payment(t_f, payment) end)
end
defp to_relative_payment({_a_f, t_f}, {a_k, t_k}) do
{a_k, t_k.year - t_f.year + relative_day_in_year(t_k) - relative_day_in_year(t_f)}
end
defp relative_day_in_year(t) do
d = day_in_year(t)
if Date.leap_year?(t) do
d / 366
else
d / 365
end
end
defp day_in_year(t) do
{:ok, jan01} = Date.new(t.year, 1, 1)
Date.diff(t, jan01)
end
@doc ~S"""
Computes the net present value function `npv`of a relative payment
stream
## Examples
Let `[{1000, ~D[2021-01-01]}, {-1000, ~D[2022-01-01]}]` be a very
simple payment stream. Since the amount paid on Jan 01, 2021 is the
negative of the amount received one year later on Jan 01, 2022, the
internal interest rate for this payment stream should be `0`.
The relative payment stream corresponding to the payment stream
above is `[{1000, 0.0}, {-1000, 1.0}]` and then the corresponding
`npv` function is
```
npv(x) = 1000 * (1 + x)^0.0 + (-1000) * (1 + x)^(-1.0)
```
so that
```
npv(0) = 1000 * (1 + 0)^0.0 + (-1000) * (1 + 0)^(-1.0)
= 1000 * 1 - 1000 * 1
= 0
```
```
iex> [{1000, ~D[2021-01-01]}, {-1000, ~D[2022-01-01]}]
...> |> PaymentStream.to_relative_payment_stream()
...> |> PaymentStream.net_present_value()
...> |> apply([0.0])
0.0
```
"""
def net_present_value(relative_payment_stream) do
fn x ->
Enum.reduce(
relative_payment_stream,
0,
fn {a, r}, sum -> sum + a * :math.pow(1 + x, -r) end
)
end
end
@doc ~S"""
Computes the derivative `npv'` of the net present value function of
a relative payment stream
## Examples
Let `[{1000, 0.0}, {-1000, 1.0}]` be a very simple relative payment
stream with a corresponding net present value function
```
npv(x) = 1000 * (1 + x)^0.0 + (-1000) * (1 + x)^(-1.0)
```
Then the derivative of `npv` is
```
npv'(x) = 0.0 * 1000 * (1 + x)^(-1.0) + (-1.0) * (-1000) * (1 + x)^(-2.0)
= 1000 * (1 + x)^(-2.0)
```
```
iex> [{1000, 0.0}, {-1000, 1.0}]
...> |> PaymentStream.net_present_value_derivative()
...> |> apply([0.0])
1000.0
```
"""
def net_present_value_derivative(relative_payment_stream) do
fn x ->
Enum.reduce(
relative_payment_stream,
0,
fn {a, r}, sum -> sum + a * -r * :math.pow(1 + x, -r - 1) end
)
end
end
end
|
lib/payment_stream.ex
| 0.924398 | 0.965381 |
payment_stream.ex
|
starcoder
|
defmodule Mix.Utils do
@moduledoc """
Utilities used throughout Mix and tasks.
## Conversions
This module handles two types of conversions:
* From command names to module names, i.e. how the command
`deps.get` translates to `Deps.Get` and vice-versa;
* From underscore to CamelCase, i.e. how the file path
`my_project` translates to `MyProject`;
"""
@doc """
Gets the mix home. It defaults to `~/.mix` unless the
`MIX_HOME` environment variable is set.
"""
def mix_home do
System.get_env("MIX_HOME") || Path.expand("~/.mix")
end
@doc """
Gets all extra paths defined in the environment variable
`MIX_PATH`. `MIX_PATH` may contain multiple paths. On Windows,
those paths should be separated by `;`; on Unix systems, by `:`.
"""
def mix_paths do
if path = System.get_env("MIX_PATH") do
String.split(path, path_separator)
else
[]
end
end
defp path_separator do
case :os.type do
{ :win32, _ } -> ";"
{ :unix, _ } -> ":"
end
end
@doc """
Takes a `command` name and attempts to load a module
with the command name converted to a module name
in the given `at` scope.
Returns `{ :module, module }` in case a module
exists and is loaded, `{ :error, reason }` otherwise.
## Examples
Mix.Utils.command_to_module("compile", Mix.Tasks)
#=> { :module, Mix.Tasks.Compile }
"""
def command_to_module(command, at // Elixir) do
module = Module.concat(at, command_to_module_name(command))
Code.ensure_loaded(module)
end
@doc """
Returns `true` if any of the `sources` are stale
compared to the given `targets`.
"""
def stale?(sources, targets) do
Enum.any? stale_stream(sources, targets)
end
@doc """
Extract all stale `sources` compared to the given `targets`.
"""
def extract_stale(_sources, []), do: []
def extract_stale(sources, targets) do
stale_stream(sources, targets) |> Enum.to_list
end
defp stale_stream(sources, targets) do
modified_target = targets |> Enum.map(&last_modified(&1)) |> Enum.min
Stream.filter(sources, fn(source) ->
source_mtime(source) > modified_target
end)
end
defp source_mtime({ _, { { _, _, _ }, { _, _, _ } } = source }) do
source
end
defp source_mtime(source) do
last_modified(source)
end
defp last_modified(path) do
case File.stat(path) do
{ :ok, File.Stat[mtime: mtime] } -> mtime
{ :error, _ } -> { { 1970, 1, 1 }, { 0, 0, 0 } }
end
end
@doc %S"""
Reads the given file as a manifest and returns each entry
as a list.
A manifest is a tabular file where each line is a row
and each entry in a row is separated by "\t". The first
entry must always be a path to a compiled artifact.
In case there is no manifest file, returns an empty list.
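## Examples
A round trip with `write_manifest/2` (illustrative path and entry):
    write_manifest("manifest", ["foo.beam\tlib/foo.ex"])
    read_manifest("manifest")
    #=> ["foo.beam\tlib/foo.ex"]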
"""
def read_manifest(file) do
case File.read(file) do
{ :ok, contents } -> String.split(contents, "\n")
{ :error, _ } -> []
end
end
@doc """
Writes a manifest file with the given `entries` list.
"""
def write_manifest(file, entries) do
Path.dirname(file) |> File.mkdir_p!
File.write!(file, Enum.join(entries, "\n"))
end
@doc """
Extract files from a list of paths.
If any of the paths is a directory, the directory is looped
recursively searching for the given extensions or the given pattern.
When looking up directories, files starting with "." are ignored.
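## Examples
    # all .ex and .exs files under lib/, recursively (illustrative path)
    extract_files(["lib"], ["ex", "exs"])
    # equivalent, via an explicit pattern
    extract_files(["lib"], "*.{ex,exs}")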
"""
def extract_files(paths, exts_or_pattern)
def extract_files(paths, exts) when is_list(exts) do
extract_files(paths, "*.{#{Enum.join(exts, ",")}}")
end
def extract_files(paths, pattern) do
files = Enum.concat(lc path inlist paths do
if File.regular?(path), do: [path], else: Path.wildcard("#{path}/**/#{pattern}")
end)
files |> exclude_files |> Enum.uniq
end
defp exclude_files(files) do
filter = fn(x) -> not match?("." <> _, Path.basename(x)) end
Enum.filter files, filter
end
@doc """
Merges two configs recursively, merging keyword lists
and concatenating normal lists.
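## Examples
    config_merge([a: [1], b: [c: 2]], [a: [3], b: [d: 4]])
    #=> [a: [1, 3], b: [c: 2, d: 4]]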
"""
def config_merge(old, new) do
Keyword.merge(old, new, fn(_, x, y) ->
if is_list(x) and is_list(y) do
if Keyword.keyword?(x) and Keyword.keyword?(y) do
config_merge(x, y)
else
x ++ y
end
else
y
end
end)
end
@doc """
Converts the given atom or binary to underscore format.
If an atom is given, it is assumed to be an Elixir module,
so it is converted to a binary and then processed.
## Examples
Mix.Utils.underscore "FooBar" #=> "foo_bar"
Mix.Utils.underscore "Foo.Bar" #=> "foo/bar"
Mix.Utils.underscore Foo.Bar #=> "foo/bar"
In general, `underscore` can be thought of as the reverse of
`camelize`, however, in some cases formatting may be lost:
Mix.Utils.underscore "SAPExample" #=> "sap_example"
Mix.Utils.camelize "sap_example" #=> "SapExample"
"""
def underscore(atom) when is_atom(atom) do
"Elixir." <> rest = atom_to_binary(atom)
underscore(rest)
end
def underscore(<<h, t :: binary>>) do
<<to_lower_char(h)>> <> do_underscore(t, h)
end
defp do_underscore(<<h, t, rest :: binary>>, _) when h in ?A..?Z and not t in ?A..?Z do
<<?_, to_lower_char(h), t>> <> do_underscore(rest, t)
end
defp do_underscore(<<h, t :: binary>>, prev) when h in ?A..?Z and not prev in ?A..?Z do
<<?_, to_lower_char(h)>> <> do_underscore(t, h)
end
defp do_underscore(<<?-, t :: binary>>, _) do
<<?_>> <> do_underscore(t, ?-)
end
defp do_underscore(<<?., t :: binary>>, _) do
<<?/>> <> underscore(t)
end
defp do_underscore(<<h, t :: binary>>, _) do
<<to_lower_char(h)>> <> do_underscore(t, h)
end
defp do_underscore(<<>>, _) do
<<>>
end
@doc """
Converts the given string to CamelCase format.
## Examples
Mix.Utils.camelize "foo_bar" #=> "FooBar"
"""
def camelize(<<?_, t :: binary>>) do
camelize(t)
end
def camelize(<<h, t :: binary>>) do
<<to_upper_char(h)>> <> do_camelize(t)
end
defp do_camelize(<<?_, ?_, t :: binary>>) do
do_camelize(<< ?_, t :: binary >>)
end
defp do_camelize(<<?_, h, t :: binary>>) when h in ?a..?z do
<<to_upper_char(h)>> <> do_camelize(t)
end
defp do_camelize(<<?_>>) do
<<>>
end
defp do_camelize(<<?/, t :: binary>>) do
<<?.>> <> camelize(t)
end
defp do_camelize(<<h, t :: binary>>) do
<<h>> <> do_camelize(t)
end
defp do_camelize(<<>>) do
<<>>
end
@doc """
Takes a module and converts it to a command. The nesting
argument can be given in order to remove the nesting of a
module.
## Examples
module_name_to_command(Mix.Tasks.Compile, 2)
#=> "compile"
module_name_to_command("Mix.Tasks.Compile.Elixir", 2)
#=> "compile.elixir"
"""
def module_name_to_command(module, nesting // 0)
def module_name_to_command(module, nesting) when is_atom(module) do
module_name_to_command(inspect(module), nesting)
end
def module_name_to_command(module, nesting) do
t = Regex.split(%r/\./, to_string(module))
t |> Enum.drop(nesting) |> Enum.map(&first_to_lower(&1)) |> Enum.join(".")
end
@doc """
Takes a command and converts it to the module name format.
## Examples
command_to_module_name("compile.elixir")
#=> "Compile.Elixir"
"""
def command_to_module_name(s) do
Regex.split(%r/\./, to_string(s)) |>
Enum.map(&first_to_upper(&1)) |>
Enum.join(".")
end
defp first_to_upper(<<s, t :: binary>>), do: <<to_upper_char(s)>> <> t
defp first_to_upper(<<>>), do: <<>>
defp first_to_lower(<<s, t :: binary>>), do: <<to_lower_char(s)>> <> t
defp first_to_lower(<<>>), do: <<>>
defp to_upper_char(char) when char in ?a..?z, do: char - 32
defp to_upper_char(char), do: char
defp to_lower_char(char) when char in ?A..?Z, do: char + 32
defp to_lower_char(char), do: char
@doc """
Symlink directory `source` to `target` or copy it recursively
in case symlink fails. Expect source and target to be absolute
paths as it generates a relative symlink.
"""
def symlink_or_copy(source, target) do
if File.exists?(source) do
source_list = String.to_char_list!(source)
case :file.read_link(target) do
{ :ok, ^source_list } ->
:ok
{ :ok, _ } ->
File.rm!(target)
do_symlink_or_copy(source, target)
{ :error, :enoent } ->
do_symlink_or_copy(source, target)
{ :error, _ } ->
File.rm_rf!(target)
do_symlink_or_copy(source, target)
end
else
{ :error, :enoent }
end
end
defp do_symlink_or_copy(source, target) do
symlink_source = make_relative_path(source, target)
case :file.make_symlink(symlink_source, target) do
:ok -> :ok
{ :error, _ } -> File.cp_r!(source, Path.dirname(target))
end
end
# Make a relative path in between two paths.
# Expects both paths to be fully expanded.
defp make_relative_path(source, target) do
do_make_relative_path(Path.split(source), Path.split(target))
end
defp do_make_relative_path([h|t1], [h|t2]) do
do_make_relative_path(t1, t2)
end
defp do_make_relative_path(source, target) do
base = List.duplicate("..", max(length(target) - 1, 0))
Path.join(base ++ source)
end
@doc """
Opens and reads content from either a URL or a local filesystem path.
Used by tasks like `local.install` and `local.rebar` that support
installation either from a URL or a local file.
Raises if the given path is not a url, nor a file or if the
file or url are invalid.
"""
def read_path!(path) do
cond do
url?(path) -> read_url(path)
file?(path) -> read_file(path)
:else -> raise Mix.Error, message: "Expected #{path} to be a url or a local file path"
end
end
defp read_file(path) do
File.read!(path)
end
defp read_url(path) do
if URI.parse(path).scheme == "https" do
:ssl.start
end
:inets.start
case :httpc.request(:binary.bin_to_list(path)) do
{ :ok, { { _, status, _ }, _, body } } when status in 200..299 ->
:binary.list_to_bin(body)
{ :ok, { { _, status, _ }, _, _ } } ->
raise Mix.Error, message: "Could not access url #{path}, got status: #{status}"
{ :error, reason } ->
raise Mix.Error, message: "Could not access url #{path}, error: #{inspect reason}"
end
end
defp file?(path) do
File.regular?(path)
end
defp url?(path) do
URI.parse(path).scheme in ["http", "https"]
end
end
|
lib/mix/lib/mix/utils.ex
| 0.85166 | 0.628692 |
utils.ex
|
starcoder
|
defmodule Kino.JS.Live do
@moduledoc ~S'''
Introduces state and event-driven capabilities to JavaScript
powered widgets.
Make sure to read the introduction to JavaScript widgets in
`Kino.JS` for more context.
Similarly to static widgets, live widgets involve a custom
JavaScript code running in the browser. In fact, this part
of the API is the same. In addition, each live widget has
a server process running on the Elixir side, responsible for
maintaining state and able to communicate with the JavaScript
side at any time. Again, to illustrate the ideas we start
with a minimal example.
## Example
We will follow up on our `Kino.HTML` example by adding support
for replacing the content on demand.
defmodule Kino.LiveHTML do
use Kino.JS
use Kino.JS.Live
def new(html) do
Kino.JS.Live.new(__MODULE__, html)
end
def replace(widget, html) do
Kino.JS.Live.cast(widget, {:replace, html})
end
@impl true
def init(html, ctx) do
{:ok, assign(ctx, html: html)}
end
@impl true
def handle_connect(ctx) do
{:ok, ctx.assigns.html, ctx}
end
@impl true
def handle_cast({:replace, html}, ctx) do
broadcast_event(ctx, "replace", html)
{:noreply, assign(ctx, html: html)}
end
asset "main.js" do
"""
export function init(ctx, html) {
ctx.root.innerHTML = html;
ctx.handleEvent("replace", (html) => {
ctx.root.innerHTML = html;
});
}
"""
end
end
Just as before we define a module, this time calling it
`Kino.LiveHTML` for clarity. Note many similarities to the
previous version, we still call `use Kino.JS`, define the
`main.js` file and define the `new(html)` function for
building the widget. As a matter of fact, the initial result
of `Kino.LiveHTML.new(html)` will render exactly the same
as our previous `Kino.HTML.new(html)`.
As for the new bits, we added `use Kino.JS.Live` to define
a live widget server. We use `Kino.JS.Live.new/2` for creating
the widget instance and we implement a few `GenServer`-like
callbacks.
Once the widget server is started with `Kino.JS.Live.new/2`,
the `c:init/2` callback is called with the initial argument.
In this case we store the given `html` in server state.
Whenever the widget is rendered on a new client, the `c:handle_connect/1`
callback is called and it builds the initial data for the
client. In this case, we always return the stored `html`.
This initial data is then passed to the JavaScript `init`
function. Keep in mind that while the server is initialized
once, connects may happen at any point, as users join or refresh
the page.
Finally, the whole point of our example is the ability to
replace the HTML content directly from the Elixir side and
for this purpose we added the public `replace(widget, html)`
function. Underneath the function uses `cast/2` to message
our server and the message is handled with `c:handle_cast/2`.
In this case we store the new `html` in the server state and
broadcast an event with the new value. On the client side,
we subscribe to those events with `ctx.handleEvent(event, callback)`
to update the page accordingly.
## Event handlers
You must eventually register JavaScript handlers for all events
that the client may receive. However, the registration can be
deferred, if the initialization is asynchronous. For example,
the following is perfectly fine:
```js
export function init(ctx, data) {
fetch(data.someUrl).then((resp) => {
ctx.handleEvent("update", (payload) => {
// ...
});
});
}
```
Or alternatively:
```js
export async function init(ctx, data) {
const response = await fetch(data.someUrl);
ctx.handleEvent("update", (payload) => {
// ...
});
}
```
In such case all incoming events are buffered and dispatched once
the handler is registered.
## Binary payloads
The client-server communication supports binary data, both on
initialization and on custom events. On the server side, a binary
payload has the form of `{:binary, info, binary}`, where `info`
is regular JSON-serializable data that can be sent alongside
the plain binary.
On the client side, a binary payload is represented as `[info, buffer]`,
where `info` is the additional data and `buffer` is the binary
as `ArrayBuffer`.
The following example showcases how to send and receive events
with binary payloads.
defmodule Kino.Binary do
use Kino.JS
use Kino.JS.Live
def new() do
Kino.JS.Live.new(__MODULE__, nil)
end
@impl true
def handle_connect(ctx) do
payload = {:binary, %{message: "hello"}, <<1, 2>>}
{:ok, payload, ctx}
end
@impl true
def handle_event("ping", {:binary, _info, binary}, ctx) do
reply_payload = {:binary, %{message: "pong"}, <<1, 2, binary::binary>>}
broadcast_event(ctx, "pong", reply_payload)
{:noreply, ctx}
end
asset "main.js" do
"""
export function init(ctx, payload) {
console.log("initial data", payload);
ctx.handleEvent("pong", ([info, buffer]) => {
console.log("event data", [info, buffer])
});
const buffer = new ArrayBuffer(2);
const bytes = new Uint8Array(buffer);
bytes[0] = 4;
bytes[1] = 250;
ctx.pushEvent("ping", [{ message: "ping" }, buffer]);
}
"""
end
end
'''
defstruct [:module, :pid, :ref]
alias Kino.JS.Live.Context
@opaque t :: %__MODULE__{module: module(), pid: pid(), ref: Kino.Output.ref()}
@type payload :: term() | {:binary, info :: term(), binary()}
@doc """
Invoked when the server is started.
See `c:GenServer.init/1` for more details.
"""
@callback init(arg :: term(), ctx :: Context.t()) :: {:ok, ctx :: Context.t()}
@doc """
Invoked whenever a new client connects to the server.
The returned data is passed to the JavaScript `init` function
of the connecting client.
"""
@callback handle_connect(ctx :: Context.t()) :: {:ok, payload(), ctx :: Context.t()}
@doc """
Invoked to handle client events.
"""
@callback handle_event(event :: String.t(), payload(), ctx :: Context.t()) ::
{:noreply, ctx :: Context.t()}
@doc """
Invoked to handle asynchronous `cast/2` messages.
See `c:GenServer.handle_cast/2` for more details.
"""
@callback handle_cast(msg :: term(), ctx :: Context.t()) :: {:noreply, ctx :: Context.t()}
@doc """
Invoked to handle synchronous `call/3` messages.
See `c:GenServer.handle_call/3` for more details.
"""
@callback handle_call(msg :: term(), from :: term(), ctx :: Context.t()) ::
{:noreply, ctx :: Context.t()} | {:reply, term(), ctx :: Context.t()}
@doc """
Invoked to handle all other messages.
See `c:GenServer.handle_info/2` for more details.
"""
@callback handle_info(msg :: term(), ctx :: Context.t()) :: {:noreply, ctx :: Context.t()}
@doc """
Invoked when the server is about to exit.
See `c:GenServer.terminate/2` for more details.
"""
@callback terminate(reason, ctx :: Context.t()) :: term()
when reason: :normal | :shutdown | {:shutdown, term} | term
@optional_callbacks init: 2,
handle_event: 3,
handle_call: 3,
handle_cast: 2,
handle_info: 2,
terminate: 2
defmacro __using__(_opts) do
quote location: :keep do
@behaviour Kino.JS.Live
import Kino.JS.Live.Context, only: [assign: 2, update: 3, broadcast_event: 3]
@before_compile Kino.JS.Live
end
end
def __before_compile__(env) do
unless Module.defines?(env.module, {:__assets_info__, 0}) do
message = """
make sure to include Kino.JS in #{inspect(env.module)} and define the necessary assets.
use Kino.JS
See Kino.JS for more details.
"""
IO.warn(message, Macro.Env.stacktrace(env))
end
nil
end
@doc """
Instantiates a live JavaScript widget defined by `module`.
The given `init_arg` is passed to the `init/2` callback when
the underlying widget process is started.
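## Examples
Using the `Kino.LiveHTML` module from the example in the moduledoc:
    widget = Kino.JS.Live.new(Kino.LiveHTML, "<h1>Hello</h1>")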
"""
@spec new(module(), term()) :: t()
def new(module, init_arg) do
ref = Kino.Output.random_ref()
{:ok, pid} = Kino.start_child({Kino.JS.Live.Server, {module, init_arg, ref}})
%__MODULE__{module: module, pid: pid, ref: ref}
end
@doc false
@spec js_info(t()) :: Kino.Output.js_info()
def js_info(%__MODULE__{} = widget) do
%{
js_view: %{
ref: widget.ref,
pid: widget.pid,
assets: widget.module.__assets_info__()
},
export: nil
}
end
@doc """
Sends an asynchronous request to the widget server.
See `GenServer.cast/2` for more details.
"""
@spec cast(t(), term()) :: :ok
def cast(widget, term) do
Kino.JS.Live.Server.cast(widget.pid, term)
end
@doc """
Makes a synchronous call to the widget server and waits
for its reply.
See `GenServer.call/3` for more details.
"""
@spec call(t(), term(), timeout()) :: term()
def call(widget, term, timeout \\ 5_000) do
Kino.JS.Live.Server.call(widget.pid, term, timeout)
end
end
|
lib/kino/js/live.ex
| 0.876158 | 0.837188 |
live.ex
|
starcoder
|
defmodule Verk do
@moduledoc """
Verk is a job processing system that integrates well with Sidekiq jobs
Each queue will have a pool of workers handled by `poolboy` that will process jobs.
Verk has a retry mechanism similar to Sidekiq that keeps retrying the jobs with a reasonable backoff.
It has an API that provides information about the queues
"""
alias Verk.{Job, Time, Manager}
@schedule_key "schedule"
def worker_assigns do
Application.get_env(:verk, :worker_assigns, [default: [node()], priority: [node()]])
end
def worker_executable?(queue_name) do
node() in Keyword.get(worker_assigns(), queue_name, [])
end
@doc """
Add a new `queue` with a pool of size `size` of workers
"""
@spec add_queue(atom, pos_integer) :: Supervisor.on_start_child
def add_queue(queue, size \\ 25) when is_atom(queue) and size > 0 do
Manager.add(queue, size)
end
@doc """
Remove `queue` from the list of queues that are being processed
"""
@spec remove_queue(atom) :: :ok | {:error, :not_found}
def remove_queue(queue) when is_atom(queue) do
Manager.remove(queue)
end
defdelegate pause_queue(queue), to: Verk.Manager, as: :pause
defdelegate resume_queue(queue), to: Verk.Manager, as: :resume
@doc """
Enqueues a Job to the specified queue returning the respective job id
The job must have:
* a valid `queue`
* a list of `args` to perform
* a module to perform (`class`)
* a valid `jid`
Optionally a Redix server can be passed which defaults to `Verk.Redis`
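## Examples
    # "MyWorker" is an illustrative Sidekiq-compatible worker class
    {:ok, jid} = Verk.enqueue(%Verk.Job{queue: :default, class: "MyWorker", args: [42]})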
"""
@spec enqueue(%Job{}, GenServer.server) :: {:ok, binary} | {:error, term}
def enqueue(job, redis \\ Verk.Redis)
def enqueue(job = %Job{queue: nil}, _redis), do: {:error, {:missing_queue, job}}
def enqueue(job = %Job{class: nil}, _redis), do: {:error, {:missing_module, job}}
def enqueue(job = %Job{args: args}, _redis) when not is_list(args), do: {:error, {:missing_args, job}}
def enqueue(job = %Job{max_retry_count: nil}, redis) do
job = %Job{job | max_retry_count: Job.default_max_retry_count()}
enqueue(job, redis)
end
def enqueue(job = %Job{max_retry_count: count}, _redis) when not is_integer(count), do: {:error, {:invalid_max_retry_count, job}}
def enqueue(job = %Job{jid: nil}, redis), do: enqueue(%Job{job | jid: generate_jid()}, redis)
def enqueue(job = %Job{jid: jid, queue: queue}, redis) do
job = %Job{job | enqueued_at: Time.now |> DateTime.to_unix}
case Redix.command(redis, ["LPUSH", "queue:#{queue}", Job.encode!(job)]) do
{:ok, _} -> {:ok, jid}
{:error, reason} -> {:error, reason}
end
end
@doc """
Schedules a Job to the specified queue returning the respective job id
The job must have:
* a valid `queue`
* a list of `args` to perform
* a module to perform (`class`)
* a valid `jid`
Optionally a Redix server can be passed which defaults to `Verk.Redis`
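## Examples
    # schedule for five minutes from now ("MyWorker" is illustrative;
    # DateTime.add/3 is available since Elixir 1.8)
    perform_at = DateTime.add(DateTime.utc_now(), 5 * 60, :second)
    {:ok, jid} = Verk.schedule(%Verk.Job{queue: :default, class: "MyWorker", args: []}, perform_at)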
"""
@spec schedule(%Job{}, %DateTime{}, GenServer.server) :: {:ok, binary} | {:error, term}
def schedule(job, datetime, redis \\ Verk.Redis)
def schedule(job = %Job{queue: nil}, %DateTime{}, _redis), do: {:error, {:missing_queue, job}}
def schedule(job = %Job{class: nil}, %DateTime{}, _redis), do: {:error, {:missing_module, job}}
def schedule(job = %Job{args: args}, %DateTime{}, _redis) when not is_list(args), do: {:error, {:missing_args, job}}
def schedule(job = %Job{jid: nil}, perform_at = %DateTime{}, redis) do
schedule(%Job{job | jid: generate_jid()}, perform_at, redis)
end
def schedule(job = %Job{jid: jid}, perform_at = %DateTime{}, redis) do
if Time.after?(Time.now, perform_at) do
# past time to do the job
enqueue(job, redis)
else
case Redix.command(redis, ["ZADD", @schedule_key, DateTime.to_unix(perform_at), Job.encode!(job)]) do
{:ok, _} -> {:ok, jid}
{:error, reason} -> {:error, reason}
end
end
end
defp generate_jid do
<<part1::32, part2::32>> = :crypto.strong_rand_bytes(8)
"#{part1}#{part2}"
end
end
|
lib/verk.ex
| 0.817429 | 0.559651 |
verk.ex
|
starcoder
|
defmodule AbsintheErrorPayload.Payload do
@moduledoc """
Absinthe Middleware to build a mutation payload response.
AbsintheErrorPayload mutation responses (aka "payloads") have three fields
- `successful` - Indicates if the mutation completed successfully or not. Boolean.
- `messages` - a list of validation errors. Always empty on success
- `result` - the data object that was created/updated/deleted on success. Always nil when unsuccessful
## Usage
In your schema file
1. `import AbsintheErrorPayload.Payload`
2. `import_types AbsintheErrorPayload.ValidationMessageTypes`
3. create a payload object for each object using `payload_object(payload_name, object_name)`
4. create a mutation that returns the payload object. Add the payload middleware after the resolver.
```
field :create_user, type: :user_payload, description: "add a user" do
arg :user, :create_user_params
resolve &UserResolver.create/2
middleware &build_payload/2
end
```
## Example Schema
Object Schema:
```elixir
defmodule MyApp.Schema.User do
@moduledoc false
use Absinthe.Schema.Notation
import AbsintheErrorPayload.Payload
import_types AbsintheErrorPayload.ValidationMessageTypes
alias MyApp.Resolvers.User, as: UserResolver
object :user, description: "Someone on our planet" do
field :id, non_null(:id), description: "unique identifier"
field :first_name, non_null(:string), description: "User's first name"
field :last_name, :string, description: "Optional Last Name"
field :age, :integer, description: "Age in Earth years"
field :inserted_at, :time, description: "Created at"
field :updated_at, :time, description: "Last updated at"
end
input_object :create_user_params, description: "create a user" do
field :first_name, non_null(:string), description: "Required first name"
field :last_name, :string, description: "Optional last name"
field :age, :integer, description: "Age in Earth years"
end
payload_object(:user_payload, :user)
object :user_mutations do
field :create_user, type: :user_payload, description: "Create a new user" do
arg :user, :create_user_params
resolve &UserResolver.create/2
middleware &build_payload/2
end
end
```
In your main schema file
```
import_types MyApp.Schema.User
mutation do
...
import_fields :user_mutations
end
```
## Alternate Use
If you'd prefer not to use the middleware style, you can generate AbsintheErrorPayload payloads
in your resolver instead. See `success_payload/1` and `error_payload/1` for examples.
"""
@enforce_keys [:successful]
defstruct successful: nil, messages: [], result: nil
use Absinthe.Schema.Notation
import AbsintheErrorPayload.ChangesetParser
alias __MODULE__
alias AbsintheErrorPayload.ValidationMessage
@doc """
Create a payload object definition
Each object that can be mutated will need its own graphql response object
in order to return typed responses. This is a helper method to generate a
custom payload object
## Usage
payload_object(:user_payload, :user)
is the equivalent of
```elixir
object :user_payload do
field :successful, non_null(:boolean), description: "Indicates if the mutation completed successfully or not. "
field :messages, list_of(:validation_message), description: "A list of failed validations. May be blank or null if mutation succeeded."
field :result, :user, description: "The object created/updated/deleted by the mutation"
end
```
This method must be called after `import_types AbsintheErrorPayload.ValidationMessageTypes` or it will fail due to `:validation_message` not being defined.
"""
defmacro payload_object(payload_name, result_object_name) do
quote location: :keep do
object unquote(payload_name) do
field(:successful, non_null(:boolean), description: "Indicates if the mutation completed successfully or not. ")
field(:messages, list_of(:validation_message), description: "A list of failed validations. May be blank or null if mutation succeeded.")
field(:result, unquote(result_object_name), description: "The object created/updated/deleted by the mutation. May be null if mutation failed.")
end
end
end
@doc """
Convert a resolution value to a mutation payload
To be used as Absinthe middleware. It should be placed immediately after the resolver.
The middleware will automatically transform an invalid changeset into validation errors.
Your resolver could then look like:
```elixir
@doc "
Creates a new user
Results are wrapped in a result monad as expected by absinthe.
"
def create(%{user: attrs}, _resolution) do
case UserContext.create_user(attrs) do
{:ok, user} -> {:ok, user}
{:error, %Ecto.Changeset{} = changeset} -> {:ok, changeset}
end
end
```
The build payload middleware will also accept error tuples with single or lists of
`AbsintheErrorPayload.ValidationMessage` or string errors. However, lists and strings will need to be wrapped in
an :ok tuple or they will be seen as errors by graphql.
An example resolver could look like:
```
@doc "
updates an existing user.
Results are wrapped in a result monad as expected by absinthe.
"
def update(%{id: id, user: attrs}, _resolution) do
case UserContext.get_user(id) do
nil -> {:ok, %ValidationMessage{field: :id, code: "not found", message: "does not exist"}}
user -> do_update_user(user, attrs)
end
end
defp do_update_user(user, attrs) do
case UserContext.update_user(user, attrs) do
{:ok, user} -> {:ok, user}
{:error, %Ecto.Changeset{} = changeset} -> {:ok, changeset}
end
end
```
Valid formats are:
```
%ValidationMessage{}
{:error, %ValidationMessage{}}
{:error, [%ValidationMessage{},%ValidationMessage{}]}
{:error, "This is an error"}
{:error, ["This is an error", "This is another error"]}
```
## Alternate Use
If you'd prefer not to use the middleware style, you can generate AbsintheErrorPayload payloads
in your resolver instead. See `convert_to_payload/1`, `success_payload/1` and `error_payload/1` for examples.
"""
def build_payload(%{value: value, errors: []} = resolution, _config) do
result = convert_to_payload(value)
Absinthe.Resolution.put_result(resolution, {:ok, result})
end
@doc """
Convert resolution errors to a mutation payload
The build payload middleware will accept lists of `AbsintheErrorPayload.ValidationMessage` or string errors.
Valid formats are:
```
[%ValidationMessage{},%ValidationMessage{}]
"This is an error"
["This is an error", "This is another error"]
```
"""
def build_payload(%{errors: errors} = resolution, _config) do
result = convert_to_payload({:error, errors})
Absinthe.Resolution.put_result(resolution, {:ok, result})
end
@doc """
Direct converter from value to a `Payload` struct.
This function will automatically transform an invalid changeset into validation errors.
Changesets, error tuples and lists of `AbsintheErrorPayload.ValidationMessage` will be identified
as errors and will generate an error payload.
Error formats are:
```
%Ecto.Changeset{valid?: false}
%ValidationMessage{}
{:error, %ValidationMessage{}}
{:error, [%ValidationMessage{},%ValidationMessage{}]}
{:error, "This is an error"}
{:error, ["This is an error", "This is another error"]}
```
All other values will be converted to a success payload.
An example use could look like:
```
@doc "
Load a user matching an id
Results are wrapped in a result monad as expected by absinthe.
"
def get_user(%{id: id}, _resolution) do
case UserContext.get_user(id) do
nil -> %ValidationMessage{field: :id, code: "not found", message: "does not exist"}
user -> user
end
|> AbsintheErrorPayload.Payload.convert_to_payload()
end
"""
def convert_to_payload({:error, %ValidationMessage{} = message}) do
error_payload(message)
end
def convert_to_payload(%ValidationMessage{} = message) do
error_payload(message)
end
def convert_to_payload({:error, message}) when is_binary(message) do
message
|> generic_validation_message()
|> error_payload()
end
def convert_to_payload({:error, list}) when is_list(list), do: error_payload(list)
def convert_to_payload(%Ecto.Changeset{valid?: false} = changeset) do
changeset
|> extract_messages()
|> error_payload()
end
def convert_to_payload(value), do: success_payload(value)
@doc """
Generates a mutation error payload.
## Examples
iex> error_payload(%ValidationMessage{code: "required", field: "name"})
%Payload{successful: false, messages: [%ValidationMessage{code: "required", field: "name"}]}
iex> error_payload([%ValidationMessage{code: "required", field: "name"}])
%Payload{successful: false, messages: [%ValidationMessage{code: "required", field: "name"}]}
## Usage
If you prefer not to use the Payload.middleware, you can use this method in your resolvers instead.
```elixir
@doc "
updates an existing user.
Results are wrapped in a result monad as expected by absinthe.
"
def update(%{id: id, user: attrs}, _resolution) do
case UserContext.get_user(id) do
nil -> {:ok, error_payload([%ValidationMessage{field: :id, code: "not found", message: "does not exist"}])}
user -> do_update_user(user, attrs)
end
end
defp do_update_user(user, attrs) do
case UserContext.update_user(user, attrs) do
{:ok, user} -> {:ok, success_payload(user)}
{:error, %Ecto.Changeset{} = changeset} -> {:ok, error_payload(changeset)}
end
end
```
"""
def error_payload(%ValidationMessage{} = message), do: error_payload([message])
def error_payload(messages) when is_list(messages) do
messages = Enum.map(messages, &prepare_message/1)
%Payload{successful: false, messages: messages}
end
@doc "convert validation message field to camelCase format used by graphQL"
def convert_field_name(%ValidationMessage{} = message) do
field =
cond do
message.field == nil -> camelized_name(message.key)
message.key == nil -> camelized_name(message.field)
true -> camelized_name(message.field)
end
%{message | field: field, key: field}
end
defp camelized_name(nil), do: nil
defp camelized_name(field) do
field
|> to_string()
|> Absinthe.Utils.camelize(lower: true)
end
defp prepare_message(%ValidationMessage{} = message) do
convert_field_name(message)
end
defp prepare_message(message) do
generic_validation_message(to_string(message))
end
@doc """
Generates a success payload.
## Examples
iex> success_payload(%User{first_name: "Stich", last_name: "Pelekai", id: 626})
%Payload{successful: true, result: %User{first_name: "Stich", last_name: "Pelekai", id: 626}}
## Usage
If you prefer not to use the `build_payload/2` middleware, you can use this method in your resolvers instead.
```elixir
@doc "
Creates a new user
Results are wrapped in a result monad as expected by absinthe.
"
def create(%{user: attrs}, _resolution) do
case UserContext.create_user(attrs) do
{:ok, user} -> {:ok, success_payload(user)}
{:error, %Ecto.Changeset{} = changeset} -> {:ok, error_payload(changeset)}
end
end
```
"""
def success_payload(result) do
%Payload{successful: true, result: result}
end
defp generic_validation_message(message) do
%ValidationMessage{
code: :unknown,
field: nil,
template: message,
message: message,
options: []
}
end
end
|
lib/absinthe_error_payload/payload.ex
| 0.907001 | 0.686127 |
payload.ex
|
starcoder
|
defmodule EctoFixtures do
@moduledoc """
Generates a map or JSON fixture from an Ecto schema.
Useful for tests.
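## Example
    # MyApp.User stands for any Ecto schema; the generated values are random,
    # so the output below is only illustrative
    EctoFixtures.ecto_fixtures(MyApp.User)
    #=> %{id: 4821, name: "Ana", admin: false}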
"""
def ecto_json_fixtures(model) do
model
|> ecto_fixtures
|> Poison.encode!
end
def ecto_fixtures(model) when is_map(model), do: map_ecto_values(model)
def ecto_fixtures(model) do
Faker.start
case Keyword.has_key?(model.__info__(:functions), :__schema__) do
true -> map_ecto_values(model.__schema__(:types))
false -> raise "not an Ecto.Model"
end
end
def map_ecto_values(map) when is_map(map) do
{_, res} = Enum.map_reduce(map, %{}, &value_to_json/2)
res
end
defp value_to_json({key, :id}, acc), do: {"", Map.put(acc, key, :rand.uniform(10_000))}
defp value_to_json({key, :integer}, acc), do: {"", Map.put(acc, key, :rand.uniform(10_000))}
defp value_to_json({key, :decimal}, acc), do: {"", Map.put(acc, key, random_float)}
defp value_to_json({key, :boolean}, acc), do: {"", Map.put(acc, key, random_boolean)}
defp value_to_json({key, :string}, acc), do: {"", Map.put(acc, key, Faker.Name.first_name)}
defp value_to_json({key, :map}, acc), do: {"", Map.put(acc, key, %{"last_name" => Faker.Name.last_name})}
defp value_to_json({key, Timex.Ecto.Date}, acc), do: {"", Map.put(acc, key, random_date("%F"))}
defp value_to_json({key, Timex.Ecto.DateTime}, acc), do: {"", Map.put(acc, key, random_date("%FT%T%:z"))}
defp value_to_json({key, Ecto.DateTime}, acc), do: {"", Map.put(acc, key, random_date("%FT%T%:z"))}
def random_date(format), do: Timex.format!(Timex.now, format, :strftime)
def random_float, do: Float.ceil(:rand.uniform + :rand.uniform(1000), 5)
def random_boolean do
case :rand.uniform(2) do
1 -> true
2 -> false
end
end
def random_enum(module) do
module.__enum_map__
|> Enum.random
|> atom_to_string
end
def atom_to_string(val) when is_binary(val), do: val
def atom_to_string(val) when is_atom(val), do: Atom.to_string val
end
defmodule EctoFixtures.EnumMacro do
@moduledoc """
Module for macro
"""
defmacro enum_value(module) do
quote do
defp value_to_json({key, unquote(module) = module}, acc), do: {"", Map.put(acc, key, random_enum(module))}
end
end
end
|
lib/ecto_fixtures.ex
| 0.574275 | 0.432243 |
ecto_fixtures.ex
|
starcoder
|
defmodule Mongo.Cursor do
@moduledoc false
import Record, only: [defrecordp: 2]
@type t :: %__MODULE__{
conn: Mongo.conn,
coll: Mongo.collection,
query: BSON.document,
opts: Keyword.t
}
defstruct [:conn, :coll, :query, :opts]
defimpl Enumerable do
defrecordp :state, [:conn, :cursor, :coll, :buffer]
def reduce(%{conn: conn, coll: coll, query: query, opts: opts}, acc, reduce_fun) do
start_fun = start_fun(conn, coll, query, opts)
next_fun = next_fun(opts)
after_fun = after_fun(opts)
Stream.resource(start_fun, next_fun, after_fun).(acc, reduce_fun)
end
defp start_fun(conn, coll, query, opts) do
opts = Keyword.put(opts, :batch_size, -1)
fn ->
case Mongo.direct_command(conn, query, opts) do
{:ok, %{"ok" => ok,
"cursor" => %{
"id" => cursor,
"ns" => coll,
"firstBatch" => docs}}} when ok == 1 -> state(conn: conn, cursor: cursor, coll: coll, buffer: docs)
{:error, error} -> raise error
end
end
end
defp next_fun(opts) do
fn
state(buffer: [], cursor: 0) = state -> {:halt, state}
state(buffer: [], conn: conn, cursor: cursor, coll: coll) = state ->
case get_more(conn, only_coll(coll), cursor, opts) do
{:ok, %{cursor_id: cursor, docs: []}} -> {:halt, state(state, cursor: cursor)}
{:ok, %{cursor_id: cursor, docs: docs}} -> {docs, state(state, cursor: cursor)}
{:error, error} -> raise error
end
state(buffer: buffer) = state -> {buffer, state(state, buffer: [])}
end
end
@doc """
Calls the `getMore` command
See https://github.com/mongodb/specifications/blob/master/source/find_getmore_killcursors_commands.rst
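Illustrative call (assumes an open `conn`, a live `cursor_id`, and the
`:batch_size`/`:max_time` options this function reads):
    {:ok, %{cursor_id: next_id, docs: docs}} =
      get_more(conn, "coll", cursor_id, batch_size: 100, max_time: 5_000)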
"""
def get_more(conn, coll, cursor, opts) do
query = [
{"getMore", cursor},
{"collection", coll},
{"batchSize", 2}, ##opts[:batch_size]},
{"maxTimeMS", opts[:max_time]}
]
query = filter_nils(query)
with {:ok, %{"cursor" => %{ "id" => cursor_id, "nextBatch" => docs}, "ok" => ok}} when ok == 1 <- Mongo.direct_command(conn, query, opts) do
{:ok, %{cursor_id: cursor_id, docs: docs}}
end
end
@doc """
Calls the `killCursors` command
See https://github.com/mongodb/specifications/blob/master/source/find_getmore_killcursors_commands.rst
"""
def kill_cursors(conn, coll, cursor_ids, opts) do
query = [
{"killCursors", coll},
{"cursors", cursor_ids}
]
query = filter_nils(query)
with {:ok, %{"cursorsAlive" => [],
"cursorsNotFound" => [],
"cursorsUnknown" => [],
"ok" => ok}} when ok == 1 <- Mongo.direct_command(conn, query, opts) do
:ok
end
end
defp filter_nils(keyword) when is_list(keyword) do
Enum.reject(keyword, fn {_key, value} -> is_nil(value) end)
end
defp filter_nils(map) when is_map(map) do
Enum.reject(map, fn {_key, value} -> is_nil(value) end)
|> Enum.into(%{})
end
defp after_fun(opts) do
fn
state(cursor: 0) -> :ok
state(cursor: cursor, coll: coll, conn: conn) -> kill_cursors(conn, only_coll(coll), [cursor], opts)
end
end
defp only_coll(coll) do
[_db, coll] = String.split(coll, ".", parts: 2)
coll
end
# we cannot deterministically slice, so tell Enumerable to
# fall back on brute force
def slice(_cursor), do: {:error, __MODULE__}
def count(_stream), do: {:error, __MODULE__}
def member?(_stream, _term), do: {:error, __MODULE__}
end
end
|
lib/mongo/cursor.ex
| 0.759404 | 0.445107 |
cursor.ex
|
starcoder
|
defmodule OMG.Watcher.ExitProcessor do
@moduledoc """
Encapsulates managing and executing the behaviors related to treating exits by the child chain and watchers
Keeps a state of exits that are in progress, updates it with news from the root chain, compares to the
state of the ledger (`OMG.API.State`), issues notifications as it finds suitable.
Should manage all kinds of exits allowed in the protocol and handle the interactions between them.
NOTE: all calls return `db_updates` and rely on the caller to do persistence.
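A typical caller persists the returned updates itself, e.g. (a sketch;
`OMG.DB.multi_update/1` is assumed here to be the persistence entry point):
    {:ok, db_updates} = OMG.Watcher.ExitProcessor.new_exits(exit_events)
    :ok = OMG.DB.multi_update(db_updates)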
"""
alias OMG.API.EventerAPI
alias OMG.API.State
alias OMG.API.State.Transaction
alias OMG.API.Utxo
alias OMG.DB
alias OMG.Eth
alias OMG.Watcher.ExitProcessor
alias OMG.Watcher.ExitProcessor.Challenge
alias OMG.Watcher.ExitProcessor.Core
alias OMG.Watcher.ExitProcessor.InFlightExitInfo
alias OMG.Watcher.Recorder
use OMG.API.LoggerExt
require Utxo
### Client
def start_link(_args) do
GenServer.start_link(__MODULE__, :ok, name: __MODULE__)
end
@doc """
Accepts events and processes them in the state - new exits are tracked.
Returns `db_updates`
"""
def new_exits(exits) do
GenServer.call(__MODULE__, {:new_exits, exits})
end
@doc """
Accepts events and processes them in the state - new in flight exits are tracked.
Returns `db_updates`
"""
def new_in_flight_exits(in_flight_exit_started_events) do
GenServer.call(__MODULE__, {:new_in_flight_exits, in_flight_exit_started_events})
end
@doc """
Accepts events and processes them in the state - finalized exits are untracked _if valid_ otherwise raises alert
Returns `db_updates`
"""
def finalize_exits(finalizations) do
GenServer.call(__MODULE__, {:finalize_exits, finalizations})
end
@doc """
Accepts events and processes them in the state - new piggybacks are tracked; if invalid, raises an alert
Returns `db_updates`
"""
def piggyback_exits(piggybacks) do
GenServer.call(__MODULE__, {:piggyback_exits, piggybacks})
end
@doc """
Accepts events and processes them in the state - challenged exits are untracked
Returns `db_updates`
"""
def challenge_exits(challenges) do
GenServer.call(__MODULE__, {:challenge_exits, challenges})
end
@doc """
Accepts events and processes them in the state.
Competitors are stored for future use (i.e., to challenge an in-flight exit).
Returns `db_updates`
"""
def new_ife_challenges(challenges) do
GenServer.call(__MODULE__, {:new_ife_challenges, challenges})
end
@doc """
Accepts events and processes them in state.
Returns `db_updates`
"""
def respond_to_in_flight_exits_challenges(responds) do
GenServer.call(__MODULE__, {:respond_to_in_flight_exits_challenges, responds})
end
@doc """
Accepts events and processes them in state.
Challenged piggybacks are forgotten.
Returns `db_updates`
"""
def challenge_piggybacks(challenges) do
GenServer.call(__MODULE__, {:challenge_piggybacks, challenges})
end
@doc """
Accepts events and processes them in state - finalized outputs are applied to the state.
Returns `db_updates`
"""
def finalize_in_flight_exits(finalizations) do
GenServer.call(__MODULE__, {:finalize_in_flight_exits, finalizations})
end
@doc """
Checks validity and causes event emission to `OMG.Watcher.Eventer`. Works with `OMG.API.State` to discern validity
"""
def check_validity do
GenServer.call(__MODULE__, :check_validity)
end
@doc """
Returns a map of requested in-flight exits, where keys are IFE hashes and values are IFEs.
If given an empty list of hashes, all IFEs are returned.
"""
@spec get_active_in_flight_exits() :: {:ok, %{binary() => InFlightExitInfo.t()}}
def get_active_in_flight_exits do
GenServer.call(__MODULE__, :get_active_in_flight_exits)
end
@doc """
Returns all information required to produce a transaction to the root chain contract to present a competitor for
a non-canonical in-flight exit
"""
@spec get_competitor_for_ife(binary()) :: {:ok, Core.competitor_data_t()} | {:error, :competitor_not_found}
def get_competitor_for_ife(txbytes) do
GenServer.call(__MODULE__, {:get_competitor_for_ife, txbytes})
end
@doc """
Returns all information required to produce a transaction to the root chain contract to present a proof of canonicity
for a challenged in-flight exit
"""
@spec prove_canonical_for_ife(binary()) :: {:ok, Core.prove_canonical_data_t()} | {:error, :canonical_not_found}
def prove_canonical_for_ife(txbytes) do
GenServer.call(__MODULE__, {:prove_canonical_for_ife, txbytes})
end
@spec get_input_challenge_data(Transaction.Signed.tx_bytes(), Transaction.input_index_t()) ::
{:ok, Core.input_challenge_data()} | {:error, Core.piggyback_challenge_data_error()}
def get_input_challenge_data(txbytes, input_index) do
GenServer.call(__MODULE__, {:get_input_challenge_data, txbytes, input_index})
end
@spec get_output_challenge_data(Transaction.Signed.tx_bytes(), Transaction.input_index_t()) ::
{:ok, Core.output_challenge_data()} | {:error, Core.piggyback_challenge_data_error()}
def get_output_challenge_data(txbytes, output_index) do
GenServer.call(__MODULE__, {:get_output_challenge_data, txbytes, output_index})
end
@doc """
Returns challenge for an exit
"""
@spec create_challenge(Utxo.Position.t()) ::
{:ok, Challenge.t()} | {:error, :utxo_not_spent | :exit_not_found}
def create_challenge(exiting_utxo_pos) do
GenServer.call(__MODULE__, {:create_challenge, exiting_utxo_pos})
end
### Server
use GenServer
def init(:ok) do
{:ok, db_exits} = DB.exit_infos()
{:ok, db_ifes} = DB.in_flight_exits_info()
{:ok, db_competitors} = DB.competitors_info()
sla_margin = Application.fetch_env!(:omg_watcher, :exit_processor_sla_margin)
processor = Core.init(db_exits, db_ifes, db_competitors, sla_margin)
{:ok, _} = Recorder.start_link(%Recorder{name: __MODULE__.Recorder, parent: self()})
_ = Logger.info("Initializing with: #{inspect(processor)}")
processor
end
def handle_call({:new_exits, exits}, _from, state) do
_ = if not Enum.empty?(exits), do: Logger.info("Recognized exits: #{inspect(exits)}")
exit_contract_statuses =
Enum.map(exits, fn %{exit_id: exit_id} ->
{:ok, result} = Eth.RootChain.get_standard_exit(exit_id)
result
end)
{new_state, db_updates} = Core.new_exits(state, exits, exit_contract_statuses)
{:reply, {:ok, db_updates}, new_state}
end
def handle_call({:new_in_flight_exits, events}, _from, state) do
_ = if not Enum.empty?(events), do: Logger.info("Recognized in-flight exits: #{inspect(events)}")
ife_contract_statuses =
Enum.map(
events,
fn %{call_data: %{in_flight_tx: bytes}} ->
{:ok, contract_ife_id} = Eth.RootChain.get_in_flight_exit_id(bytes)
{:ok, {timestamp, _, _, _, _}} = Eth.RootChain.get_in_flight_exit(contract_ife_id)
{timestamp, contract_ife_id}
end
)
{new_state, db_updates} = Core.new_in_flight_exits(state, events, ife_contract_statuses)
{:reply, {:ok, db_updates}, new_state}
end
def handle_call({:finalize_exits, exits}, _from, state) do
_ = if not Enum.empty?(exits), do: Logger.info("Recognized finalizations: #{inspect(exits)}")
exits =
exits
|> Enum.map(fn %{exit_id: exit_id} ->
{:ok, {_, _, _, utxo_pos}} = Eth.RootChain.get_standard_exit(exit_id)
Utxo.Position.decode(utxo_pos)
end)
{:ok, exit_event_triggers, db_updates_from_state, validities} = State.exit_utxos(exits)
_ = if not Enum.empty?(exit_event_triggers), do: Logger.info("Finalized exits: #{inspect(validities)}")
exit_event_triggers
|> Core.create_exit_finalized_events()
|> EventerAPI.emit_events()
{new_state, db_updates} = Core.finalize_exits(state, validities)
{:reply, {:ok, db_updates ++ db_updates_from_state}, new_state}
end
def handle_call({:piggyback_exits, exits}, _from, state) do
_ = if not Enum.empty?(exits), do: Logger.info("Recognized piggybacks: #{inspect(exits)}")
{new_state, db_updates} = Core.new_piggybacks(state, exits)
{:reply, {:ok, db_updates}, new_state}
end
def handle_call({:challenge_exits, exits}, _from, state) do
_ = if not Enum.empty?(exits), do: Logger.info("Recognized challenges: #{inspect(exits)}")
{new_state, db_updates} = Core.challenge_exits(state, exits)
{:reply, {:ok, db_updates}, new_state}
end
def handle_call({:new_ife_challenges, challenges}, _from, state) do
_ = if not Enum.empty?(challenges), do: Logger.info("Recognized ife challenges: #{inspect(challenges)}")
{new_state, db_updates} = Core.new_ife_challenges(state, challenges)
{:reply, {:ok, db_updates}, new_state}
end
def handle_call({:challenge_piggybacks, challenges}, _from, state) do
_ = if not Enum.empty?(challenges), do: Logger.info("Recognized piggyback challenges: #{inspect(challenges)}")
{new_state, db_updates} = Core.challenge_piggybacks(state, challenges)
{:reply, {:ok, db_updates}, new_state}
end
def handle_call({:respond_to_in_flight_exits_challenges, responds}, _from, state) do
_ = if not Enum.empty?(responds), do: Logger.info("Recognized response to IFE challenge: #{inspect(responds)}")
{new_state, db_updates} = Core.respond_to_in_flight_exits_challenges(state, responds)
{:reply, {:ok, db_updates}, new_state}
end
def handle_call({:finalize_in_flight_exits, finalizations}, _from, state) do
_ = if not Enum.empty?(finalizations), do: Logger.info("Recognized ife finalizations: #{inspect(finalizations)}")
case Core.finalize_in_flight_exits(state, finalizations) do
{:ok, state, db_updates} ->
{:reply, {:ok, db_updates}, state}
{:unknown_piggybacks, unknown_piggybacks} ->
_ = Logger.error("Outputs not piggybacked: #{inspect(unknown_piggybacks)}")
{:stop, :unknown_piggybacks, state}
{:unknown_in_flight_exit, unknown_ifes} ->
_ = Logger.error("Unknown in-flight exits: #{inspect(unknown_ifes)}")
{:stop, :unknown_in_flight_exit, state}
end
end
@doc """
Combine data from `ExitProcessor` and `API.State` to figure out what to do about exits
"""
def handle_call(:check_validity, _from, state) do
{state1, request} = prepare_validity_check(state)
{chain_status, events} = Core.invalid_exits(request, state1)
{:reply, {chain_status, events}, state}
end
def handle_call(:get_active_in_flight_exits, _from, state),
do: {:reply, {:ok, Core.get_active_in_flight_exits(state)}, state}
def handle_call({:get_competitor_for_ife, txbytes}, _from, state) do
# NOTE: future of using `ExitProcessor.Request` struct not certain, see that module for details
competitor_result =
%ExitProcessor.Request{}
# TODO: run_status_gets and getting all non-existent UTXO positions imaginable can be optimized out heavily
# only the UTXO positions being inputs to `txbytes` must be looked at, but it becomes problematic as
# txbytes can be invalid so we'd need a with here...
|> run_status_gets()
|> Core.determine_utxo_existence_to_get(state)
|> run_utxo_exists()
|> Core.determine_spends_to_get(state)
|> run_spend_getting()
|> Core.determine_blocks_to_get()
|> run_block_getting()
|> Core.get_competitor_for_ife(state, txbytes)
{:reply, competitor_result, state}
end
def handle_call({:prove_canonical_for_ife, txbytes}, _from, state) do
# NOTE: future of using `ExitProcessor.Request` struct not certain, see that module for details
canonicity_result =
%ExitProcessor.Request{}
# TODO: same comment as above in get_competitor_for_ife
|> run_status_gets()
|> Core.determine_utxo_existence_to_get(state)
|> run_utxo_exists()
|> Core.determine_spends_to_get(state)
|> run_spend_getting()
|> Core.determine_blocks_to_get()
|> run_block_getting()
|> Core.prove_canonical_for_ife(txbytes)
{:reply, canonicity_result, state}
end
def handle_call({:get_input_challenge_data, txbytes, input_index}, _from, state) do
response =
%ExitProcessor.Request{}
|> run_status_gets()
|> Core.determine_utxo_existence_to_get(state)
|> run_utxo_exists()
|> Core.determine_spends_to_get(state)
|> run_spend_getting()
|> Core.determine_blocks_to_get()
|> run_block_getting()
|> Core.get_input_challenge_data(state, txbytes, input_index)
{:reply, response, state}
end
def handle_call({:get_output_challenge_data, txbytes, output_index}, _from, state) do
{state1, request} = prepare_validity_check(state)
response = Core.get_output_challenge_data(request, state1, txbytes, output_index)
{:reply, response, state}
end
def handle_call({:create_challenge, Utxo.position(blknum, txindex, oindex) = exiting_utxo_pos}, _from, state) do
with spending_blknum_response <- exiting_utxo_pos |> Utxo.Position.to_db_key() |> OMG.DB.spent_blknum(),
%{txhash: txhash} <- OMG.Watcher.DB.Transaction.get_by_position(blknum, txindex),
{:ok, exit_id} <- OMG.Eth.RootChain.get_standard_exit_id(txhash, oindex),
{:ok, raw_spending_proof, exit_info} <-
Core.get_challenge_data(spending_blknum_response, exiting_utxo_pos, state) do
# TODO: we're violating the shell/core pattern here, refactor!
spending_proof =
case raw_spending_proof do
raw_blknum when is_number(raw_blknum) ->
{:ok, hashes} = OMG.DB.block_hashes([raw_blknum])
{:ok, [spending_block]} = OMG.DB.blocks(hashes)
spending_block
signed_tx ->
signed_tx
end
{:reply, {:ok, Core.create_challenge(exit_info, spending_proof, exiting_utxo_pos, exit_id)}, state}
else
error -> {:reply, error, state}
end
end
defp prepare_validity_check(state) do
# NOTE: future of using `ExitProcessor.Request` struct not certain, see that module for details
{request, state} =
%ExitProcessor.Request{}
|> run_status_gets()
# To find if IFE was included, see first if its inputs were spent.
|> Core.determine_ife_input_utxos_existence_to_get(state)
      |> run_ife_input_utxo_existence()
# Next, check by what transactions they were spent.
|> Core.determine_ife_spends_to_get(state)
|> run_ife_spend_getting()
# Find tx bodies.
|> Core.determine_ife_blocks_to_get()
|> run_ife_block_getting()
# Compare found txes with ife.tx.
# If equal, persist information about position.
|> Core.find_ifes_in_blocks(state)
request =
request
|> Core.determine_utxo_existence_to_get(state)
|> run_utxo_exists()
|> Core.determine_spends_to_get(state)
|> run_spend_getting()
|> Core.determine_blocks_to_get()
|> run_block_getting()
{state, request}
end
defp run_status_gets(%ExitProcessor.Request{} = request) do
{:ok, eth_height_now} = Eth.get_ethereum_height()
{blknum_now, _} = State.get_status()
_ = Logger.debug("eth_height_now: #{inspect(eth_height_now)}, blknum_now: #{inspect(blknum_now)}")
%{request | eth_height_now: eth_height_now, blknum_now: blknum_now}
end
defp run_utxo_exists(%ExitProcessor.Request{utxos_to_check: positions} = request) do
result = positions |> Enum.map(&State.utxo_exists?/1)
_ = Logger.debug("utxos_to_check: #{inspect(positions)}, utxo_exists_result: #{inspect(result)}")
%{request | utxo_exists_result: result}
end
  defp run_ife_input_utxo_existence(%ExitProcessor.Request{piggybacked_utxos_to_check: positions} = request) do
result = positions |> Enum.map(&State.utxo_exists?/1)
_ =
Logger.debug(
"piggybacked_utxos_to_check: #{inspect(positions)}, piggybacked_utxo_exists_result: #{inspect(result)}"
)
%{request | piggybacked_utxo_exists_result: result}
end
defp run_spend_getting(%ExitProcessor.Request{spends_to_get: positions} = request) do
result = positions |> Enum.map(&single_spend_getting/1)
_ = Logger.debug("spends_to_get: #{inspect(positions)}, spent_blknum_result: #{inspect(result)}")
%{request | spent_blknum_result: result}
end
defp run_ife_spend_getting(%ExitProcessor.Request{piggybacked_spends_to_get: positions} = request) do
result = positions |> Enum.map(&single_spend_getting/1)
_ =
Logger.debug(
"piggybacked_spends_to_get: #{inspect(positions)}, piggybacked_spent_blknum_result: #{inspect(result)}"
)
%{request | piggybacked_spent_blknum_result: result}
end
defp single_spend_getting(position) do
{:ok, spend_blknum} =
position
|> Utxo.Position.to_db_key()
|> OMG.DB.spent_blknum()
spend_blknum
end
defp run_block_getting(%ExitProcessor.Request{blknums_to_get: blknums} = request) do
_ = Logger.debug("blknums_to_get: #{inspect(blknums)}")
{:ok, hashes} = OMG.DB.block_hashes(blknums)
_ = Logger.debug("hashes: #{inspect(hashes)}")
{:ok, blocks} = OMG.DB.blocks(hashes)
_ = Logger.debug("blocks_result: #{inspect(blocks)}")
%{request | blocks_result: blocks}
end
defp run_ife_block_getting(%ExitProcessor.Request{piggybacked_blknums_to_get: blknums} = request) do
_ = Logger.debug("piggybacked_blknums_to_get: #{inspect(blknums)}")
{:ok, hashes} = OMG.DB.block_hashes(blknums)
_ = Logger.debug("piggybacked_hashes: #{inspect(hashes)}")
{:ok, blocks} = OMG.DB.blocks(hashes)
_ = Logger.debug("piggybacked_blocks_result: #{inspect(blocks)}")
%{request | piggybacked_blocks_result: blocks}
end
end
|
apps/omg_watcher/lib/exit_processor.ex
| 0.815857 | 0.458227 |
exit_processor.ex
|
starcoder
|
defmodule AshGraphql.Resource.ManagedRelationship do
@moduledoc "Represents a managed relationship configuration on a mutation"
defstruct [
:argument,
:action,
:types,
:type_name,
:lookup_with_primary_key?,
:lookup_identities
]
@schema [
argument: [
type: :atom,
doc: "The argument for which an input object should be derived.",
required: true
],
action: [
type: :atom,
doc: "The action that accepts the argument"
],
lookup_with_primary_key?: [
type: :boolean,
doc: """
If the managed_relationship has `on_lookup` behavior, this option determines whether or not the primary key is provided in the input object for looking up.
This option is ignored if there is no `on_lookup`.
"""
],
lookup_identities: [
type: {:list, :atom},
doc: """
If the managed_relationship has `on_lookup` behavior, this option determines which identities are provided in the input object for looking up.
This option is ignored if there is no `on_lookup`. By default *all* identities are provided.
"""
],
type_name: [
type: :atom,
doc: """
The name of the input object that will be derived. Defaults to `<action_type>_<resource>_<argument_name>_input`
      Because multiple actions could potentially be managing the same relationship, it isn't sufficient to
default to something like `<resource>_<relationship>_input`. Additionally, Ash doesn't expose resource
action names by default, meaning that there is no automatic way to ensure that all
of these have a default name that will always be unique. If you have multiple actions of the same
type that manage a relationship with an argument of the same name, you will get a compile-time error.
"""
],
types: [
type: :any,
doc: """
A keyword list of field names to their graphql type identifiers.
Since managed relationships can ultimately call multiple actions, there is the possibility
of field type conflicts. Use this to determine the type of fields and remove the conflict warnings.
For `non_null` use `{:non_null, type}`, and for a list, use `{:array, type}`, for example:
`{:non_null, {:array, {:non_null, :string}}}` for a non null list of non null strings.
To *remove* a key from the input object, simply pass `nil` as the type.
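
      For illustration, a hypothetical argument whose `:value` field is forced to a
      non null string and whose `:internal_id` key is removed from the input object:

          types: [value: {:non_null, :string}, internal_id: nil]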
"""
]
]
def schema, do: @schema
end
|
lib/resource/managed_relationship.ex
| 0.855836 | 0.428592 |
managed_relationship.ex
|
starcoder
|
defmodule KaufmannEx.ReleaseTasks.MigrateSchemas do
@moduledoc """
Task for registering all schemas in `priv/schemas` with the schema registry.
Expects
- schemas to be defined in `priv/schemas`.
  - an `event_metadata.avsc` schema to be defined and required by all events
  Can be called in a production attached console, or via a release task. Should not have any requirements beyond itself.
This script will load all required dependencies and should not need further configuration.
```
# Attempt to create or update all schemas in `priv/schemas`
KaufmannEx.ReleaseTasks.MigrateSchemas.migrate_schemas(:app_name)
# delete and recreate all schemas
KaufmannEx.ReleaseTasks.MigrateSchemas.reset_schemas(:app_name)
```
"""
alias KaufmannEx.Schemas
# @schema_path 'priv/schemas'
defp ensure_startup do
:ok = Application.ensure_started(:logger)
{:ok, _} = Application.ensure_all_started(:httpoison)
{:ok, _} = Application.ensure_all_started(:kaufmann_ex)
end
defp priv_dir(app) do
"#{:code.priv_dir(app)}"
end
@doc """
Attempts to update all schemas defined in `app/priv/schemas`.
  Expects an `event_metadata.avsc` metadata schema to be defined for all other schemas.
"""
def migrate_schemas(app \\ :kaufmann_ex)
def migrate_schemas(path) when is_binary(path) do
true = File.exists?(path)
meta_data_schema = load_metadata(path)
path
|> scan_dir()
|> Enum.map(&load_and_parse_schema/1)
|> Enum.map(&inject_metadata(&1, meta_data_schema))
|> Enum.map(®ister_schema/1)
|> Enum.map(&IO.inspect/1)
end
def migrate_schemas(app) do
ensure_startup()
IO.puts("Migrating Schemas")
meta_data_schema = load_metadata(app)
app
|> priv_dir()
|> Path.join("schemas")
|> scan_dir()
|> Enum.map(&load_and_parse_schema/1)
|> Enum.map(&inject_metadata(&1, meta_data_schema))
|> Enum.map(®ister_schema/1)
|> Enum.map(&IO.inspect/1)
end
@doc """
Attempts to delete and recreate all schemas defined in `app/priv/schemas`
  Expects an `event_metadata.avsc` metadata schema to be defined for all other schemas.
"""
def reset_schemas(app \\ :kaufmann_ex) do
ensure_startup()
IO.puts("Resetting Schemas")
meta_data_schema = load_metadata(app)
app
|> priv_dir()
|> Path.join("schemas")
|> scan_dir()
|> Enum.map(&load_and_parse_schema/1)
|> Enum.map(&inject_metadata(&1, meta_data_schema))
|> Enum.map(&reset_schema/1)
|> Enum.map(&IO.inspect/1)
end
def load_metadata(path) when is_binary(path) do
meta_data_schema =
path
|> Path.join("event_metadata.avsc")
|> load_and_parse_schema()
{:ok, _, _} = register_schema(meta_data_schema)
meta_data_schema
end
def load_metadata(app) do
app
|> priv_dir()
|> Path.join("schemas")
|> load_metadata()
end
def schema_registered({schema_name, schema}) do
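    # A 40401 ("subject not found") error from the schema registry means the
    # schema was never registered; report it as not compatible instead of failing.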
case Schemas.test(schema_name, schema) do
{:ok, res} -> {:ok, res}
{:error, %{"error_code" => 40_401}} -> {:ok, %{"is_compatible" => false}}
end
rescue
exception -> {:error, exception}
end
@spec register_schema({String.t(), map}) :: {atom, String.t(), any}
def register_schema({event_name, _} = schema) do
with {:ok, status} <- update_schema(schema) do
{:ok, event_name, status}
else
{:error, error} ->
{:error, event_name, error}
end
end
defp update_schema(schema) do
case Schemas.register(schema) do
{:ok, _} ->
{:ok, "Schema updated"}
{:error, %{"error_code" => 409}} ->
{:error, "Incompatible schema"}
{:error, error} ->
{:error, error}
end
end
def reset_schema({event_name, _} = schema) do
_ = Schemas.delete(event_name)
{:ok, _} = Schemas.register(schema)
end
@spec load_and_parse_schema(Path.t()) :: {String.t(), map}
defp load_and_parse_schema(schema_path) do
{:ok, schema} =
schema_path
|> File.read!()
|> Poison.decode()
schema_name = schema_path |> Path.basename() |> String.trim(".avsc")
{schema_name, schema}
end
defp inject_metadata({event_name, event_schema}, {_, meta_data_schema}) do
# Only inject metadata into event-type schemas
if String.match?(event_name, ~r/command\.|event\.|query\./) do
{event_name, [meta_data_schema, event_schema]}
else
{event_name, event_schema}
end
end
defp scan_dir(dir) do
files = File.ls!(dir)
child_schemas =
files
|> Enum.map(&Path.join(dir, &1))
|> Enum.filter(&File.dir?/1)
|> Enum.map(&scan_dir/1)
files
|> Enum.filter(&String.match?(&1, ~r/\.avsc/))
|> Enum.map(&Path.join(dir, &1))
|> Enum.concat(child_schemas)
|> List.flatten()
end
defp ok_and({:ok, right}) do
right
end
end
|
lib/release_tasks/migrate_schemas.ex
| 0.731538 | 0.62581 |
migrate_schemas.ex
|
starcoder
|
defmodule Sanbase.Alert.OperationText.KV do
@moduledoc ~s"""
  A module providing `to_template_kv/3` and related helpers that transform an
  operation into human-readable text that can be included in the alert's payload
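
  For instance (illustrative values; the `{{above}}` placeholder is later
  filled in by the templating layer):

      to_template_kv(10, %{above: 5})
      #=> {"is above {{above}}", %{above: 5, value: 10, human_readable: [:above, :value]}}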
"""
def current_value(%{current: value, previous: previous}, opts) do
special_symbol = Keyword.get(opts, :special_symbol, "")
transform_fun = Keyword.get(opts, :value_transform, fn x -> x end)
template = "Was: #{special_symbol}{{previous}}\nNow: #{special_symbol}{{value}}"
kv = %{
value: transform_fun.(value),
previous: transform_fun.(previous),
human_readable: [:value, :previous]
}
{template, kv}
end
def current_value(%{current: value}, opts) do
special_symbol = Keyword.get(opts, :special_symbol, "")
transform_fun = Keyword.get(opts, :value_transform, fn x -> x end)
template = "Now: #{special_symbol}{{value}}"
kv = %{value: transform_fun.(value), human_readable: [:value]}
{template, kv}
end
  defguard is_absolute_value_operation(map)
           when map_size(map) == 1 and
                  (is_map_key(map, :above) or is_map_key(map, :above_or_equal) or
                     is_map_key(map, :below) or is_map_key(map, :below_or_equal))
def to_template_kv(value, operation, opts \\ [])
# Absolute value operations (below, below_or_equal, above, above_or_equal)
def to_template_kv(%{current: value}, %{} = op, opts) when is_absolute_value_operation(op),
do: to_template_kv(value, op, opts)
def to_template_kv(value, op, opts) when is_absolute_value_operation(op) do
[op_key | _] = Map.keys(op)
op_value = op[op_key]
form = Keyword.get(opts, :form, :singular) |> form_to_text()
special_symbol = Keyword.get(opts, :special_symbol, "")
transform_fun = Keyword.get(opts, :value_transform, fn x -> x end)
op_to_text = fn op ->
Atom.to_string(op)
|> String.replace("_", " ")
end
template =
case Keyword.get(opts, :negative, false) do
true -> "#{form} not #{op_to_text.(op_key)} #{special_symbol}{{#{op_key}}}"
false -> "#{form} #{op_to_text.(op_key)} #{special_symbol}{{#{op_key}}}"
end
kv = %{
op_key => transform_fun.(op_value),
value: transform_fun.(value),
human_readable: [op_key, :value]
}
{template, kv}
end
# Inside channel
def to_template_kv(%{current: value}, %{inside_channel: _} = op, opts),
do: to_template_kv(value, op, opts)
def to_template_kv(value, %{inside_channel: [lower, upper]}, opts) do
form = Keyword.get(opts, :form, :singular) |> form_to_text()
special_symbol = Keyword.get(opts, :special_symbol, "")
transform_fun = Keyword.get(opts, :value_transform, fn x -> x end)
template =
case Keyword.get(opts, :negative, false) do
true ->
"#{form} not inside the [#{special_symbol}{{lower}}, #{special_symbol}{{upper}}] interval"
false ->
"#{form} inside the [#{special_symbol}{{lower}}, #{special_symbol}{{upper}}] interval"
end
kv = %{
lower: transform_fun.(lower),
upper: transform_fun.(upper),
value: transform_fun.(value),
human_readable: [:lower, :upper, :value]
}
{template, kv}
end
# Outside channel
def to_template_kv(%{current: value}, %{outside_channel: _} = op, opts),
do: to_template_kv(value, op, opts)
def to_template_kv(value, %{outside_channel: [lower, upper]}, opts) do
form = Keyword.get(opts, :form, :singular) |> form_to_text()
special_symbol = Keyword.get(opts, :special_symbol, "")
transform_fun = Keyword.get(opts, :value_transform, fn x -> x end)
template =
case Keyword.get(opts, :negative, false) do
true ->
"#{form} not outside the [#{special_symbol}{{lower}}, #{special_symbol}{{upper}}] interval"
false ->
"#{form} outside the [#{special_symbol}{{lower}}, #{special_symbol}{{upper}}] interval"
end
kv = %{
lower: transform_fun.(lower),
upper: transform_fun.(upper),
value: transform_fun.(value),
human_readable: [:lower, :upper, :value]
}
{template, kv}
end
# Percent up
def to_template_kv(%{percent_change: value}, %{percent_up: _} = op, opts),
do: to_template_kv(value, op, opts)
def to_template_kv(percent_change, %{percent_up: percent_up}, opts) do
transform_fun = Keyword.get(opts, :value_transform, fn x -> x end)
template =
case Keyword.get(opts, :negative, false) do
true -> "did not increase by {{percent_up_required}}%"
false -> "increased by {{percent_up}}%"
end
kv = %{
percent_up: transform_fun.(percent_change),
percent_up_required: transform_fun.(percent_up)
}
{template, kv}
end
# Percent down
def to_template_kv(%{percent_change: value}, %{percent_down: _} = op, opts),
do: to_template_kv(value, op, opts)
def to_template_kv(percent_change, %{percent_down: percent_down}, opts) do
transform_fun = Keyword.get(opts, :value_transform, fn x -> x end)
template =
case Keyword.get(opts, :negative, false) do
true -> "did not decrease by {{percent_down_required}}%"
false -> "decreased by {{percent_down}}%"
end
kv = %{
percent_down: transform_fun.(percent_change) |> abs(),
percent_down_required: transform_fun.(percent_down)
}
{template, kv}
end
# Amount up
def to_template_kv(%{absolute_change: value}, %{amount_up: _} = op, opts),
do: to_template_kv(value, op, opts)
def to_template_kv(amount_change, %{amount_up: amount_up}, opts) do
special_symbol = Keyword.get(opts, :special_symbol, "")
transform_fun = Keyword.get(opts, :value_transform, fn x -> x end)
template =
case Keyword.get(opts, :negative, false) do
true -> "did not increase by #{special_symbol}{{amount_change_up_required}}"
false -> "increased by #{special_symbol}{{amount_change_up}}"
end
kv = %{
amount_change_up: transform_fun.(amount_change),
amount_change_up_required: transform_fun.(amount_up),
human_readable: [:amount_change_up, :amount_change_up_required]
}
{template, kv}
end
  # Amount down
def to_template_kv(%{absolute_change: value}, %{amount_down: _} = op, opts),
do: to_template_kv(value, op, opts)
def to_template_kv(amount_change, %{amount_down: amount_down}, opts) do
special_symbol = Keyword.get(opts, :special_symbol, "")
transform_fun = Keyword.get(opts, :value_transform, fn x -> x end)
template =
case Keyword.get(opts, :negative, false) do
true -> "did not decrease by #{special_symbol}{{amount_down_change_required}}"
false -> "decreased by #{special_symbol}{{amount_down_change}}"
end
kv = %{
amount_down_change: transform_fun.(amount_change) |> abs(),
amount_down_change_required: transform_fun.(amount_down),
human_readable: [:amount_down_change, :amount_down_change_required]
}
{template, kv}
end
def to_template_kv(value, %{all_of: operations}, opts) when is_list(operations) do
{template, kv} =
Enum.reduce(operations, {[], %{}}, fn op, {template_acc, kv_acc} ->
{template, kv} = to_template_kv(value, op, opts)
{[template | template_acc], Map.merge(kv_acc, kv)}
end)
template = Enum.join(template, " and ")
{template, kv}
end
def to_template_kv(value, %{none_of: operations}, opts) when is_list(operations) do
opts = Keyword.put(opts, :negative, true)
{template, kv} =
Enum.reduce(operations, {[], %{}}, fn op, {template_acc, kv_acc} ->
{template, kv} = to_template_kv(value, op, opts)
{[template | template_acc], Map.merge(kv_acc, kv)}
end)
template = Enum.join(template, " and ")
{template, kv}
end
def to_template_kv(value, %{some_of: operations}, opts) when is_list(operations) do
{template, kv} =
Enum.reduce(operations, {[], %{}}, fn op, {template_acc, kv_acc} ->
if Sanbase.Alert.OperationEvaluation.operation_triggered?(value, op) do
{template, kv} = to_template_kv(value, op, opts)
{[template | template_acc], Map.merge(kv_acc, kv)}
else
{template_acc, kv_acc}
end
end)
template = template |> Enum.join(" and ")
{template, kv}
end
def details(:metric, settings, _opts) do
now = DateTime.utc_now() |> DateTime.truncate(:second)
before =
Sanbase.DateTimeUtils.before_interval(settings.time_window, now)
|> DateTime.truncate(:second)
{:ok, metric_metadata} = Sanbase.Metric.metadata(settings.metric)
template =
generated_by_data_template(
"metric_data_from_human_readable",
"metric_data_to_human_readable",
:metric,
metric_metadata.default_aggregation
)
kv = %{
metric_data_from: before,
metric_data_from_human_readable: Sanbase.DateTimeUtils.to_human_readable(before),
metric_data_to: now,
metric_data_to_human_readable: Sanbase.DateTimeUtils.to_human_readable(now),
metric_data_aggregation: metric_metadata.default_aggregation
}
{template, kv}
end
def details(:signal, settings, _opts) do
now = DateTime.utc_now() |> DateTime.truncate(:second)
before =
Sanbase.DateTimeUtils.before_interval(settings.time_window, now)
|> DateTime.truncate(:second)
{:ok, signal_metadata} = Sanbase.Signal.metadata(settings.signal)
template =
generated_by_data_template(
"signal_data_from_human_readable",
"signal_data_to_human_readable",
:signal,
signal_metadata.default_aggregation
)
kv = %{
signal_data_from: before,
signal_data_from_human_readable: Sanbase.DateTimeUtils.to_human_readable(before),
signal_data_to: now,
signal_data_to_human_readable: Sanbase.DateTimeUtils.to_human_readable(now),
signal_data_aggregation: signal_metadata.default_aggregation
}
{template, kv}
end
def details(_, _, _), do: {"", %{}}
# Private functions
defp form_to_text(:singular), do: "is"
defp form_to_text(:plural), do: "are"
defp generated_by_data_template(_from_template, to_template, entity_type, :last) do
"""
\\*_Generated by the value of the #{entity_type} at {{#{to_template}}}_
"""
end
defp generated_by_data_template(from_template, _to_template, entity_type, :first) do
"""
\\*_Generated by the value of the #{entity_type} at {{#{from_template}}}_
"""
end
defp generated_by_data_template(from_template, to_template, entity_type, :sum) do
"""
\\*_Generated by the sum of all #{entity_type} values in the interval:
{{#{from_template}}} - {{#{to_template}}}_
"""
end
defp generated_by_data_template(from_template, to_template, entity_type, aggregation) do
"""
\\*_Generated by the #{aggregation_to_str(aggregation)} value of the #{entity_type} in the interval:
{{#{from_template}}} - {{#{to_template}}}_
"""
end
defp aggregation_to_str(:avg), do: "average"
defp aggregation_to_str(aggregation), do: "#{aggregation}"
end
|
lib/sanbase/alerts/operation/operation_text_kv.ex
| 0.740362 | 0.691094 |
operation_text_kv.ex
|
starcoder
|
defmodule AvroRPC.Response do
@moduledoc """
A parser for turning AvroRPC responses into Elixir data structures.
It uses the Avro specification's type definition to reconstruct the field names/data structure in
Elixir.
"""
@doc """
Converts an :eavro_rpc_fsm response into an Elixir data structure.
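
  For example, an array of primitive values (illustrative):

      format({{:avro_array, :string}, ["a", "b"]})
      #=> {:ok, ["a", "b"]}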
"""
@spec format(any) :: {:ok, any} | {:error, {atom, String.t}}
def format(response) do
try do
convert(response)
catch
{:error, error} -> {:error, error}
else
converted_response ->
{:ok, converted_response}
end
end
defp convert({{:avro_array, {:avro_record, _name, _fields} = record_def}, [values]}) do
do_convert_array(record_def, values)
end
defp convert({{:array, %ExAvro.Record{} = record_def}, [values]}) do
do_convert_array(record_def, values)
end
defp convert({{:avro_array, item_type}, values}) do
do_convert_array(item_type, values)
end
defp convert({{:array, item_type}, values}) do
do_convert_array(item_type, values)
end
defp convert({{:avro_record, _name, fields}, values}) when length(fields) != length(values) do
throw({:error, {:invalid_length, "The number of fields do not match the number of values."}})
end
defp convert({{:avro_record, _name, fields}, values}), do: do_convert_record(fields, values)
defp convert({%ExAvro.Record{fields: fields, name: _name}, values}), do: do_convert_record(fields, values)
defp convert({:ok, response}), do: convert(response)
defp convert({_type, value}), do: convert(value)
defp convert(value), do: value
defp do_convert_array(item_type, values) do
Enum.map(values, &convert({item_type, &1}))
end
defp do_convert_record(fields, values) do
fields
|> convert_field_list_to_keys
|> Enum.zip(values)
|> Map.new
end
defp convert_field_list_to_keys([{_, _} | _] = fields), do: Keyword.keys(fields)
defp convert_field_list_to_keys([%ExAvro.Field{} | _] = fields) do
Enum.map(fields, fn(field) ->
convert_field_to_key(field)
end)
end
defp convert_field_to_key(%ExAvro.Field{name: name, type: _type}), do: name
end
|
lib/avro_rpc/response.ex
| 0.68721 | 0.641247 |
response.ex
|
starcoder
|
defmodule MasteringBitcoin.RPCBlock do
@moduledoc """
Example 3-5. Retrieving a block and adding all the transaction outputs.
Port over of `rpc_block.py` file (with fallback capabilities when Alice's
transaction isn't in the local blockchain).
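
  Usage (computes and prints the total output value of the target block):

      MasteringBitcoin.RPCBlock.run()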
"""
# Alias is as per the book's naming of its client as RawProxy
alias MasteringBitcoin.Client, as: RawProxy
@bitcoin_server_error """
Bitcoin server still warming up. \
run `brew services start bitcoin` for a while, then try again. \
"""
def run do
# The block height where Alice's transaction was recorded.
block_value =
277_316
|> get_blockhash()
|> get_transactions()
|> calculate_block_value()
IO.puts("(Total value in block: #{block_value})")
end
defp get_blockhash(blockheight) do
# Get the block hash of block with height 277316
case RawProxy.getblockhash(blockheight) do
{:ok, blockhash} ->
blockhash
{:error, %{"code" => -28, "message" => message}} ->
raise @bitcoin_server_error <> "Error message: #{message}"
{:error, _reason} ->
get_blockhash_from_latest_block()
end
end
# NOTE: If you are querying a full node on your local machine that doesn't
# have the entire blockchain on it, then you may not have the block where
# Alice's transaction was recorded. In that case, grab the latest block
# available in your local node.
defp get_blockhash_from_latest_block do
with {:ok, blockheight} <- RawProxy.getblockcount(),
{:ok, blockhash} <- RawProxy.getblockhash(blockheight) do
blockhash
else
{:error, reason} ->
IO.puts("Couldn't get a blockhash")
raise reason
end
end
defp get_transactions(blockhash) do
# Retrieve the block by its hash
with {:ok, block} <- RawProxy.getblock(blockhash),
# Element tx contains the list of all transaction IDs in the block
{:ok, transactions} <- Map.fetch(block, "tx") do
transactions
else
{:error, reason} ->
IO.puts("Couldn't get transactions")
raise reason
end
end
defp calculate_block_value(transactions) do
# Iterate through each transaction ID in the block
Enum.reduce(transactions, 0, fn txid, block_value ->
# Add the value of this transaction to the total
sum_transaction_values(txid) + block_value
end)
end
defp sum_transaction_values(txid) do
# Retrieve the raw transaction by ID
with {:ok, raw_tx} <- RawProxy.getrawtransaction(txid),
# Decode the transaction
{:ok, decoded_tx} <- RawProxy.decoderawtransaction(raw_tx) do
# Iterate through each output in the transaction
# Add up the value of each output
decoded_tx
|> Map.get("vout")
|> Stream.map(&Map.get(&1, "value"))
|> Enum.sum()
else
{:error, reason} ->
IO.puts("Couldn't decode transaction")
raise reason
end
end
end
|
lib/mastering_bitcoin/rpc_block.ex
| 0.802285 | 0.453806 |
rpc_block.ex
|
starcoder
|
defmodule Cldr.Calendar.Parse do
@moduledoc false
@split_reg ~r/[\sT]/
def parse_date(<<"-", year::bytes-4, "-", month::bytes-2, "-", day::bytes-2>>, calendar) do
with {:ok, {year, month, day}} <- return_date(year, month, day, calendar) do
{:ok, {-year, month, day}}
end
end
def parse_date(<<year::bytes-4, "-", month::bytes-2, "-", day::bytes-2>>, calendar) do
return_date(year, month, day, calendar)
end
def parse_date(_string, _calendar) do
{:error, :invalid_date}
end
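  # For a calendar whose valid_date?/3 accepts the values, e.g.:
  #   parse_date("2019-01-31", calendar)  #=> {:ok, {2019, 1, 31}}
  #   parse_date("-2019-01-31", calendar) #=> {:ok, {-2019, 1, 31}}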
def parse_week_date(<<"-", year::bytes-4, "-W", month::bytes-2, "-", day::bytes-1>>, calendar) do
with {:ok, {year, month, day}} <- return_date(year, month, day, calendar) do
{:ok, {-year, month, day}}
end
end
def parse_week_date(<<year::bytes-4, "-W", month::bytes-2, "-", day::bytes-1>>, calendar) do
return_date(year, month, day, calendar)
end
def parse_week_date(string, calendar) do
parse_date(string, calendar)
end
defp return_date(year, month, day, calendar) do
year = String.to_integer(year)
month = String.to_integer(month)
day = String.to_integer(day)
if calendar.valid_date?(year, month, day) do
{:ok, {year, month, day}}
else
{:error, :invalid_date}
end
end
[match_time, guard_time, read_time] =
quote do
[
<<h1, h2, ?:, i1, i2, ?:, s1, s2>>,
h1 >= ?0 and h1 <= ?9 and h2 >= ?0 and h2 <= ?9 and i1 >= ?0 and i1 <= ?9 and i2 >= ?0 and
i2 <= ?9 and s1 >= ?0 and s1 <= ?9 and s2 >= ?0 and s2 <= ?9,
{
(h1 - ?0) * 10 + (h2 - ?0),
(i1 - ?0) * 10 + (i2 - ?0),
(s1 - ?0) * 10 + (s2 - ?0)
}
]
end
defdelegate time_to_day_fraction(hour, minute, second, microsecond), to: Calendar.ISO
defdelegate add_day_fraction_to_iso_days(fraction, offset, microseconds), to: Calendar.ISO
defdelegate time_from_day_fraction(fraction), to: Calendar.ISO
defdelegate date_to_iso_days(year, month, day), to: Calendar.ISO
@doc false
def parse_naive_datetime(string, calendar) do
case String.split(string, @split_reg) do
[date, time] ->
with {:ok, {year, month, day}} <- calendar.parse_date(date),
{:ok, {hour, minute, second, microsecond}} <- calendar.parse_time(time) do
{:ok, {year, month, day, hour, minute, second, microsecond}}
end
_ ->
{:error, :invalid_format}
end
end
@doc false
def parse_utc_datetime(string, calendar) do
case String.split(string, @split_reg) do
[date, time_and_offset] ->
with <<unquote(match_time), rest::binary>> when unquote(guard_time) <- time_and_offset,
{microsecond, rest} <- parse_microsecond(rest),
{offset, ""} <- parse_offset(rest),
{:ok, {year, month, day}} <- calendar.parse_date(date) do
{hour, minute, second} = unquote(read_time)
cond do
not calendar.valid_time?(hour, minute, second, microsecond) ->
{:error, :invalid_time}
offset == 0 ->
{:ok, {year, month, day, hour, minute, second, microsecond}, offset}
is_nil(offset) ->
{:error, :missing_offset}
true ->
day_fraction = time_to_day_fraction(hour, minute, second, {0, 0})
{{year, month, day}, {hour, minute, second, _}} =
case add_day_fraction_to_iso_days({0, day_fraction}, -offset, 86400) do
{0, day_fraction} ->
{{year, month, day}, time_from_day_fraction(day_fraction)}
{extra_days, day_fraction} ->
base_days = calendar.date_to_iso_days(year, month, day)
{calendar.date_from_iso_days(base_days + extra_days),
time_from_day_fraction(day_fraction)}
end
{:ok, {year, month, day, hour, minute, second, microsecond}, offset}
end
end
_ ->
{:error, :invalid_format}
end
end
defp parse_microsecond("." <> rest) do
case parse_microsecond(rest, 0, "") do
{"", 0, _} ->
:error
{microsecond, precision, rest} when precision in 1..6 ->
pad = String.duplicate("0", 6 - byte_size(microsecond))
{{String.to_integer(microsecond <> pad), precision}, rest}
{microsecond, _precision, rest} ->
{{String.to_integer(binary_part(microsecond, 0, 6)), 6}, rest}
end
end
defp parse_microsecond("," <> rest) do
parse_microsecond("." <> rest)
end
defp parse_microsecond(rest) do
{{0, 0}, rest}
end
defp parse_microsecond(<<head, tail::binary>>, precision, acc) when head in ?0..?9,
do: parse_microsecond(tail, precision + 1, <<acc::binary, head>>)
defp parse_microsecond(rest, precision, acc), do: {acc, precision, rest}
defp parse_offset(""), do: {nil, ""}
defp parse_offset("Z"), do: {0, ""}
defp parse_offset("-00:00"), do: :error
defp parse_offset(<<?+, hour::2-bytes, ?:, min::2-bytes, rest::binary>>),
do: parse_offset(1, hour, min, rest)
defp parse_offset(<<?-, hour::2-bytes, ?:, min::2-bytes, rest::binary>>),
do: parse_offset(-1, hour, min, rest)
defp parse_offset(<<?+, hour::2-bytes, min::2-bytes, rest::binary>>),
do: parse_offset(1, hour, min, rest)
defp parse_offset(<<?-, hour::2-bytes, min::2-bytes, rest::binary>>),
do: parse_offset(-1, hour, min, rest)
defp parse_offset(<<?+, hour::2-bytes, rest::binary>>), do: parse_offset(1, hour, "00", rest)
defp parse_offset(<<?-, hour::2-bytes, rest::binary>>), do: parse_offset(-1, hour, "00", rest)
defp parse_offset(_), do: :error
defp parse_offset(sign, hour, min, rest) do
with {hour, ""} when hour < 24 <- Integer.parse(hour),
{min, ""} when min < 60 <- Integer.parse(min) do
{(hour * 60 + min) * 60 * sign, rest}
else
_ -> :error
end
end
end
|
lib/cldr/calendar/parse.ex
| 0.665628 | 0.531757 |
parse.ex
|
starcoder
|
defmodule CoopMinesweeper.Game.GameRegistry do
@moduledoc """
This module is responsible for creating, saving and deleting supervised game
agents.
It makes sure that the game id is associated with its game agent and that the
game agents are supervised so that they can't crash the application.
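
  A typical round trip (`:public` stands in for whatever `Field.visibility()`
  value your application uses):

      {:ok, {game_id, _pid}} = GameRegistry.create(10, 12, :public)
      {:ok, _pid} = GameRegistry.get(game_id)
      true = GameRegistry.delete(game_id)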
"""
alias CoopMinesweeper.Game.{Game, Field}
@type not_found_error() :: {:error, :not_found_error}
@doc """
Creates a new game agent, supervises it and puts it into the registry.
"""
@spec create(
size :: non_neg_integer(),
mines :: non_neg_integer(),
visibility :: Field.visibility()
) ::
{:ok, {String.t(), pid()}} | {:error, any()}
def create(size, mines, visibility) do
game_id = generate_game_id()
name = {:via, Registry, {CoopMinesweeper.GameRegistry, game_id}}
with {:ok, game_agent} <-
DynamicSupervisor.start_child(
CoopMinesweeper.GameSupervisor,
{Game,
size: size, mines: mines, game_id: game_id, name: name, visibility: visibility}
) do
{:ok, {game_id, game_agent}}
end
end
@doc """
Returns a game agent that is associated the given game id.
"""
@spec get(game_id :: String.t()) :: {:ok, pid()} | not_found_error()
def get(game_id) do
case Registry.lookup(CoopMinesweeper.GameRegistry, game_id) do
[] -> {:error, :not_found_error}
[{game_agent, _} | _] -> {:ok, game_agent}
end
end
@doc """
Deletes a game agent by its game id.
"""
@spec delete(game_id :: String.t()) :: true | not_found_error()
def delete(game_id) do
with {:ok, game_agent} <- get(game_id) do
# The Registry automatically deletes the PID and the DynamicSupervisor
# can handle exits, so it is ok to just kill the game agent.
Process.exit(game_agent, :kill)
end
end
@spec list_game_pids() :: [pid()]
def list_game_pids() do
select_pid = [{{:_, :"$1", :_}, [], [:"$1"]}]
Registry.select(CoopMinesweeper.GameRegistry, select_pid)
end
@spec generate_game_id() :: String.t()
defp generate_game_id() do
9_999_999_999
|> :rand.uniform()
|> to_string()
|> String.pad_leading(10, "0")
end
end
|
lib/coop_minesweeper/game/game_registry.ex
| 0.679179 | 0.446133 |
game_registry.ex
|
starcoder
|
defmodule Croma.Validation do
@moduledoc """
Module for code generation of argument validation (see `Croma.Defun.defun/2`).
This module is intended for internal use.
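
  As a rough sketch, an argument declared as `x :: integer` in a `defun` is
  checked at runtime by generated code equivalent to `Croma.Integer.valid?(x)`,
  raising a validation error when the check fails.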
"""
def make(type_expr, v, caller) do
ast = validation_expr(type_expr, v, caller)
{name, _, _} = v
type_string = Macro.to_string(type_expr)
quote bind_quoted: [name: name, ast: ast, type_string: type_string] do
case ast do
true -> nil
false -> raise "validation error: #{Atom.to_string(name)} is not a valid #{type_string}"
end
end
end
defp validation_expr(type_expr, v, caller) do
case type_expr do
a when is_atom(a) -> validation_expr_equal(v, a)
l when is_list(l) -> validation_expr_module(v, Croma.List)
{_, _} -> validation_expr_module(v, Croma.Tuple)
{:t, _, _} -> validation_expr_module(v, caller.module)
{:|, _, [t1, t2]} -> validation_expr_union(v, t1, t2, caller)
{{:., _, [mod_alias, :t]}, _, _} -> validation_expr_module(v, replace_elixir_type_module(mod_alias, caller))
{first, _, _} -> validation_expr_module(v, module_for(first, type_expr))
_ -> error(type_expr)
end
end
defp module_for(first, type_expr) do
case first do
:integer -> Croma.Integer
:pos_integer -> Croma.PosInteger
      :neg_integer -> Croma.NegInteger
:non_neg_integer -> Croma.NonNegInteger
:boolean -> Croma.Boolean
:byte -> Croma.Byte
:char -> Croma.Char
:float -> Croma.Float
:number -> Croma.Number
:binary -> Croma.Binary
:bitstring -> Croma.BitString
:module -> Croma.Atom
:atom -> Croma.Atom
:node -> Croma.Atom
:fun -> Croma.Function
:pid -> Croma.Pid
:port -> Croma.Port
:reference -> Croma.Reference
:char_list -> Croma.List
:list -> Croma.List
:map -> Croma.Map
:tuple -> Croma.Tuple
:%{} -> Croma.Map
:{} -> Croma.Tuple
:<<>> -> Croma.BitString
_ -> error(type_expr)
end
end
defp validation_expr_module(v, mod) do
quote bind_quoted: [v: v, mod: mod] do
mod.valid?(v)
end
end
defp validation_expr_equal(v, value) do
quote bind_quoted: [v: v, value: value] do
v == value
end
end
defp validation_expr_union(v, t1, t2, caller) do
q1 = validation_expr(t1, v, caller)
q2 = validation_expr(t2, v, caller)
quote do
unquote(q1) || unquote(q2)
end
end
defp replace_elixir_type_module(mod_alias, caller) do
mod = Macro.expand(mod_alias, caller)
case mod do
String -> Croma.String
_ -> mod
end
end
defp error(type_expr) do
raise "cannot generate validation code for the given type: #{Macro.to_string(type_expr)}"
end
end
|
lib/croma/validation.ex
| 0.604866 | 0.448487 |
validation.ex
|
starcoder
|
defmodule Grapex.Models.Utils do
@spec filter_by_label(map, integer) :: map
defp filter_by_label(batch, target_label) do
case Stream.zip([batch.heads, batch.tails, batch.relations, batch.labels])
|> Stream.filter(fn {_, _, _, label} -> label == target_label end)
|> Stream.map(fn {head, tail, relation, label} -> [head, tail, relation, label] end)
|> Stream.zip
|> Stream.map(&Tuple.to_list/1)
|> Enum.to_list do
[heads, tails, relations, _] -> %{heads: heads, tails: tails, relations: relations}
end
end
@spec get_positive_and_negative_triples(map) :: map
def get_positive_and_negative_triples(batch) do
%{
positive: filter_by_label(batch, 1),
negative: filter_by_label(batch, -1)
}
end
@spec repeat(list) :: list
def repeat(items) do
items
end
def repeat(_, times) when times <= 0 do
    {:error, "Cannot repeat collection a negative or zero number of times"}
end
def repeat(items, times) when times == 1 do
repeat items
end
@spec repeat(list, integer) :: list
def repeat(items, times) do
Stream.cycle(items)
|> Stream.take(times * length(items))
|> Enum.to_list
end
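  # Cycles the list `times` times, e.g. repeat([1, 2], 2) #=> [1, 2, 1, 2]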
@n_triple_classes 2 # positive and negative
@n_entities_per_triple 2 # head and tail
@n_relations_per_triple 1
@spec to_model_input(map, float, integer, integer) :: tuple
def to_model_input(batch, margin \\ 2.0, entity_negative_rate \\ 1, relation_negative_rate \\ 0) do
n_positive_iterations = entity_negative_rate + relation_negative_rate
{
{
Nx.tensor(
[
repeat(batch.positive.heads, n_positive_iterations),
repeat(batch.positive.tails, n_positive_iterations),
batch.negative.heads,
batch.negative.tails
]
)
|> Nx.reshape({@n_triple_classes, @n_entities_per_triple, :auto})
|> Nx.transpose(axes: [0, 2, 1]), # make batch size the second axis
Nx.tensor(
[
repeat(batch.positive.relations, n_positive_iterations),
batch.negative.relations
]
)
|> Nx.reshape({@n_triple_classes, @n_relations_per_triple, :auto})
|> Nx.transpose(axes: [0, 2, 1]),
},
Nx.tensor(for _ <- 1..(length(batch.positive.heads) * n_positive_iterations) do [margin] end)
}
end
def to_model_input_for_testing(batch, batch_size \\ 17) do
{
Nx.tensor(
[
batch.heads,
batch.tails
]
)
# |> Nx.reshape({1, 2, :auto})
# |> Nx.transpose(axes: [0, 2, 1]),
|> Nx.transpose
# |> Grapex.IOutils.inspect_shape("transposed shape")
|> Nx.to_batched_list(batch_size)
|> Stream.map(fn x ->
x
|> Nx.transpose
|> Nx.reshape({1, 2, :auto})
|> Nx.transpose(axes: [0, 2, 1])
end
)
|> Enum.to_list
|> Nx.concatenate,
# |> IO.inspect,
# |> Nx.tensor,
Nx.tensor(
[
batch.relations
]
)
# |> Nx.reshape({1, 1, :auto})
# |> Nx.transpose(axes: [0, 2, 1]),
|> Nx.transpose
|> Nx.to_batched_list(batch_size)
|> Stream.map(fn x ->
x
|> Nx.transpose
|> Nx.reshape({1, 1, :auto})
|> Nx.transpose(axes: [0, 2, 1])
end
)
|> Enum.to_list
|> Nx.concatenate
# |> Nx.tensor
}
# |> Stream.zip
# |> Enum.to_list
end
end
|
lib/grapex/models/utils.ex
| 0.849644 | 0.559651 |
utils.ex
|
starcoder
|
defmodule Bitcoin.Protocol.Messages.GetHeaders do
@moduledoc """
Return a headers packet containing the headers of blocks starting right after the last known hash in the block
locator object, up to hash_stop or 2000 blocks, whichever comes first. To receive the next block headers, one needs
to issue getheaders again with a new block locator object. The getheaders command is used by thin clients to
quickly download the block chain where the contents of the transactions would be irrelevant (because they are not
ours). Keep in mind that some clients may provide headers of blocks which are invalid if the block locator object
contains a hash on the invalid branch.
For the block locator object in this packet, the same rules apply as for the getblocks packet.
https://en.bitcoin.it/wiki/Protocol_specification#getheaders
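
  A round-trip sketch (zeroed 32-byte hashes and a commonly used protocol
  version, purely for illustration):

      msg = %Bitcoin.Protocol.Messages.GetHeaders{
        version: 70015,
        block_locator_hashes: [<<0::size(256)>>],
        hash_stop: <<0::size(256)>>
      }
      msg == msg |> Bitcoin.Protocol.Messages.GetHeaders.serialize()
                 |> Bitcoin.Protocol.Messages.GetHeaders.parse()
      #=> true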
"""
alias Bitcoin.Protocol.Types.Integer
defstruct version: 0, # the protocol version
block_locator_hashes: [], # block locator object; newest back to genesis block (dense to start, but then sparse)
hash_stop: <<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>> # hash of the last desired block; set to zero to get as many headers as possible (up to 2000)
@type t :: %__MODULE__{
version: non_neg_integer,
block_locator_hashes: list,
hash_stop: bitstring
}
def parse(data) do
<<version :: unsigned-little-integer-size(32), payload :: binary>> = data
[count, payload] = Integer.parse_stream(payload)
[block_locator_hashes, payload] = Enum.reduce(1..count, [[], payload], fn (_, [collection, payload]) ->
<<element :: bytes-size(32), payload :: binary>> = payload
[collection ++ [element], payload]
end)
<< hash_stop :: bytes-size(32) >> = payload
%__MODULE__{
version: version,
block_locator_hashes: block_locator_hashes,
hash_stop: hash_stop
}
end
def serialize(%__MODULE__{} = s) do
<<
s.version :: unsigned-little-integer-size(32),
>> <>
Integer.serialize(s.block_locator_hashes |> Enum.count)
<> (
s.block_locator_hashes |> Enum.reduce(<<>>, &(&2 <> &1))
) <>
<<
s.hash_stop :: bytes-size(32)
>>
end
end
|
lib/bitcoin/protocol/messages/get_headers.ex
| 0.834542 | 0.513546 |
get_headers.ex
|
starcoder
|
defmodule Faker.Cannabis.En do
import Faker, only: [sampler: 2]
@moduledoc """
Functions for generating Cannabis related data in English
"""
@doc """
Returns a Cannabis strain string
## Examples
iex> Faker.Cannabis.En.strain()
"Critical Kush"
iex> Faker.Cannabis.En.strain()
"Blue Dream"
iex> Faker.Cannabis.En.strain()
"Mr. Nice Guy"
iex> Faker.Cannabis.En.strain()
"Gorilla Glue"
"""
@spec strain() :: String.t()
sampler(:strain, [
"24k",
"Acapulco Gold",
"ACDC",
"Alien Diesel",
"Banana OG",
"Bio-Diesel",
"Black Diamond OG",
"Black Jack",
"Blackberry Cookies",
"Blackberry Soda",
"Blackwater OG",
"Blue Dream",
"Blueberry Tangie",
"Boss Hog",
"Bubba Kush",
"Cali Mist",
"Candy Skunk",
"Candyland",
"Canna Sutra",
"Cheese",
"Chem Berry",
"Cherry Cheese",
"Cherry Lime",
"Chocolate Hashberry",
"Chronic",
"Church OG",
"Clementine",
"Colfax Cookies",
"Colfax Jack",
"Colfax Platinum",
"Critical Kush",
"Diablo OG",
"Double Dutch Bus",
"Dutch Treat",
"Forbidden Fruit",
"Gelato",
"Ghost OG",
"Girl Scout Cookies",
"Godzilla Glue",
"Golden Strawberries",
"Gooberry",
"Gorilla Glue",
"Gorilla Princess",
"Granddaddy Purple Kush",
"Grapefruit Kush",
"Green Cobra",
"Green Queen",
"Hardcore Grapes",
"Heavy OG",
"Hell's OG",
"<NAME>",
"<NAME>",
"<NAME>",
"Key Lime Pie",
"King Kong OG",
"King Louis OG",
"Kings Reserve OG",
"Koffee Cake",
"<NAME>",
"<NAME>",
"<NAME>",
"Lemonade",
"Lime #5",
"Lucky Charms",
"Master OG",
"<NAME>",
"<NAME>",
"Mojito",
"Moon Glue",
"Mr. <NAME>",
"NY Sour Diesel",
"Nectarine",
"<NAME>",
"<NAME>",
"OG Chem",
"OG Kush",
"OG Salad",
"ONAC OG",
"Panama Punch",
"Pineapple Express",
"Pineapple Sage",
"Pinesol OG",
"Pink Lotus",
"Pitbull",
"Platinum OG",
"Presidential OG",
"Private Reserve",
"Purple Punch",
"Queen Dream",
"Rainbow Sherbet",
"Raskal OG",
"Redwood Bubba Kush",
"Rude Boi OG",
"Satellite OG",
"Saturn OG",
"Sherbert",
"Skywalker OG",
"Snozzberry OG",
"Sonoma Glue",
"Soul Assasin OG",
"Soul Assassin",
"Sour Cheese",
"Strawberry Banana",
"Strawberry Cough",
"Strawberry Moonrocks",
"Strawberry Shortcake",
"Sugar Momma",
"Sunset Sherbert",
"Super Glue",
"Super Jack",
"Super Silver Haze",
"Tangie",
"Trainwreck",
"Watermelon",
"White Tahoe Cookies",
"Whitewalker OG",
"XJ-13"
])
@doc """
Returns a Cannabis cannabinoid string
## Examples
iex> Faker.Cannabis.En.cannabinoid()
"Cannabinol"
iex> Faker.Cannabis.En.cannabinoid()
"Cannabigerolic Acid"
iex> Faker.Cannabis.En.cannabinoid()
"Cannabinolic Acid"
iex> Faker.Cannabis.En.cannabinoid()
"Cannabicyclol"
"""
@spec cannabinoid() :: String.t()
sampler(:cannabinoid, [
"Tetrahydrocannabinol",
"Tetrahydrocannabinolic Acid",
"Tetrahydrocannabivarin",
"Cannabidiol",
"Cannabidiolic Acid",
"Cannabidivarin",
"Cannabinol",
"Cannabinolic Acid",
"Cannabigerol",
"Cannabigerolic Acid",
"Cannabichromene",
"Cannabichromic Acid",
"Cannabicyclol",
"Cannabicyclic Acid"
])
@doc """
Returns a Cannabis cannabinoid abbreviation string
## Examples
iex> Faker.Cannabis.En.cannabinoid_abbreviation()
"THCa"
iex> Faker.Cannabis.En.cannabinoid_abbreviation()
"THCv"
iex> Faker.Cannabis.En.cannabinoid_abbreviation()
"CBC"
iex> Faker.Cannabis.En.cannabinoid_abbreviation()
"CBG"
"""
@spec cannabinoid_abbreviation() :: String.t()
sampler(:cannabinoid_abbreviation, [
"THC",
"THCa",
"∆9THC",
"∆8THC",
"THCv",
"THCv",
"CBD",
"CBDa",
"CBDv",
"CBN",
"CBNa",
"CBG",
"CBGa",
"CBC",
"CBCa",
"CBL",
"CBLa"
])
@doc """
Returns a Cannabis terpene string
## Examples
iex> Faker.Cannabis.En.terpene()
"Camphor"
iex> Faker.Cannabis.En.terpene()
"Camphene"
iex> Faker.Cannabis.En.terpene()
"α Pinene"
iex> Faker.Cannabis.En.terpene()
"Sabinene"
"""
@spec terpene() :: String.t()
sampler(:terpene, [
"α Pinene",
"Myrcene",
"α Phellandrene",
"∆ 3 Carene",
"Terpinene",
"Limonene",
"α Terpinolene",
"Linalool",
"Fenchol",
"Borneol",
"Terpineol",
"Geraniol",
"α Humulene",
"β Caryophyllene",
"Caryophyllene Oxide",
"α Bisabolol",
"Camphene",
"β Pinene",
"Ocimene",
"Sabinene",
"Camphor",
"Isoborneol",
"Menthol",
"α Cedrene",
"Nerolidol",
"R-(+)-Pulegone",
"Eucalyptol",
"p-Cymene",
"(-)-Isopulegol",
"Geranyl",
"Acetate",
"Guaiol",
"Valencene",
"Phytol",
"Citronellol"
])
@doc """
Returns a Cannabis medical use string
## Examples
iex> Faker.Cannabis.En.medical_use()
"analgesic"
iex> Faker.Cannabis.En.medical_use()
"anti-cancer"
iex> Faker.Cannabis.En.medical_use()
"anti-cancer"
iex> Faker.Cannabis.En.medical_use()
"anti-fungal"
"""
@spec medical_use() :: String.t()
sampler(:medical_use, [
"analgesic",
"anti-bacterial",
"anti-diabetic",
"anti-emetic",
"anti-epileptic",
"anti-fungal",
"anti-depressant",
"anti-inflammatory",
"anti-insomnia",
"anti-ischemic",
"anti-cancer",
"anti-psoriatic",
"anti-psychotic",
"anti-spasmodic",
"anti-anxiety",
"appetite stimulant",
"anorectic",
"bone stimulant",
"immunoregulation",
"immunostimulant",
"intestinal anti-prokinetic",
"neuroprotective",
"bronchodilator",
"anti-stress",
"anti-septic",
"psychoactive",
"non-psychoactive",
"decongestant",
"anti-histamine",
"anti-oxidant",
"anti-viral"
])
@doc """
Returns a Cannabis health benefit string
## Examples
iex> Faker.Cannabis.En.health_benefit()
"relieves pain"
iex> Faker.Cannabis.En.health_benefit()
"inhibits cell growth in tumors/cancer cells"
iex> Faker.Cannabis.En.health_benefit()
"inhibits cell growth in tumors/cancer cells"
iex> Faker.Cannabis.En.health_benefit()
"treats fungal infection"
"""
@spec health_benefit() :: String.t()
sampler(:health_benefit, [
"relieves pain",
"kills or slows bacteria growth",
"reduces blood sugar levels",
"reduces vomiting and nausea",
"reduces seizures and convulsion",
"treats fungal infection",
"treats depression",
"reduces inflammation",
"aids sleep",
"reduces risk of artery blockage",
"inhibits cell growth in tumors/cancer cells",
"treats psoriasis",
"tranquilizing",
"suppresses muscle spasms",
"relieves anxiety",
"stimulates appetite",
"suppresses appetite",
"promotes bone growth",
"regulates function in the immune system",
"stimulates function in the immune system",
"reduces contractions in the small intestines",
"protects against nervous system degeneration",
"improves airflow to lungs",
"prevents stress",
"prevents infection",
"affects mental activity",
"does not affect mental activity",
"relieves congestion",
"treats allergy symptoms",
"cell protectant",
"treats viral infections"
])
@doc """
Returns a Cannabis category string
## Examples
iex> Faker.Cannabis.En.category()
"flower"
iex> Faker.Cannabis.En.category()
"medical"
iex> Faker.Cannabis.En.category()
"seeds & clones"
iex> Faker.Cannabis.En.category()
"live resin"
"""
@spec category() :: String.t()
sampler(:category, [
"capsules",
"concentrates",
"crumble",
"crystalline",
"distillate",
"edibles",
"flower",
"ice hash",
"live resin",
"medical",
"rosin",
"seeds & clones",
"shatter",
"tinctures",
"topicals",
"vape pens"
])
@doc """
Returns a Cannabis type string
## Examples
iex> Faker.Cannabis.En.type()
"hybrid"
iex> Faker.Cannabis.En.type()
"sativa"
iex> Faker.Cannabis.En.type()
"hybrid"
iex> Faker.Cannabis.En.type()
"sativa"
"""
@spec type() :: String.t()
sampler(:type, ["hybrid", "indica", "sativa"])
@doc """
Returns a Cannabis buzzword string
## Examples
iex> Faker.Cannabis.En.buzzword()
"toke"
iex> Faker.Cannabis.En.buzzword()
"cbd"
iex> Faker.Cannabis.En.buzzword()
"stoned"
iex> Faker.Cannabis.En.buzzword()
"stoned"
"""
@spec buzzword() :: String.t()
sampler(:buzzword, [
"blunt wrap",
"bong",
"bottom shelf",
"bubbler",
"cashed",
"cbd",
"dank",
"eighth",
"gram",
"high",
"hydroponic",
"keef",
"marijuana",
"mary jane",
"munchies",
"ounce",
"papers",
"pipe",
"pound",
"private reserve",
"ripped",
"spliff",
"stoned",
"terpene",
"thc",
"toke",
"top shelf",
"wake and bake",
"weed"
])
@doc """
Returns a Cannabis brand string
## Examples
iex> Faker.Cannabis.En.brand()
"Evolab"
iex> Faker.Cannabis.En.brand()
"CI Wholesale"
iex> Faker.Cannabis.En.brand()
"Muy"
iex> Faker.Cannabis.En.brand()
"Chong's Choice"
"""
@spec brand() :: String.t()
sampler(:brand, [
"8 | FOLD Cultivation",
"Apothecanna",
"Auntie Dolores",
"Big Pete's Treats",
"Bloom Farms",
"CI Wholesale",
"California's Finest",
"Cannapunch",
"Cannavore Confections",
"Caviar Gold",
"Cheeba Chews",
"Chong's Choice",
"Claw",
"Coda Signature",
"Colorado Cannabis Company",
"Dixie Edibles",
"Dixie Elixirs",
"Dosist",
"Evolab",
"Hashman Infused",
"Hiku",
"Jetty Extracts",
"K.I.N.D. Concentrates",
"Kiva Confections",
"Leafs by Snoop",
"Legal Drinks",
"Level Blends",
"L<NAME>",
"<NAME>",
"Mar<NAME>",
"Muy",
"Nature’s Medicines",
"Oil Stix",
"Omaha Farms",
"Orchid Essentials",
"OreKron",
"Roots",
"Seven Point",
"Shore Natural RX",
"S<NAME>",
"Summit",
"THC Design",
"THC Factory",
"The Goodship Company",
"The Lab",
"Timeless Vapes",
"True Humboldt",
"Wana Brands",
"Whoopie & Maya",
"Willie’s Reserve",
"marQaha"
])
end
|
lib/faker/cannabis/en.ex
| 0.722331 | 0.502747 |
en.ex
|
starcoder
|
defmodule ExTorch.ModuleMixin do
@moduledoc """
Utilities used to define a module mixin that inherits documentation and specs.
"""
@signature_regex ~r/[a-zA-Z_]\w*[(]((\w,? ?)*)[)]/
@doc """
This macro enables a module to import the functions from another module
and expose them as they were defined on it.
defmodule BaseModule do
def call1(arg1, arg2) do
arg1 + arg2
end
def call2() do
:ok
end
end
defmodule Mixin do
import ExTorch.ModuleMixin
extends(BaseModule)
end
By using the `extends/1` macro, the `Mixin` module will have the definitions
of `call1/2` and `call2/0`.
## Implementation notes
The function definitions are given via `defdelegate` internally.
"""
defmacro extends(module) do
module = Macro.expand(module, __CALLER__)
functions = module.__info__(:functions)
{signature_args, doc_funcs} =
module
|> Code.fetch_docs()
|> fetch_signatures()
agg_functions =
Enum.reduce(functions, %{}, fn {func, arity}, acc ->
current_max = Map.get(acc, func, 0)
Map.put(acc, func, max(current_max, arity))
end)
signatures =
Enum.reduce(functions, [], fn {name, arity}, acc ->
str_name = Atom.to_string(name)
case String.starts_with?(str_name, "__") do
false ->
add_max_header = is_max_arity_call(doc_funcs, name, arity)
args = get_arguments(signature_args, name, arity)
[{{name, [], args}, add_max_header} | acc]
true ->
acc
end
end)
# zipped = List.zip([signatures, functions])
Enum.map(signatures, fn
{sig, true} ->
{func_name, _, _} = sig
max_arity = Map.get(agg_functions, func_name)
quote do
ExTorch.DelegateWithDocs.defdelegate(unquote(sig),
to: unquote(module),
point_to: unquote(max_arity)
)
end
{sig, false} ->
quote do
ExTorch.DelegateWithDocs.defdelegate(unquote(sig), to: unquote(module))
end
end)
end
defp get_arguments(signature_args, name, arity) do
signature = Map.get(signature_args, {name, arity})
[_, args, _] = Regex.run(@signature_regex, signature)
args
|> String.split(",", trim: true)
|> Enum.map(fn arg -> Macro.var(String.to_atom(arg), nil) end)
end
defp is_max_arity_call(doc_funcs, name, arity) do
docstring = Map.get(doc_funcs, {name, arity})
case docstring do
nil -> false
docstring -> String.contains?(docstring, "Available signature calls")
end
end
defp fetch_signatures({:docs_v1, _, :elixir, "text/markdown", _, %{}, funcs}) do
Enum.reduce(funcs, {%{}, %{}}, fn
{{:function, func_name, arity}, _, [signature], docstring, _}, {acc, docs} ->
docstring =
case docstring do
%{"en" => doc} -> doc
_ -> nil
end
acc = Map.put(acc, {func_name, arity}, signature)
docs = Map.put(docs, {func_name, arity}, docstring)
{acc, docs}
_, acc ->
acc
end)
end
end
|
lib/extorch/utils/module_mixin.ex
| 0.748536 | 0.50531 |
module_mixin.ex
|
starcoder
|
defmodule Grizzly.Inclusions do
@moduledoc """
Module for adding and removing Z-Wave nodes
In Z-Wave the term "inclusions" means two things:
1. Adding a new Z-Wave device to the Z-Wave Network
  2. Removing a Z-Wave device from the Z-Wave Network
In practice though it is more common to speak about adding a Z-Wave node in
  the context of "including" and removing a Z-Wave node in the context of
  "excluding." This module provides functionality for working with all contexts
of inclusion, both adding and removing.
## Adding a Z-Wave Node (including)
  Adding a device that does not require any security authentication is as
  simple as calling `Grizzly.Inclusions.add_node/0`.
```elixir
iex> Grizzly.Inclusions.add_node()
:ok
```
After starting the inclusion on the controller, which the above function
does, you can then put your device into inclusion as well. From here the new
device and your controller will communicate and if all goes well you should
receive a message in the form of
  `{:grizzly, :inclusion, NodeAddStatus}` where the `NodeAddStatus` is a
  Z-Wave command that contains information about the inclusion status (status,
node id, supported command classes, security levels, etc.). See
`Grizzly.ZWave.Commands.NodeAddStatus` for more information about the values
in that command. For example:
```elixir
defmodule MyInclusionServer do
use GenServer
require Logger
alias Grizzly.Inclusions
alias Grizzly.ZWave.Command
def start_link(_) do
GenServer.start_link(__MODULE__, nil)
end
def add_node(pid) do
GenServer.call(pid, :add_node)
end
def init(_) do
{:ok, nil}
end
def handle_call(:add_node, _from, state) do
:ok = Inclusions.add_node()
{:reply, :ok, state}
end
def handle_info({:grizzly, :inclusion, report}, state) do
case Command.param!(report.command, :status) do
:done ->
node_id = Command.param!(report.command, :node_id)
          Logger.info("Node added with id: #{node_id}")
:failed ->
Logger.warn("Adding node failed :(")
:security_failed ->
node_id = Command.param!(report.command, :node_id)
          Logger.warn("Node added with id: #{node_id} but the security failed")
end
{:noreply, state}
end
end
```
### Stop Adding a Node
If you need you need to stop trying to add a node to the Z-Wave network you
can use the `Grizzly.Inclusions.remove_node/0` function.
This should stop the controller from trying to add a node and return it to
a normal functions state.
### Security
There are five security levels in Z-Wave: unsecured, S0, S2 unauthenticated,
S2 authenticated, and S2 access control. The first 2 requires nothing
special from the calling process to able to use, as the controller and the
including node will figure out which security scheme to use.
#### S2
The process of adding an S2 device is a little more involved. The process is
the same up until right after you put the including node into the inclusion
mode. At that point including will request security keys, which really means
it tells you which S2 security scheme it supports. You then use the
`Grizzly.Inclusions.grant_keys/1` function to pass a list of allowed security
schemes.
After that the node will response with a `NodeAddDSKReport` where it reports
the DSK and something called the `:dsk_input_length`. If the input length is
`0`, that means it is trying to do S2 unauthenticated inclusion. You can
just call `Grizzly.Inclusions.set_input_dsk/0` function and the rest of the
inclusion process should continue until complete.
If the `:dsk_input_length` has number, normally will be `2` that means the
including device is requesting a 5 digit digit pin that is normally found on
a label somewhere on the physical device it.
From here you can call `Grizzly.Inclusions.set_input_dsk/1` with the 5 digit
integer as the argument. The inclusion process should continue until complete.
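Putting it together, a minimal S2 flow sketch (assuming the reports arrive
as described above):
```elixir
:ok = Grizzly.Inclusions.add_node()
# ... the including node requests security keys ...
:ok = Grizzly.Inclusions.grant_keys([:s2_authenticated])
# ... a NodeAddDSKReport arrives reporting an input DSK length of 2 ...
:ok = Grizzly.Inclusions.set_input_dsk(12345)
```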
## Removing a Z-Wave Node (excluding)
To remove a Z-Wave node from the network, calling
`Grizzly.Inclusions.remove_node/0` will start an inclusion process for removing
a Z-Wave node. After calling this function you can place your device into
exclusion mode (normally the same way you included the device is the way the
device is excluded). At the end of the exclusion the `NodeRemoveStatus` command
is received and can be inspected for success or failure.
### Removed Node ID 0?
Any Z-Wave controller can exclude a device from another controller. In
practice this means your Z-Wave controller can make a device "forget" the
controller it is currently attached to. Most of the time Z-Wave products will
have you exclude your device and then include it just to make sure the
including node isn't connected to another Z-Wave controller.
When this happens you will receive a successful `NodeRemoveStatusReport` but
the node id will be `0`. This is considered successful and most of the time
intended.
## Stopping Remove Node Process
To stop the removal inclusion process on your controller you can call the
`Grizzly.Inclusions.remove_node_stop/0` function.
## Inclusion Handler
To tie into the inclusion process we default to sending messages to the
calling process. However, there is a better way to tie into this system.
When starting any inclusion process you can pass the `:handler` option
which can be either another pid or a module that implements the
`Grizzly.InclusionHandler` behaviour, or a tuple with the module and callback arguments.
A basic implementation might look like:
```elixir
defmodule MyApp.InclusionHandler do
@behaviour Grizzly.InclusionHandler
require Logger
def handle_report(report, opts) do
Logger.info("Got command: #{report.command.name} with callback arguments #{inspect(opts)}")
:ok
end
end
```
This is recommended for applications using Grizzly over a `GenServer` that
wraps `Grizzly.Inclusions`.
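Passing the handler when starting an inclusion might look like this (a
sketch; `MyApp.InclusionHandler` is the module defined above):
```elixir
Grizzly.Inclusions.add_node(handler: {MyApp.InclusionHandler, label: "front door"})
```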
"""
alias Grizzly.Inclusions.InclusionRunnerSupervisor
alias Grizzly.Inclusions.InclusionRunner
alias Grizzly.ZWave.Security
@type opt ::
{:controller_id, Grizzly.node_id()} | {:handler, pid() | module() | {module, keyword()}}
@doc """
Start the process to add a Z-Wave node to the network
"""
@spec add_node([opt()]) :: :ok
def add_node(opts \\ []) do
case InclusionRunnerSupervisor.start_runner(opts) do
{:ok, runner} ->
InclusionRunner.add_node(runner)
end
end
@doc """
Start the process to remove a Z-Wave node from the network
"""
@spec remove_node([opt()]) :: :ok
def remove_node(opts \\ []) do
case InclusionRunnerSupervisor.start_runner(opts) do
{:ok, runner} ->
InclusionRunner.remove_node(runner)
end
end
@doc """
Tell the inclusion process which keys to use during the inclusion process
During S2 inclusion the node being included will send a `DSKAddKeysReport`
to request which keys it can use to be included securely. This function is
useful for passing back to the node which keys it is allowed to use, and
depending on that answer the including node might request more information.
"""
@spec grant_keys([Security.key()]) :: :ok
def grant_keys(s2_keys) do
InclusionRunner.grant_keys(InclusionRunner, s2_keys)
end
@doc """
Tell the inclusion process what the input DSK is
If the `NodeAddDSKReport`'s `:input_dsk_length` is `0` you can just call this
function without any arguments:
```elixir
Grizzly.Inclusions.set_input_dsk()
```
If you are doing `:s2_authenticated` or `:s2_access_control` the
`NodeAddDSKReport` will probably ask for input DSK length of `2`. This means
it is expecting a 2 byte (16 bit) number, which is normally a 5 digit pin
located somewhere on the node that is being added. After locating the pin and
you can pass it as an argument like so:
```elixir
Grizzly.Inclusions.set_input_dsk(12345)
```
"""
@spec set_input_dsk(non_neg_integer()) :: :ok
def set_input_dsk(input_dsk \\ 0) do
InclusionRunner.set_dsk(InclusionRunner, input_dsk)
end
@doc """
Stop an add node inclusion process
"""
@spec add_node_stop() :: :ok
def add_node_stop() do
InclusionRunner.add_node_stop(InclusionRunner)
end
@doc """
Stop a remove node inclusion process
"""
@spec remove_node_stop() :: :ok
def remove_node_stop() do
InclusionRunner.remove_node_stop(InclusionRunner)
end
@doc """
Start learn mode on the controller
"""
@spec learn_mode([opt()]) :: any
def learn_mode(opts \\ []) do
case InclusionRunnerSupervisor.start_runner(opts) do
{:ok, runner} ->
InclusionRunner.learn_mode(runner)
end
end
@doc """
Stop learn mode on the controller
"""
@spec learn_mode_stop :: any
def learn_mode_stop() do
InclusionRunner.learn_mode_stop(InclusionRunner)
end
@doc """
Stop the inclusion runner
"""
@spec stop :: :ok
def stop() do
InclusionRunner.stop(InclusionRunner)
end
@doc """
Check to see if there is an inclusion process running
"""
@spec inclusion_running?() :: boolean()
def inclusion_running?() do
child_count = DynamicSupervisor.count_children(InclusionRunnerSupervisor)
child_count.active == 1
end
end
|
lib/grizzly/inclusions.ex
| 0.90417 | 0.900048 |
inclusions.ex
|
starcoder
|
defmodule TheFuzz.Similarity.Tversky do
@moduledoc """
This module contains functions to calculate the [Tversky index
](https://en.wikipedia.org/wiki/Tversky_index) between two given
strings
"""
import TheFuzz.Util, only: [ngram_tokenize: 2, intersect: 2]
@behaviour TheFuzz.StringMetric
@default_ngram_size 1
@default_alpha 1
@default_beta 1
@doc """
Calculates the Tversky index between two given strings with
a default ngram size of 1, alpha of 1 and beta of 1
This is equivalent to the Tanimoto coefficient
## Examples
iex> TheFuzz.Similarity.Tversky.compare("contact", "context")
0.5555555555555556
iex> TheFuzz.Similarity.Tversky.compare("ht", "hththt")
0.3333333333333333
"""
def compare(a, b) do
compare(a, b, %{n_gram_size: @default_ngram_size, alpha: @default_alpha, beta: @default_beta})
end
@doc """
Calculates the Tversky index between two given strings with
the specified options passed as a map of key, value pairs.
#### Options
- **n_gram_size**: positive integer greater than 0, to tokenize the strings
- **alpha**: weight of the prototype sequence
- **beta**: weight of the variant sequence
Note: If any of them is not specified as part of the options object
they are set to the default value of 1
## Examples
iex> TheFuzz.Similarity.Tversky.compare("contact", "context", %{n_gram_size: 4, alpha: 2, beta: 0.8})
0.10638297872340426
iex> TheFuzz.Similarity.Tversky.compare("contact", "context", %{n_gram_size: 2, alpha: 0.5, beta: 0.5})
0.5
"""
def compare(a, b, %{n_gram_size: n}) when n <= 0 or byte_size(a) < n or byte_size(b) < n,
do: nil
def compare(a, b, _n) when a == b, do: 1
def compare(a, b, %{n_gram_size: n, alpha: alpha, beta: beta}) do
n = n || @default_ngram_size
alpha = alpha || @default_alpha
beta = beta || @default_beta
a_ngrams = a |> ngram_tokenize(n)
b_ngrams = b |> ngram_tokenize(n)
nmatches = intersect(a_ngrams, b_ngrams) |> length
a_diff_length = (a_ngrams -- b_ngrams) |> length
b_diff_length = (b_ngrams -- a_ngrams) |> length
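# Tversky index: |X ∩ Y| / (alpha * |X - Y| + beta * |Y - X| + |X ∩ Y|)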
nmatches / (alpha * a_diff_length + beta * b_diff_length + nmatches)
end
end
|
lib/the_fuzz/similarity/tversky.ex
| 0.89935 | 0.723602 |
tversky.ex
|
starcoder
|
defmodule ExRogue.Map do
defmodule Tile.Wall do
defstruct id: 0, position: {0, 0}
@type t :: %__MODULE__{id: integer, position: {integer, integer}}
end
defmodule Tile.Room do
defstruct id: 0, position: {0, 0}
@type t :: %__MODULE__{id: integer, position: {integer, integer}}
end
defmodule Tile.Hall do
defstruct id: 0, position: {0, 0}
@type t :: %__MODULE__{id: integer, position: {integer, integer}}
end
defmodule Tile.Door do
defstruct id: 0, position: {0, 0}
@type t :: %__MODULE__{id: integer, position: {integer, integer}}
end
alias Tile.{Door, Hall, Room, Wall}
defstruct map: [], top_left: {0, 0}, bottom_right: {0, 0}
@type point :: {non_neg_integer, non_neg_integer}
@type t :: %__MODULE__{
map: list(list(any)),
top_left: point,
bottom_right: point
}
@type iterate_mapper :: (t, point -> :ok | {:ok, t} | {:error, atom})
@type tile :: Door.t() | Hall.t() | Room.t() | Wall.t()
# Public Functions
@spec build(keyword) :: t()
def build(options \\ []) do
width = Keyword.get(options, :width, 50)
height = Keyword.get(options, :height, 30)
width
|> new(height)
|> place_rooms(options)
|> carve_halls()
|> carve_doors()
|> remove_dead_ends()
|> remove_extra_walls()
end
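# Example usage (rendering relies on the Inspect implementation below):
#
#     ExRogue.Map.build(width: 40, height: 20, rooms: 6) |> IO.inspect()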
@spec new(integer, integer) :: t()
def new(width, height) do
map =
for _ <- 0..(height - 1) do
for _ <- 0..(width - 1) do
nil
end
end
%__MODULE__{map: map, top_left: {0, 0}, bottom_right: {width - 1, height - 1}}
end
# Public Functions
@spec place_rooms(t(), keyword) :: t()
def place_rooms(%__MODULE__{} = map, options \\ []) do
rooms = Keyword.get(options, :rooms, 4)
Enum.reduce(1..rooms, map, fn _, map -> place_room(map, options) end)
end
@spec place_room(t(), keyword, integer, integer) :: t()
def place_room(
%__MODULE__{} = map,
options \\ [],
id \\ System.unique_integer([:positive, :monotonic]),
attempts \\ 10
) do
min_size = Keyword.get(options, :min_size, 8)
max_size = Keyword.get(options, :max_size, 60)
size = Enum.random(min_size..max_size)
do_place_room(map, size, id, attempts)
end
defp carve_halls(%__MODULE__{} = map) do
case find_empty(map) do
nil ->
map
{_, _} = point ->
points = trace_hall(map, point)
id = System.unique_integer([:positive, :monotonic])
with {:ok, map} <- carve_points(map, points, Hall, id) do
map
|> place_walls(points, 0)
|> carve_halls()
end
end
end
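# Finds every wall tile separating two different regions (excluding
# hall-to-hall pairs), groups the candidate positions by region pair, and
# carves one randomly chosen door per pair.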
@spec carve_doors(t()) :: t()
def carve_doors(%__MODULE__{map: data} = map) do
data
|> List.flatten()
|> Enum.flat_map(fn tile ->
case tile do
%Wall{position: position} ->
surrounding_points = surrounding_points(position, map)
for {a, b} <- combinations(surrounding_points),
%mod_a{id: a_id} = get(map, a),
%mod_b{id: b_id} = get(map, b),
a_id != b_id,
mod_a != Wall,
mod_b != Wall,
!(mod_a == Hall and mod_b == Hall) do
{position, Enum.sort([{mod_a, a_id}, {mod_b, b_id}])}
end
_ ->
[]
end
end)
|> Enum.group_by(fn {_v, k} -> k end, fn {v, _k} -> v end)
|> Enum.map(fn {_k, v} -> Enum.random(v) end)
|> Enum.reduce(map, fn point, map ->
id = System.unique_integer([:positive, :monotonic])
update(map, point, %Door{id: id, position: point})
end)
end
@spec place_walls(t(), list(point), integer) :: t()
def place_walls(%__MODULE__{} = map, points, id) do
points
|> Enum.flat_map(&adjacent_points(&1, map))
|> Enum.uniq()
|> Enum.filter(fn point ->
is_nil(get(map, point))
end)
|> Enum.reduce(map, fn point, map ->
update(map, point, %Wall{id: id, position: point})
end)
end
@spec remove_dead_ends(t()) :: t()
def remove_dead_ends(%__MODULE__{top_left: top_left, bottom_right: bottom_right} = map) do
top_left
|> points_for_region(bottom_right)
|> Enum.reduce(map, fn point, map ->
case is_dead_end?(map, point) do
true ->
remove_dead_end(map, point)
false ->
map
end
end)
end
@spec remove_dead_end(t(), point) :: t()
def remove_dead_end(%__MODULE__{} = map, point) do
map = update(map, point, %Wall{id: 0, position: point})
next =
point
|> surrounding_points(map)
|> Enum.find(fn point ->
tile = get(map, point)
case tile do
%Hall{} -> true
%Door{} -> true
_ -> false
end
end)
case next do
nil ->
map
next ->
case is_dead_end?(map, next) do
true ->
remove_dead_end(map, next)
false ->
map
end
end
end
@spec remove_extra_walls(t()) :: t()
def remove_extra_walls(%__MODULE__{top_left: top_left, bottom_right: bottom_right} = map) do
top_left
|> points_for_region(bottom_right)
|> Enum.reduce(map, fn point, map ->
adjacent_points = adjacent_points(point, map)
walls =
Enum.filter(adjacent_points, fn point ->
case get(map, point) do
%Wall{} -> true
nil -> true
_ -> false
end
end)
case length(adjacent_points) - length(walls) do
0 -> update(map, point, nil)
_ -> map
end
end)
end
# Private Functions
defp adjacent_points({x, y}, %__MODULE__{bottom_right: {max_x, max_y}}) do
for nx <- (x - 1)..(x + 1),
ny <- (y - 1)..(y + 1),
nx >= 0,
ny >= 0,
{nx, ny} != {x, y},
nx <= max_x,
ny <= max_y do
{nx, ny}
end
end
@spec can_trace_point?(t(), list(point), point) :: boolean
defp can_trace_point?(
%__MODULE__{top_left: {min_x, min_y}, bottom_right: {max_x, max_y}},
_,
{x, y}
)
when x == max_x or y == max_y or x == min_x or y == min_y do
false
end
@dialyzer [{:nowarn_function, can_trace_point?: 3}, :no_match]
defp can_trace_point?(%__MODULE__{} = map, [], point) do
is_nil(get(map, point))
end
defp can_trace_point?(
%__MODULE__{} = map,
[last_point | _] = traced_points,
point
) do
empty_tile = is_nil(get(map, point))
in_traced = point in traced_points
surrounding_points = surrounding_points(point, map) -- [last_point]
any_surrounding_traced = Enum.any?(surrounding_points, &(&1 in traced_points))
empty_tile and !in_traced and !any_surrounding_traced
end
@spec carve(t(), point, point, atom, integer) :: {:ok, t()} | {:error, atom}
defp carve(%__MODULE__{} = map, top_left, bottom_right, type, id) do
points = points_for_region(top_left, bottom_right)
with {:ok, map} <- carve_points(map, points, type, id) do
map = place_walls(map, points, id)
{:ok, map}
end
end
@spec carve_points(t(), list(point), atom, integer) :: {:ok, t()} | {:error, atom}
defp carve_points(%__MODULE__{} = map, points, type, id) do
case Enum.any?(points, fn point -> !is_nil(get(map, point)) end) do
true ->
{:error, :collision}
false ->
map =
Enum.reduce(points, map, fn point, map ->
update(map, point, struct(type, %{id: id, position: point}))
end)
{:ok, map}
end
end
@spec combinations(list()) :: list({any, any})
defp combinations([]), do: []
defp combinations([head | tail]) do
for i <- tail do
{head, i}
end ++ combinations(tail)
end
@spec do_place_room(t(), non_neg_integer, non_neg_integer, non_neg_integer) :: t
defp do_place_room(%__MODULE__{} = map, _size, _, 0), do: map
defp do_place_room(%__MODULE__{bottom_right: {max_x, max_y}} = map, size, id, attempts) do
width = Enum.random(2..floor(size / 2))
height = floor(size / width)
x_max = max_x - width - 1
y_max = max_y - height - 1
left = Enum.random(1..x_max)
right = left + width - 1
top = Enum.random(1..y_max)
bottom = top + height - 1
map
|> carve({left, top}, {right, bottom}, Room, id)
|> case do
{:ok, map} -> map
{:error, :collision} -> do_place_room(map, size, id, attempts - 1)
end
end
@spec find_empty(t()) :: point | nil
defp find_empty(%__MODULE__{top_left: top_left, bottom_right: bottom_right} = map) do
top_left
|> points_for_region(bottom_right)
|> Enum.find(fn point ->
is_nil(get(map, point))
end)
end
@spec get(t(), point) :: tile
defp get(%__MODULE__{map: map}, {x, y}) do
get_in(map, [Access.at(y), Access.at(x)])
end
@spec is_dead_end?(t, point) :: boolean
defp is_dead_end?(%__MODULE__{} = map, point) do
case get(map, point) do
%str{} when str == Hall or str == Door ->
surrounding_points = surrounding_points(point, map)
walls =
surrounding_points
|> Enum.filter(fn point ->
tile = get(map, point)
case tile do
%Wall{} -> true
nil -> true
_ -> false
end
end)
case length(surrounding_points) - length(walls) do
0 -> true
1 -> true
_ -> false
end
_ ->
false
end
end
@spec points_for_region(point, point) :: list(point)
defp points_for_region({tx, ty}, {bx, by}) do
for x <- tx..bx, y <- ty..by do
{x, y}
end
end
@spec surrounding_points(point, t()) :: list(point)
defp surrounding_points({x, y}, %__MODULE__{bottom_right: {max_x, max_y}}) do
points = [
{x, y - 1},
{x - 1, y},
{x + 1, y},
{x, y + 1}
]
for {nx, ny} <- points,
nx >= 0,
ny >= 0,
nx <= max_x,
ny <= max_y do
{nx, ny}
end
end
@spec trace_hall(t(), point) :: list(point)
defp trace_hall(%__MODULE__{} = map, start_point) do
trace_hall(map, [start_point], [start_point])
end
@spec trace_hall(t(), list(point), list(point)) :: list(point)
defp trace_hall(%__MODULE__{}, [], traced_points) do
Enum.reverse(traced_points)
end
defp trace_hall(%__MODULE__{} = map, available_points, traced_points) do
point = Enum.random(available_points)
possible_points =
point
|> surrounding_points(map)
|> Enum.shuffle()
case Enum.find(possible_points, &can_trace_point?(map, traced_points, &1)) do
{_, _} = new_point ->
trace_hall(map, [new_point | available_points], [new_point | traced_points])
nil ->
trace_hall(map, List.delete(available_points, point), traced_points)
end
end
@spec update(t(), point, any) :: t()
defp update(%__MODULE__{map: data} = map, {x, y}, value) do
data = put_in(data, [Access.at(y), Access.at(x)], value)
%__MODULE__{map | map: data}
end
end
defimpl Inspect, for: ExRogue.Map do
import Inspect.Algebra
alias ExRogue.Map
alias ExRogue.Map.Tile.{Door, Hall, Room, Wall}
def inspect(%Map{map: map, bottom_right: {max_x, _}}, _opts) do
data =
[
String.duplicate("_", max_x + 2),
for row <- map do
col_string =
for col <- row, into: "" do
case col do
nil -> " "
%Room{} -> "."
%Wall{} -> "#"
%Hall{} -> "."
%Door{} -> "D"
end
end
Enum.join(["|", col_string, "|"])
end,
String.duplicate("_", max_x + 2)
]
|> List.flatten()
data
|> Enum.intersperse(break("\n"))
|> concat()
end
end
|
lib/ex_rogue/map.ex
| 0.826187 | 0.562717 |
map.ex
|
starcoder
|
defmodule ECS.Sim.Engine do
use GenServer
@moduledoc """
The simulation engine is responsible for starting, pausing, stopping, and changing the speed of the simulation.
It is also responsible for starting all required services, like the entity registry or the messenger.
It keeps the main 'game' loop running, calling each System's update function in turn.
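Example (a sketch; `MySystem` is a hypothetical system module and the times
are `DateTime` structs):
ECS.Sim.Engine.start_link([MySystem])
ECS.Sim.Engine.set(startTime: start_time, endTime: end_time, dt: 60)
ECS.Sim.Engine.start()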
"""
@name { :global, __MODULE__}
## Client API
@doc """
Starts the simulation engine.
"""
def start_link(systems \\ []) do
# opts = Map.get_and_update(opts, :name, @name)
GenServer.start_link(__MODULE__, %ECS.Sim.State{ systems: systems }, name: @name)
end
def add_system(system) do
GenServer.cast(@name, {:system, system})
end
@doc """
Sets the simulation parameters:
- startTime
- endTime
- dt: step size in seconds
"""
def set(startTime: startTime, endTime: endTime, dt: dt) do
GenServer.cast(@name, {:set, startTime, endTime, dt })
end
@doc """
Starts the simulation.
"""
def start() do
GenServer.cast(@name, :initLoop)
GenServer.cast(@name, :start)
end
@doc """
Initializes the simulation.
"""
def init() do
GenServer.cast(@name, :initLoop)
end
@doc """
Takes one step in the simulation.
"""
def step() do
GenServer.cast(@name, :step)
end
@doc """
Pauses the simulation.
"""
def pause() do
GenServer.cast(@name, :pause)
end
@doc """
Stops the simulation.
"""
def stop() do
GenServer.cast(@name, :stop)
end
## Server Callbacks
def handle_cast({:system, system}, state) do
newState = %{ state | systems: List.wrap(system) ++ state.systems }
{:noreply, newState}
end
def handle_cast({:set, startTime, endTime, dt}, state) do
if state.running do
{:noreply, state}
else
newState = %{ state | curTime: startTime, startTime: startTime, endTime: endTime, dt: dt }
IO.puts ""
IO.puts "SIMULATION SETTINGS:"
IO.puts ""
IO.puts " From #{startTime} till #{endTime}, steps #{dt}s."
IO.puts ""
IO.puts "Enter 'ECS.Sim.Engine.init' and 'ECS.Sim.Engine.step' to run the simulation stepwise."
IO.puts "Enter 'ECS.Sim.Engine.start' to run the simulation."
IO.puts ""
IO.inspect newState
{:noreply, newState}
end
end
def handle_cast(:start, state) do
IO.puts "START:"
IO.inspect state
if state.running do
{:noreply, state}
else
newState = %{ state | running: true }
run()
{:noreply, newState}
end
end
def handle_cast(:step, state) do
IO.puts ""
IO.puts "STEP, current time #{state.curTime}:"
IO.puts ""
IO.inspect state
if !state.running do
run()
end
{:noreply, state}
end
def handle_cast(:initLoop, state) do
IO.puts "INITIALIZING:"
state.systems
|> Enum.each(& &1.init(state.startTime, state.dt))
{:noreply, state}
end
def handle_cast(:stop, state) do
IO.puts "STOP:"
if state.running do
{:noreply, %{ state | running: false }}
else
{:noreply, state}
end
end
defp run() do
Process.send(self(), :run, [])
end
def handle_info(:run, state) do
# IO.puts "RUN:"
IO.puts "Current time: #{state.curTime}"
# time = DateTime.utc_now()
# Do the desired work here
# Enum.each(state.systems, fn system ->
# IO.inspect system
# system.update(time, state.dt)
# end)
state.systems
|> Enum.each(& &1.update(state.curTime, state.dt))
updatedTime =
(DateTime.to_unix(state.curTime, :millisecond) + 1000 * state.dt)
|> DateTime.from_unix!(:millisecond)
state = %{state | curTime: updatedTime }
# time = DateTime.utc_now()
# IO.puts "It is now #{time}"
if state.running do
run()
end
{:noreply, state}
end
end
|
packages/sim/apps/ecs/lib/ecs/sim/engine.ex
| 0.572842 | 0.44571 |
engine.ex
|
starcoder
|
defmodule EllipticCurve.Utils.Math do
@moduledoc false
alias EllipticCurve.Utils.Integer, as: IntegerUtils
alias EllipticCurve.Utils.{Point}
@doc """
Fast way to multiply a point by a scalar in elliptic curves
:param p: Point to multiply
:param n: Scalar to multiply by
:param cN: Order of the elliptic curve
:param cP: Prime number in the module of the equation Y^2 = X^3 + cA*X + B (mod p)
:param cA: Coefficient of the first-order term of the equation Y^2 = X^3 + cA*X + B (mod p)
:return: Point that represents the multiplication of the point by the scalar
"""
def multiply(p, n, cN, cA, cP) do
p
|> toJacobian()
|> jacobianMultiply(n, cN, cA, cP)
|> fromJacobian(cP)
end
@doc """
Fast way to add two points in elliptic curves
:param p: First Point you want to add
:param q: Second Point you want to add
:param cP: Prime number in the module of the equation Y^2 = X^3 + cA*X + B (mod p)
:param cA: Coefficient of the first-order term of the equation Y^2 = X^3 + cA*X + B (mod p)
:return: Point that represents the sum of First and Second Point
"""
def add(p, q, cA, cP) do
jacobianAdd(toJacobian(p), toJacobian(q), cA, cP)
|> fromJacobian(cP)
end
@doc """
Extended Euclidean Algorithm. It's the 'division' in elliptic curves
:param x: Divisor
:param n: Mod for division
:return: Value representing the division
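For example, since 3 * 5 = 15 ≡ 1 (mod 7):
iex> inv(3, 7)
5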
"""
def inv(x, _n) when x == 0 do
0
end
def inv(x, n) do
invOperator(1, 0, IntegerUtils.modulo(x, n), n)
|> IntegerUtils.modulo(n)
end
defp invOperator(lm, hm, low, high) when low > 1 do
r = div(high, low)
invOperator(
hm - lm * r,
lm,
high - low * r,
low
)
end
defp invOperator(lm, _hm, _low, _high) do
lm
end
# Converts point to Jacobian coordinates
# :param p: Point in default coordinates
# :return: Point in Jacobian coordinates
defp toJacobian(p) do
%Point{x: p.x, y: p.y, z: 1}
end
# Converts point back from Jacobian coordinates
# :param p: Point in Jacobian coordinates
# :param cP: Prime number in the module of the equation Y^2 = X^3 + cA*X + B (mod p)
# :return: Point in default coordinates
defp fromJacobian(p, cP) do
z = inv(p.z, cP)
%Point{
x:
IntegerUtils.modulo(
p.x * IntegerUtils.ipow(z, 2),
cP
),
y:
IntegerUtils.modulo(
p.y * IntegerUtils.ipow(z, 3),
cP
)
}
end
# Doubles a point in elliptic curves
# :param p: Point you want to double
# :param cP: Prime number in the module of the equation Y^2 = X^3 + cA*X + B (mod p)
# :param cA: Coefficient of the first-order term of the equation Y^2 = X^3 + cA*X + B (mod p)
# :return: Point that represents the doubled point
defp jacobianDouble(p, cA, cP) do
if p.y == 0 do
%Point{x: 0, y: 0, z: 0}
else
ysq =
IntegerUtils.ipow(p.y, 2)
|> IntegerUtils.modulo(cP)
s =
(4 * p.x * ysq)
|> IntegerUtils.modulo(cP)
m =
(3 * IntegerUtils.ipow(p.x, 2) + cA * IntegerUtils.ipow(p.z, 4))
|> IntegerUtils.modulo(cP)
nx =
(IntegerUtils.ipow(m, 2) - 2 * s)
|> IntegerUtils.modulo(cP)
ny =
(m * (s - nx) - 8 * IntegerUtils.ipow(ysq, 2))
|> IntegerUtils.modulo(cP)
nz =
(2 * p.y * p.z)
|> IntegerUtils.modulo(cP)
%Point{x: nx, y: ny, z: nz}
end
end
# Adds two points in the elliptic curve
# :param p: First Point you want to add
# :param q: Second Point you want to add
# :param cP: Prime number in the module of the equation Y^2 = X^3 + cA*X + B (mod p)
# :param cA: Coefficient of the first-order term of the equation Y^2 = X^3 + cA*X + B (mod p)
# :return: Point that represents the sum of First and Second Point
defp jacobianAdd(p, q, cA, cP) do
if p.y == 0 do
q
else
if q.y == 0 do
p
else
u1 =
(p.x * IntegerUtils.ipow(q.z, 2))
|> IntegerUtils.modulo(cP)
u2 =
(q.x * IntegerUtils.ipow(p.z, 2))
|> IntegerUtils.modulo(cP)
s1 =
(p.y * IntegerUtils.ipow(q.z, 3))
|> IntegerUtils.modulo(cP)
s2 =
(q.y * IntegerUtils.ipow(p.z, 3))
|> IntegerUtils.modulo(cP)
if u1 == u2 do
if s1 != s2 do
%Point{x: 0, y: 0, z: 1}
else
jacobianDouble(p, cA, cP)
end
else
h = u2 - u1
r = s2 - s1
h2 =
(h * h)
|> IntegerUtils.modulo(cP)
h3 =
(h * h2)
|> IntegerUtils.modulo(cP)
u1h2 =
(u1 * h2)
|> IntegerUtils.modulo(cP)
nx =
(IntegerUtils.ipow(r, 2) - h3 - 2 * u1h2)
|> IntegerUtils.modulo(cP)
ny =
(r * (u1h2 - nx) - s1 * h3)
|> IntegerUtils.modulo(cP)
nz =
(h * p.z * q.z)
|> IntegerUtils.modulo(cP)
%Point{x: nx, y: ny, z: nz}
end
end
end
end
# Multiply a point by a scalar in elliptic curves
# :param p: Point to multiply
# :param n: Scalar to multiply by
# :param cN: Order of the elliptic curve
# :param cP: Prime number in the module of the equation Y^2 = X^3 + cA*X + B (mod p)
# :param cA: Coefficient of the first-order term of the equation Y^2 = X^3 + cA*X + B (mod p)
# :return: Point that represents the multiplication of the point by the scalar
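# Implemented as recursive double-and-add: for even n the half-multiple is
# doubled; for odd n it is doubled and p is added once more.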
defp jacobianMultiply(_p, n, _cN, _cA, _cP) when n == 0 do
%Point{x: 0, y: 0, z: 1}
end
defp jacobianMultiply(p, n, _cN, _cA, _cP) when n == 1 do
if p.y == 0 do
%Point{x: 0, y: 0, z: 1}
else
p
end
end
defp jacobianMultiply(p, n, cN, cA, cP) when n < 0 or n >= cN do
if p.y == 0 do
%Point{x: 0, y: 0, z: 1}
else
jacobianMultiply(p, IntegerUtils.modulo(n, cN), cN, cA, cP)
end
end
defp jacobianMultiply(p, n, cN, cA, cP) when rem(n, 2) == 0 do
if p.y == 0 do
%Point{x: 0, y: 0, z: 1}
else
jacobianMultiply(p, div(n, 2), cN, cA, cP)
|> jacobianDouble(cA, cP)
end
end
defp jacobianMultiply(p, n, cN, cA, cP) do
if p.y == 0 do
%Point{x: 0, y: 0, z: 1}
else
# rem(n, 2) == 1
jacobianMultiply(p, div(n, 2), cN, cA, cP)
|> jacobianDouble(cA, cP)
|> jacobianAdd(p, cA, cP)
end
end
end
|
lib/utils/math.ex
| 0.884296 | 0.895019 |
math.ex
|
starcoder
|
defmodule Atlas.Schema do
import Atlas.FieldConverter
@moduledoc """
Provides schema definitions and Struct generation through a `field` macro and
`%__MODULE__{}` struct to hold model state.
`field` definitions provide handling conversion of binary database results
into schema defined types.
Field Types
:string
:integer
:float
:boolean
`field` accepts the column name as its first argument, followed by a field type, and
finally an optional default value as the last argument
Examples
defmodule User do
use Atlas.Model
field :email, :string
field :active, :boolean, default: true
end
iex> User.new
%User{active: true, email: nil}
"""
defmacro __using__(_options) do
quote do
use Atlas.Relationships
Module.register_attribute __MODULE__, :fields, accumulate: true,
persist: false
import unquote(__MODULE__)
@table nil
@primary_key nil
@default_primary_key :id
@before_compile unquote(__MODULE__)
end
end
defmacro __before_compile__(_env) do
quote do
@primary_key (@primary_key || @default_primary_key)
defstruct struct_fields(@fields, __MODULE__, preloaded_fields(__MODULE__, @belongs_to, @has_many))
def __atlas__(:table), do: @table
def __atlas__(:fields), do: @fields
def table, do: @table
def primary_key, do: @primary_key
def primary_key_value(record), do: Map.get(record, @primary_key)
def to_list(record) do
reserved_fields = [:model, :__struct__, :__preloaded__]
for {key, _} <- Map.to_list(record), !Enum.member?(reserved_fields, key) do
{key, Map.get(record, key)}
end
end
def raw_query_results_to_records(results) do
results
|> Enum.map(fn row -> raw_kwlist_to_field_types(row) end)
|> Enum.map(fn row -> struct(__MODULE__, row) end)
end
def raw_kwlist_to_field_types(kwlist) do
Enum.map kwlist, fn {key, val} ->
{key, value_to_field_type(val, field_type_for_name(key))}
end
end
def field_type_for_name(field_name) do
field = @fields |> Enum.find(fn field -> elem(field, 0) == field_name end)
if field, do: elem(field, 1)
end
@doc """
Returns the attribute value from the record converted to its field type
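Example (assuming a `User` schema with `field :age, :integer`):
iex> User.get(%User{age: "42"}, :age)
42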
"""
def get(record, attribute) do
value_to_field_type(Map.get(record, attribute),
field_type_for_name(attribute))
end
@doc """
Return the preloaded association results for the given record
record - The %__MODULE__{}
association_name - The atom of the association name
Examples
iex> user = Repo.first User.preloads(:orders) |> User.where(id: 5)
[%User{id: 5, __preloaded__: %{orders: [%Order{id: 123..}]}}]
iex> User.preloaded(user, :orders)
[%Order{id: 123...}]
"""
def preloaded(record, association_name) do
Keyword.get(record.__preloaded__, association_name)
end
end
end
def struct_fields(fields, model, preload_map) do
fields_to_kwlist(fields) ++ [model: model, __preloaded__: preload_map]
end
@doc """
Return all defined preloaded fields for has_many and belongs_to relationships
"""
def preloaded_fields(_model, belongs_to, has_many) do
Enum.map(belongs_to, fn rel -> {rel.name, nil} end)
|> Kernel.++(Enum.map(has_many, fn rel -> {rel.name, []} end))
end
@doc """
Converts @fields attribute to keyword list to be used for Struct definition
iex> Schema.fields_to_kwlist([{:active, :boolean, [default: true], nil}, {:id, :integer, [], nil}])
[active: true, id: nil]
"""
def fields_to_kwlist(fields) do
Enum.map fields, fn field -> {elem(field, 0), default_for_field(field)} end
end
def default_for_field({_field_name, _type, options, _func}) do
Keyword.get options, :default, nil
end
defmacro field(field_name, field_type, options \\ [], func \\ nil) do
quote do
@fields {unquote(field_name), unquote(field_type), unquote(options), unquote(func)}
end
end
end
|
lib/atlas/schema.ex
| 0.826467 | 0.432782 |
schema.ex
|
starcoder
|
defmodule Rop do
defmacro __using__(_) do
quote do
import Rop
end
end
@doc ~s"""
Extracts the value from a tagged tuple like {:ok, value}.
Raises the value from a tagged tuple like {:error, value}.
Raises the argument otherwise.
For example:
iex> ok({:ok, 1})
1
iex> ok({:error, "some"})
** (RuntimeError) some
iex> ok({:anything, "some"})
** (ArgumentError) raise/1 and reraise/2 expect a module name, string or exception as the first argument, got: {:anything, \"some\"}
"""
def ok({:ok, x}), do: x
def ok({:error, x}), do: raise x
def ok(x), do: raise x
@doc ~s"""
No need to stop pipelining in case of an error somewhere in the middle
Example:
iex> inc = fn(x)-> {:ok, x+1} end
iex> 1 |> (inc).() >>> (inc).()
{:ok, 3}
"""
defmacro left >>> right do
quote do
(fn ->
case unquote(left) do
{:ok, x} -> x |> unquote(right)
{:error, _} = expr -> expr
end
end).()
end
end
@doc ~s"""
Wraps a simple function to return a tagged tuple with `:ok` to comply with the protocol `{:ok, result}`
Example:
iex> 1 |> Integer.to_string
"1"
iex> 1 |> bind(Integer.to_string)
{:ok, "1"}
iex> inc = fn(x)-> x+1 end
iex> 1 |> bind((inc).()) >>> (inc).()
3
iex> 1 |> bind((inc).()) >>> bind((inc).())
{:ok, 3}
"""
defmacro bind(args, func) do
quote do
(fn ->
result = unquote(args) |> unquote(func)
{:ok, result}
end).()
end
end
@doc ~s"""
Wraps raising functions to return a tagged tuple `{:error, ErrorMessage}` to comply with the protocol
Example:
iex> r = fn(_)-> raise "some" end
iex> inc = fn(x)-> x + 1 end
iex> 1 |> bind((inc).()) >>> try_catch((r).()) >>> bind((inc).())
{:error, %RuntimeError{message: "some"}}
"""
defmacro try_catch(args, func) do
quote do
(fn ->
try do
unquote(args) |> unquote(func)
rescue
e -> {:error, e}
end
end).()
end
end
@doc ~s"""
Like a similar Unix utility it does some work and returns the input.
See [tee (command), Unix](https://en.wikipedia.org/wiki/Tee_(command)).
Example:
iex> inc = fn(x)-> IO.inspect(x); {:ok, x + 1} end
iex> 1 |> tee((inc).()) >>> tee((inc).()) >>> tee((inc).())
{:ok, 1}
"""
defmacro tee(args, func) do
quote do
(fn ->
unquoted_args = unquote(args)
unquoted_args |> unquote(func)
{:ok, unquoted_args}
end).()
end
end
end
|
lib/rop.ex
| 0.62395 | 0.553324 |
rop.ex
|
starcoder
|
defmodule CoinmarketcapApi do
alias CoinmarketcapApi.Ticker
use Tesla
plug Tesla.Middleware.BaseUrl, "https://api.coinmarketcap.com/v2"
plug CoinmarketcapApi.ResponseMiddleware
plug Tesla.Middleware.JSON
plug Tesla.Middleware.FollowRedirects
@doc """
Returns cryptocurrency ticker data ordered by marketcap rank. The maximum number of results per call is 100. Pagination is possible by using the start and limit parameters.
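Example call (`start` and `limit` control pagination):
CoinmarketcapApi.fetch_ticker(start: 100, limit: 10)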
Example response:
```
{
"data": {
"1": {
"id": 1,
"name": "Bitcoin",
"symbol": "BTC",
"website_slug": "bitcoin",
"rank": 1,
"circulating_supply": 17008162.0,
"total_supply": 17008162.0,
"max_supply": 21000000.0,
"quotes": {
"USD": {
"price": 9024.09,
"volume_24h": 8765400000.0,
"market_cap": 153483184623.0,
"percent_change_1h": -2.31,
"percent_change_24h": -4.18,
"percent_change_7d": -0.47
}
},
"last_updated": 1525137271
},
"1027": {
"id": 1027,
"name": "Ethereum",
"symbol": "ETH",
"website_slug": "ethereum",
"rank": 2,
"circulating_supply": 99151888.0,
"total_supply": 99151888.0,
"max_supply": null,
"quotes": {
"USD": {
"price": 642.399,
"volume_24h": 2871290000.0,
"market_cap": 63695073558.0,
"percent_change_1h": -3.75,
"percent_change_24h": -7.01,
"percent_change_7d": -2.32
}
},
"last_updated": 1525137260
}
...
},
"metadata": {
"timestamp": 1525137187,
"num_cryptocurrencies": 1602,
"error": null
}
}
```
"""
def fetch_ticker() do
get("/ticker/")
end
def fetch_ticker(query) when is_list(query) do
get("/ticker/", query: query)
end
@deprecated "Use fetch_ticker with query option passed as keyword list"
def fetch_ticker(opts) when is_map(opts) do
get("/ticker/", query: Map.to_list(opts))
end
@doc """
Returns ticker data for a specific cryptocurrency. Use the "id" field from the Listings endpoint in the URL.
Example response:
```
{
"data": {
"id": 1,
"name": "Bitcoin",
"symbol": "BTC",
"website_slug": "bitcoin",
"rank": 1,
"circulating_supply": 17008162.0,
"total_supply": 17008162.0,
"max_supply": 21000000.0,
"quotes": {
"USD": {
"price": 9024.09,
"volume_24h": 8765400000.0,
"market_cap": 153483184623.0,
"percent_change_1h": -2.31,
"percent_change_24h": -4.18,
"percent_change_7d": -0.47
}
},
"last_updated": 1525137271
},
"metadata": {
"timestamp": 1525237332,
"error": null
}
}
```
"""
def fetch_ticker(id, query \\ []) do
get("/ticker/#{id}/", query: query)
end
@doc """
Returns all active cryptocurrency listings. Use fetch_ticker to query more information for a specific cryptocurrency.
Example response:
```
{
"data": [
{
"id": 1,
"name": "Bitcoin",
"symbol": "BTC",
"website_slug": "bitcoin"
},
{
"id": 2,
"name": "Litecoin",
"symbol": "LTC",
"website_slug": "litecoin"
},
...
},
"metadata": {
"timestamp": 1525137187,
"num_cryptocurrencies": 1602,
"error": null
}
}
```
"""
def fetch_listings() do
get("/listings/")
end
def fetch_global_data() do
get("/global/")
end
@doc """
Returns the global data found at the top of coinmarketcap.com
Example response:
```
{
"data": {
"active_cryptocurrencies": 1594,
"active_markets": 10526,
"bitcoin_percentage_of_market_cap": 37.65,
"quotes": {
"USD": {
"total_market_cap": 407690157494.0,
"total_volume_24h": 30969801118.0
}
},
"last_updated": 1525137271
},
"metadata": {
"timestamp": 1525237332,
"error": null
}
}
```
"""
def fetch_global_data(%{convert: currency}) do
get("/global/", query: [convert: currency])
end
end
|
lib/coinmarketcap_api.ex
| 0.873728 | 0.737158 |
coinmarketcap_api.ex
|
starcoder
|
defmodule LDAPoolex.Schema do
use GenServer
require Logger
def start_link(args) do
GenServer.start_link(__MODULE__,
args,
name: String.to_atom(Atom.to_string(args[:name]) <> "_schema_loader"))
end
@impl GenServer
def init(args) do
table_name = table_name(args[:name])
:ets.new(table_name, [:named_table, :set, :protected, {:read_concurrency, true}])
state = Keyword.put(args, :table, table_name)
{:ok, state, {:continue, :load_schema}}
end
@doc """
Returns the schema information of an attribute
Takes into parameter the pool name (an `atom()`) and the attribute name (a `String.t()`)
and returns the associated schema data, or `nil` if the attribute doesn't exist in the
schema.
## Example
```elixir
iex> LDAPoolex.Schema.get(:poule_do, "cn")
%{
equality: "caseIgnoreMatch",
name: "cn",
ordering: nil,
single_valued: false,
syntax: "1.3.6.1.4.1.1466.115.121.1.15{32768}"
}
iex> LDAPoolex.Schema.get(:poule_do, "createTimestamp")
%{
equality: "generalizedTimeMatch",
name: "createTimestamp",
ordering: "generalizedTimeOrderingMatch",
single_valued: true,
syntax: "1.3.6.1.4.1.1466.115.121.1.24"
}
iex> LDAPoolex.Schema.get(:poule_do, "uid")
%{
equality: "caseIgnoreMatch",
name: "uid",
ordering: nil,
single_valued: false,
syntax: "1.3.6.1.4.1.1466.115.121.1.15{256}"
}
```
"""
@spec get(atom(), String.t())
:: %{
name: String.t(),
syntax: String.t(),
single_valued: boolean(),
equality: String.t() | nil,
ordering: String.t() | nil
} | nil
def get(pool_name, attribute) do
[{_attribute, syntax, single_valued, equality, ordering}] =
pool_name
|> table_name()
|> :ets.lookup(attribute)
%{
name: attribute,
syntax: to_str(syntax),
single_valued: single_valued,
equality: to_str(equality),
ordering: to_str(ordering)
}
rescue
_ ->
nil
end
defp table_name(pool_name) do
String.to_atom("LDAPoolex_" <> Atom.to_string(pool_name) <> "_schema")
end
defp to_str(nil), do: nil
defp to_str(charlist), do: to_string(charlist)
@impl GenServer
def handle_continue(:load_schema, state) do
load_schema(state)
{:noreply, state}
end
@impl GenServer
def handle_call(:load_schema, _from, state) do
load_schema(state)
{:reply, :ok, state}
end
@impl GenServer
def handle_info(:load_schema, state) do
load_schema(state)
{:noreply, state}
end
defp load_schema(state) do
get_subschema_subentry_dn(state)
|> get_matching_rule_use(state)
|> save_attribute_types_to_table(state)
rescue
e ->
Logger.warn("Error while loading schema for pool `#{state[:name]}` (#{Exception.message(e)})")
Process.send_after(self(), :load_schema, state[:connection_retry_delay] || 3000)
end
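# Reads the subschemaSubentry attribute of the base object to find the DN of
# the subschema entry that holds the attributeTypes definitions.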
defp get_subschema_subentry_dn(state) do
{:eldap_search_result,
[
{:eldap_entry, _root_dse,
[{'subschemaSubentry', [subschema_subentry_dn]}]}
], []
} =
LDAPoolex.search(state[:name],
[
base: state[:ldap_args][:base],
filter: :eldap.present('objectClass'),
attributes: ['subschemaSubentry'],
scope: :eldap.baseObject()]
)
subschema_subentry_dn
end
defp get_matching_rule_use(subschema_subentry_dn, state) do
{:eldap_search_result,
[
{:eldap_entry, _root_dse,
[{_dn, attribute_types}]}
], []
} =
LDAPoolex.search(state[:name],
[
base: subschema_subentry_dn,
filter: :eldap.equalityMatch('objectClass', 'subschema'),
scope: :eldap.baseObject(),
attributes: ['attributeTypes']
]
)
attribute_types
end
defp save_attribute_types_to_table(attribute_types, state) do
attribute_type_list =
Enum.into(
attribute_types,
[],
fn
attribute_type ->
# we just add the prefix to make it work with the parser
'attributetype ' ++ attribute_type
|> :schema_lexer.string()
|> elem(1)
|> :schema_parser.parse()
|> case do
{:ok, [{:attribute_type, key_values}]} ->
Enum.into(key_values, %{}, fn {key, value} -> {key, value} end)
error ->
raise error
end
end
)
object_list = Enum.map(
attribute_type_list,
fn
%{syntax: _} = entry ->
{
List.first(entry[:name]),
elem(entry[:syntax], 2),
entry[:single_value] || false,
case entry[:equality] do
{_, _, val} ->
val
_ ->
nil
end,
case entry[:ordering] do
{_, _, val} ->
val
_ ->
nil
end
}
%{sup: _} = entry ->
sup_entry = get_root_sup_entry(entry, attribute_type_list)
{
List.first(entry[:name]),
elem(sup_entry[:syntax], 2),
sup_entry[:single_value] || false,
case sup_entry[:equality] do
{_, _, val} ->
val
_ ->
nil
end,
case sup_entry[:ordering] do
{_, _, val} ->
val
_ ->
nil
end
}
end
)
:ets.delete_all_objects(state[:table])
:ets.insert(state[:table], object_list)
end
defp get_root_sup_entry(entry, entry_list) do
case Enum.find(entry_list, fn m -> m[:name] == entry[:sup] end) do
%{sup: _} = sup_entry ->
get_root_sup_entry(sup_entry, entry_list)
sup_entry ->
sup_entry
end
end
end
|
lib/ldapoolex/schema_loader.ex
| 0.799403 | 0.655681 |
schema_loader.ex
|
starcoder
|
defmodule Classifiers.NaiveBayes.Bernoulli do
defstruct class_instances: %{},
training_instances: 0,
features: 0,
conditional_probabilities: %{}
@doc """
Get a new classifier pid.
"""
def new do
{:ok, pid} = Agent.start_link fn ->
%Classifiers.NaiveBayes.Bernoulli{}
end
pid
end
@doc """
Fit a stream of data to an existing classifier.
Currently expects input in the form of a stream of lists as the following:
[ feature_1, feature_2, ... feature_n, class ]
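Example (hypothetical rows; any value other than "0" counts as a set feature):
pid = Classifiers.NaiveBayes.Bernoulli.new
[["1", "0", "spam"], ["0", "1", "ham"]] |> Classifiers.NaiveBayes.Bernoulli.fit(pid)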
"""
def fit(stream, pid) do
Agent.get_and_update pid, fn classifier ->
{c, rows_with_term} = Enum.reduce(
stream, { classifier, %{} },
fn row, { classifier, rows_with_term } ->
class = row |> List.last
features = row |> Enum.drop(-1) |> to_binary_features
rows_with_term = rows_with_term |>
update_rows_with_term(
class,
features
)
classifier = classifier |> update_classifier(class, features)
{ classifier, rows_with_term }
end
)
c = c |> update_classifier(rows_with_term)
{:ok, c}
end
end
@doc """
Predict the class for one set of features.
"""
def predict_one(features, pid) do
pid |> classifier |> make_prediction(features)
end
@doc """
Predict the classes for a stream of features
"""
def predict(stream, pid) do
c = pid |> classifier
Stream.transform stream, [], fn row, acc ->
features = row |> Enum.drop(-1) |> to_binary_features
{ [c |> make_prediction(features)], acc }
end
end
defp update_rows_with_term(rows, class, features) do
Map.update rows, class, features, fn existing ->
Enum.zip(existing, features) |> Enum.map(fn {a, b} -> a + b end)
end
end
defp update_classifier(classifier, class, features) do
%{
classifier |
class_instances: Map.update(
classifier.class_instances,
class,
1,
&(&1 + 1)
),
features: features |> Enum.count,
training_instances: classifier.training_instances + 1
}
end
defp update_classifier(classifier, rows_with_term) do
%{
classifier |
conditional_probabilities: Enum.reduce(
classifier.class_instances, %{},
fn {class, instances}, conditional_probabilities ->
conditional_probabilities = Map.put(
conditional_probabilities,
class,
Enum.map(
rows_with_term[class],
fn contained -> (contained + 1) / (instances + 1) end
)
)
conditional_probabilities
end
)
}
end
defp classifier(pid) do
Agent.get pid, fn c -> c end
end
defp make_prediction(classifier, features) do
{prediction, _} = Enum.max_by(
classifier.class_instances,
fn { class, class_count } ->
score = :math.log(class_count / classifier.training_instances)
features
|> Stream.with_index
|> Enum.reduce(score, fn { feature, index }, score ->
conditional_probability =
classifier.conditional_probabilities[class] |> Enum.fetch!(index)
case { feature, conditional_probability } do
{ 1, p } ->
score + :math.log(p)
# p == 1 would make log(1 - p) blow up, so fall back to
# Laplace-style smoothing for features present in every training instance
{ 0, p } when p >= 1 ->
score + :math.log(1 / (class_count + 1))
{ 0, p } ->
score + :math.log(1 - p)
end
end)
end
)
prediction
end
defp to_binary_features(row) do
row
|> Enum.map(fn f ->
case f do
"0" -> 0
_ -> 1
end
end)
end
end
|
lib/classifiers/naive_bayes/bernoulli.ex
| 0.806853 | 0.656039 |
bernoulli.ex
|
starcoder
|
defmodule Blockfrost.HTTP do
@moduledoc """
HTTP requests to Blockfrost APIs.
This module is not meant to be use directly. Use the higher level modules to do calls
to the Blockfrost API.
"""
@retryable_statuses [403, 429, 500]
@type error_response ::
{:error,
:bad_request
| :unauthenticated
| :ip_banned
| :usage_limit_reached
| :internal_server_error
| Finch.Error.t()}
@doc """
Builds a request and sends it.
Supports pagination.
If pagination.page is `:all`, will fetch all pages concurrently, with retries
according to retry options. If some page fails to be fetched, the first error
found will be returned.
If you're fetching all pages, maximum concurrency can be configured by using
the :max_concurrency option. Default is `10`.
Keeps data in the order requested.
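Example (a sketch; `:blockfrost` is a hypothetical client name):
Blockfrost.HTTP.build_and_send(:blockfrost, :get, "/blocks/latest")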
"""
@spec build_and_send(atom(), atom(), String.t(), Keyword.t()) :: {:ok, term} | {:error, term}
def build_and_send(
name,
method,
path,
opts \\ []
) do
pagination = opts[:pagination] || %{}
query_params = opts[:query_params] || %{}
case pagination do
%{page: :all} ->
fetch_all_pages(name, method, path, query_params, opts[:body], opts)
_ ->
req = build(name, method, path, Map.merge(pagination, query_params), opts)
request(name, req, opts)
end
end
defp fetch_all_pages(name, method, path, query_params, body, opts) do
max_concurrency = opts[:max_concurrency] || 10
fetch_page = fn page ->
pagination = %{count: 100, page: page, order: "asc"}
req = build(name, method, path, Map.merge(pagination, query_params), body: body)
request(name, req, opts)
end
do_fetch_all_pages(name, 1..max_concurrency, fetch_page, max_concurrency)
|> Enum.reduce_while([], fn {:ok, handled}, acc ->
case handled do
{:ok, response} ->
{:cont, [response | acc]}
err ->
{:halt, err}
end
end)
|> case do
responses when is_list(responses) -> {:ok, Enum.reverse(responses)}
err -> err
end
end
# will stop earlier if some error is not solved by retries
defp do_fetch_all_pages(
name,
%Range{first: first, last: last} = range,
fetch_page,
max_concurrency,
acc \\ []
) do
responses =
name
|> Module.concat(TaskSupervisor)
|> Task.Supervisor.async_stream(range, fetch_page,
max_concurrency: max_concurrency,
ordered: true
)
|> Enum.to_list()
next_range = %Range{range | first: last + 1, last: last + (1 + last - first)}
if should_fetch_more?(responses),
do: do_fetch_all_pages(name, next_range, fetch_page, max_concurrency, acc ++ responses),
else: acc ++ responses
end
defp should_fetch_more?(responses) do
expected_count = Enum.count(responses) * 100
result_count =
responses
|> Enum.map(&elem(&1, 1))
|> Enum.map(fn
{:ok, response} ->
Jason.decode!(response.body)
_e ->
[]
end)
|> List.flatten()
|> Enum.count()
expected_count == result_count
end
@doc """
Builds a request to a Blockfrost API
This function only builds the request. You can execute it with `request/3`.
"""
@spec build(atom, Finch.Request.method(), binary, map, Keyword.t()) :: Finch.Request.t()
def build(name, method, path, query_params \\ %{}, opts \\ []) do
config = Blockfrost.config(name)
path = resolve_path(config, path, query_params)
headers = resolve_headers(config, opts)
Finch.build(method, path, headers, opts[:body])
end
defp resolve_path(%Blockfrost.Config{network_uri: base_uri}, path, query_params) do
query = URI.encode_query(query_params)
%{base_uri | path: base_uri.path <> path, query: query}
end
defp resolve_headers(%Blockfrost.Config{api_key: api_key}, opts) do
{:ok, version} = :application.get_key(:blockfrost, :vsn)
content_type = opts[:content_type] || "application/json"
content_length =
if length = opts[:content_length],
do: [{"Content-Length", inspect(length)}],
else: []
[
{"project_id", api_key},
{"User-Agent", "blockfrost-elixir/#{version}"},
{"Content-Type", content_type}
] ++ content_length
end
@doc """
Does a request to a Blockfrost API.
Receives the following options:
- `:retry_enabled?`: whether it should retry failing requests
- `:retry_max_attempts`: max retry attempts
- `:retry_interval`: interval between attempts
All these options fall back to the config. If they're not defined there,
they fall back to default values. See `Blockfrost.Config` for more info.
For additional options, see `Finch.request/3`
Build requests with `build/4`.
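For example (illustrative values):
request(MyApp.Blockfrost, req, retry_max_attempts: 5, retry_interval: 500)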
"""
@spec request(atom, Finch.Request.t(), Keyword.t()) :: Finch.Response.t()
def request(name, request, opts \\ []) do
finch = Module.concat(name, Finch)
config = Blockfrost.config(name)
client = Application.get_env(:blockfrost, :__http_client__, Finch)
fn ->
client.request(request, finch, opts)
end
|> with_retry(opts, config)
|> handle_response(opts)
end
defp with_retry(fun, opts, config, attempt \\ 1) do
enabled? = opts[:retry_enabled?] || config.retry_enabled?
max_attempts = opts[:retry_max_attempts] || config.retry_max_attempts
interval = opts[:retry_interval] || config.retry_interval
if enabled? and attempt < max_attempts do
case fun.() do
{:ok, %{status: status}} when status in @retryable_statuses ->
:timer.sleep(interval)
with_retry(fun, opts, config, attempt + 1)
{:ok, response} ->
{:ok, response}
{:error, _} ->
:timer.sleep(interval)
with_retry(fun, opts, config, attempt + 1)
end
else
fun.()
end
end
defp handle_response({:ok, response}, opts) do
if opts[:skip_error_handling?] do
{:ok, response}
else
case response do
%{status: status} when status in 200..399 ->
{:ok, response}
%{status: 400} ->
{:error, :bad_request}
%{status: 403} ->
{:error, :unauthenticated}
%{status: 418} ->
{:error, :ip_banned}
%{status: 429} ->
{:error, :usage_limit_reached}
%{status: 500} ->
{:error, :internal_server_error}
end
end
end
defp handle_response(err, _opts), do: err
end
|
lib/blockfrost/http.ex
| 0.808974 | 0.412767 |
http.ex
|
starcoder
|
defmodule Taggart.Tags do
@doc "See `taggart/1`"
defmacro taggart() do
quote location: :keep do
{:safe, ""}
end
end
@doc """
Allows grouping tags in a block.
Groups tags such that they all become part of the result. Normally,
with an Elixir block, only the last expression is part of the value.
This is useful, for example, as the do block of
`Phoenix.HTML.Form.form_for/4`.
```
form_for(conn, "/users", [as: :user], fn f ->
taggart do
label do
"Name:"
text_input(f, :name)
end
label do
"Age:"
select(f, :age, 18..100)
end
end
end
```
## Examples
iex> taggart() |> Phoenix.HTML.safe_to_string()
""
iex> (taggart do div() ; span() end) |> Phoenix.HTML.safe_to_string()
"<div></div><span></span>"
"""
defmacro taggart(do: content) do
content = case content do
{:__block__, _, inner} -> inner
_ -> content
end
quote location: :keep, generated: true do
content = unquote(content)
case content do
# monadically combine array of [{:safe, content}, ...] -> {:safe, [content, ...]}
clist when is_list(clist) ->
inners =
for c <- List.flatten(clist) do
{:safe, inner} = c
inner
end
{:safe, [inners]}
{:safe, _} = c -> c
c -> Phoenix.HTML.html_escape(c)
end
end
end
@moduledoc """
Define HTML tags.
Contains macros for creating a tag-based DSL.
"""
@doc """
Define a new tag.
```
deftag :span
deftag :div
div do
span("Foo")
end
```
"""
defmacro deftag(tag) do
quote location: :keep, bind_quoted: [
tag: Macro.escape(tag, unquote: true)
] do
defmacro unquote(tag)(content_or_attrs \\ [])
defmacro unquote(tag)(attrs)
when is_list(attrs)
do
tag = unquote(tag) # push tag down to next quote
{content, attrs} = Keyword.pop(attrs, :do, "")
Taggart.Tags.normalized_call(tag, attrs, content)
end
defmacro unquote(tag)(content) do
tag = unquote(tag) # push tag down to next quote
attrs = Macro.escape([])
Taggart.Tags.normalized_call(tag, attrs, content)
end
@doc """
Produce a "#{tag}" tag.
## Examples
iex> #{tag}() |> Phoenix.HTML.safe_to_string()
"<#{tag}></#{tag}>"
iex> #{tag}("content") |> Phoenix.HTML.safe_to_string()
"<#{tag}>content</#{tag}>"
iex> #{tag}("content", class: "foo") |> Phoenix.HTML.safe_to_string()
"<#{tag} class=\\"foo\\">content</#{tag}>"
iex> #{tag}() do end |> Phoenix.HTML.safe_to_string()
"<#{tag}></#{tag}>"
iex> #{tag}() do "content" end |> Phoenix.HTML.safe_to_string()
"<#{tag}>content</#{tag}>"
iex> #{tag}(nil, class: "foo") do "content" end |> Phoenix.HTML.safe_to_string()
"<#{tag} class=\\"foo\\">content</#{tag}>"
"""
defmacro unquote(tag)(content, attrs)
when not is_list(content)
do
tag = unquote(tag)
Taggart.Tags.normalized_call(tag, attrs, content)
end
# Main method
defmacro unquote(tag)(attrs, do: content) do
tag = unquote(tag)
content =
case content do
{:__block__, _, inner} -> inner
_ -> content
end
Taggart.Tags.content_tag(tag, attrs, content)
end
# Keep below the main method above, otherwise macro expansion loops forever
defmacro unquote(tag)(content, attrs) when is_list(attrs) do
tag = unquote(tag)
Taggart.Tags.normalized_call(tag, attrs, content)
end
# div/3
defmacro unquote(tag)(_ignored, attrs, do: content) do
tag = unquote(tag)
content =
case content do
{:__block__, _, inner} -> inner
_ -> content
end
Taggart.Tags.content_tag(tag, attrs, content)
end
end
end
@doc """
Define a new void tag.
```
deftag :hr, void: true
deftag :img, void: true
hr()
img(class: "red")
```
"""
defmacro deftag(tag, void: true) do
quote location: :keep, bind_quoted: [
tag: Macro.escape(tag, unquote: true)
] do
@doc """
Produce a void "#{tag}" tag.
## Examples
iex> #{tag}() |> Phoenix.HTML.safe_to_string()
"<#{tag}>"
iex> #{tag}(class: "foo") |> Phoenix.HTML.safe_to_string()
"<#{tag} class=\\"foo\\">"
"""
defmacro unquote(tag)(attrs \\ []) do
tag = unquote(tag)
quote location: :keep do
Phoenix.HTML.Tag.tag(unquote(tag), unquote(attrs))
end
end
end
end
def normalized_call(tag, attrs, content) do
quote location: :keep do
unquote(tag)(unquote(attrs)) do
unquote(content)
end
end
end
def content_tag(tag, attrs, content) do
quote location: :keep do
content = unquote(content)
{:safe, escaped} = Phoenix.HTML.html_escape(content)
name = to_string(unquote(tag))
attrs = unquote(attrs)
{:safe, [?<, name, Taggart.Tags.build_attrs(name, attrs), ?>, escaped, ?<, ?/, name, ?>]}
end
end
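# Attribute keys whose keyword-list values are expanded into dasherized
# nested attributes, e.g. `data: [foo: "1"]` becomes `data-foo="1"`.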
@tag_prefixes [:aria, :data]
def build_attrs(_tag, []), do: []
def build_attrs(tag, attrs), do: build_attrs(tag, attrs, [])
def build_attrs(_tag, [], acc),
do: acc |> Enum.sort |> tag_attrs
def build_attrs(tag, [{k, v}|t], acc) when k in @tag_prefixes and is_list(v) do
build_attrs(tag, t, nested_attrs(dasherize(k), v, acc))
end
def build_attrs(tag, [{k, true}|t], acc) do
build_attrs(tag, t, [{dasherize(k)}|acc])
end
def build_attrs(tag, [{_, false}|t], acc) do
build_attrs(tag, t, acc)
end
def build_attrs(tag, [{_, nil}|t], acc) do
build_attrs(tag, t, acc)
end
def build_attrs(tag, [{k, v}|t], acc) do
build_attrs(tag, t, [{dasherize(k), v}|acc])
end
defp dasherize(value) when is_atom(value), do: dasherize(Atom.to_string(value))
defp dasherize(value) when is_binary(value), do: String.replace(value, "_", "-")
defp tag_attrs([]), do: []
defp tag_attrs(attrs) do
for a <- attrs do
case a do
{k, v} -> [?\s, k, ?=, ?", attr_escape(v), ?"]
{k} -> [?\s, k]
end
end
end
defp attr_escape({:safe, data}),
do: data
defp attr_escape(nil),
do: []
defp attr_escape(other) when is_binary(other),
do: Plug.HTML.html_escape(other)
defp attr_escape(other),
do: Phoenix.HTML.Safe.to_iodata(other)
defp nested_attrs(attr, dict, acc) do
Enum.reduce dict, acc, fn {k,v}, acc ->
attr_name = "#{attr}-#{dasherize(k)}"
case is_list(v) do
true -> nested_attrs(attr_name, v, acc)
false -> [{attr_name, v}|acc]
end
end
end
end
|
lib/taggart/tags.ex
| 0.869285 | 0.819496 |
tags.ex
|
starcoder
|
defmodule Interp.Canvas do
alias Interp.Functions
alias Interp.Interpreter
alias Reading.Reader
alias Parsing.Parser
require Interp.Functions
defstruct canvas: %{},
cursor: [0, 0],
on_stack: false
def is_single_direction_list?(directions) do
case directions do
[] -> true
[[[_, _] | _] | _] -> false
_ -> true
end
end
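# Patterns are expressed in the 0-7 direction encoding handled by
# parse_directions/2 (0 = up, 1 = up-right, ..., 7 = up-left); "+" for
# example draws up, down, down, up, right, left, left, right.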
defp pattern_templates, do: %{
"+" => "04402662",
"×" => "15513773"
}
defp special_ops, do: %{
"8" => :return_to_origin
}
defp map_to_regex(map), do: "(" <> (Map.keys(map) |> Enum.map(&Regex.escape/1) |> Enum.join("|")) <> ")"
defp pattern_regex, do: map_to_regex pattern_templates()
defp special_regex, do: map_to_regex special_ops()
defp parse_directions(list) when Functions.is_iterable(list), do: list |> Enum.map(fn x -> parse_directions(x) end)
defp parse_directions(string), do: parse_directions(string, [])
defp parse_directions("", parsed), do: parsed
defp parse_directions(string, parsed) do
cond do
Regex.match?(~r/^[0-7]/, string) ->
captures = Regex.named_captures(~r/^(?<direction>[0-7])(?<remaining>.*)/, string)
case captures["direction"] do
"0" -> parse_directions(captures["remaining"], parsed ++ [[:direction, :up]])
"1" -> parse_directions(captures["remaining"], parsed ++ [[:direction, :up_right]])
"2" -> parse_directions(captures["remaining"], parsed ++ [[:direction, :right]])
"3" -> parse_directions(captures["remaining"], parsed ++ [[:direction, :down_right]])
"4" -> parse_directions(captures["remaining"], parsed ++ [[:direction, :down]])
"5" -> parse_directions(captures["remaining"], parsed ++ [[:direction, :down_left]])
"6" -> parse_directions(captures["remaining"], parsed ++ [[:direction, :left]])
"7" -> parse_directions(captures["remaining"], parsed ++ [[:direction, :up_left]])
end
Regex.match?(~r/^#{special_regex()}/, string) ->
captures = Regex.named_captures(~r/^(?<op>#{special_regex()})(?<remaining>.*)/, string)
parse_directions(captures["remaining"], parsed ++ [[:special_op, Map.get(special_ops(), captures["op"])]])
Regex.match?(~r/^#{pattern_regex()}/, string) ->
captures = Regex.named_captures(~r/^(?<pattern>#{pattern_regex()})(?<remaining>.*)/, string)
parse_directions(Map.get(pattern_templates(), captures["pattern"]) <> captures["remaining"], parsed)
true ->
case List.last(parsed) do
[:code, prev_op] -> parse_directions(String.slice(string, 1..-1), Enum.slice(parsed, 0..-2) ++ [[:code, prev_op <> String.first(string)]])
_ -> parse_directions(String.slice(string, 1..-1), parsed ++ [[:code, String.first(string)]])
end
end
end
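# A minimal sketch of what the clauses above produce for a direction string;
# pattern characters such as "+" are first expanded to their template
# ("04402662") before parsing continues:
#
#   parse_directions("28", [])
#   #=> [[:direction, :right], [:special_op, :return_to_origin]]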
def write(canvas, len, characters, direction, environment) do
directions = parse_directions(direction)
{new_canvas, _, _} = write_canvas(canvas, len, characters, directions, environment)
new_canvas
end
# Normalizes the length of the given canvas environment. This means that if the returned environment
# denotes the length as 'nil', it will be reassigned to the given curr_length. Otherwise it will return
# the given environment untouched.
defp normalize_length({canvas, characters, nil}, curr_length), do: {canvas, characters, curr_length}
defp normalize_length({canvas, characters, length}, _), do: {canvas, characters, length}
# If the head of directions is not a direction, but a code snippet, run the code snippet on a stack
# which contains the length as an element and reassign the length to the result of running that code snippet.
defp write_canvas(canvas, len, characters, [[:code, op] | remaining], environment) do
list = [[:code, op] | remaining]
code = list |> Enum.take_while(fn [type, _] -> type == :code end) |> Enum.map(fn [_, op] -> op end) |> Enum.join("")
result = Functions.to_number Interpreter.flat_interp(Parser.parse(Reader.read(code)), [len], environment)
normalize_length(write_canvas(canvas, result, characters, remaining, environment), result)
end
# Interpretation of the special op.
defp write_canvas(canvas, len, characters, [[:special_op, op] | remaining], environment) do
case op do
:return_to_origin -> write_canvas(%{canvas | cursor: [0, 0]}, len, characters, remaining, environment)
end
end
# When the length rounds to 0 or to 1. We cannot round the length itself since we
# need to keep the unrounded version in memory in case commands are run against it.
defp write_canvas(canvas, length, characters, _, _) when length < 0.5, do: {canvas, characters, nil}
defp write_canvas(canvas, length, characters, _, _) when length < 1.5 do
new_canvas = cond do
Functions.is_iterable(characters) -> write_char(canvas, List.first Enum.to_list(characters))
String.length(characters) > 1 -> write_char(canvas, List.first String.graphemes(characters))
true -> write_char(canvas, characters)
end
{new_canvas, characters, nil}
end
# Main case for writing to the canvas.
defp write_canvas(canvas, len, characters, direction, environment) do
cond do
# var - var - var
Functions.is_single?(len) and Functions.is_single?(characters) and String.length(characters) == 1 and is_single_direction_list?(direction) and length(direction) == 1 ->
write_canvas(move_cursor(write_char(canvas, characters), List.first direction), len - 1, characters, [List.first direction], environment)
# var - var - vars
Functions.is_single?(len) and Functions.is_single?(characters) and String.length(characters) == 1 and is_single_direction_list?(direction) ->
direction |> Enum.reduce({canvas, nil, len}, fn (dir, {canvas_acc, _, curr_length}) ->
normalize_length(write_canvas(canvas_acc, curr_length, characters, [dir], environment), curr_length) end)
# var - vars - var
Functions.is_single?(len) and Functions.is_single?(characters) and is_single_direction_list?(direction) and length(direction) == 1 ->
normalize_length(write_canvas(canvas, len, String.graphemes(characters), direction, environment), len)
# var - vars - vars
Functions.is_single?(len) and Functions.is_single?(characters) and is_single_direction_list?(direction) ->
direction |> Enum.reduce({canvas, characters, len}, fn (dir, {canvas_acc, chars, curr_length}) ->
normalize_length(write_canvas(canvas_acc, curr_length, chars, [dir], environment), curr_length) end)
# var - var - list
Functions.is_single?(len) and Functions.is_single?(characters) and String.length(characters) == 1 and not is_single_direction_list?(direction) ->
direction |> Enum.reduce({canvas, nil, len}, fn (dir, {canvas_acc, _, curr_length}) ->
normalize_length(write_canvas(canvas_acc, curr_length, characters, dir, environment), curr_length) end)
# var - vars - list
Functions.is_single?(len) and Functions.is_single?(characters) and not is_single_direction_list?(direction) ->
direction |> Enum.reduce({canvas, String.graphemes(characters), len}, fn (dir, {canvas_acc, chars, curr_length}) ->
normalize_length(write_canvas(canvas_acc, curr_length, chars, dir, environment), curr_length) end)
# var - list - var
Functions.is_single?(len) and Functions.is_iterable(characters) and is_single_direction_list?(direction) and length(direction) == 1 ->
[head | remaining] = Enum.to_list characters
write_canvas(move_cursor(write_char(canvas, head), List.first direction), len - 1, remaining ++ [head], [List.first direction], environment)
# var - list - vars
Functions.is_single?(len) and Functions.is_iterable(characters) and is_single_direction_list?(direction) ->
direction |> Enum.reduce({canvas, characters, len}, fn (dir, {canvas_acc, chars, curr_length}) ->
normalize_length(write_canvas(canvas_acc, curr_length, chars, [dir], environment), curr_length) end)
# var - list - list
Functions.is_single?(len) and Functions.is_iterable(characters) and not is_single_direction_list?(direction) ->
direction |> Enum.reduce({canvas, characters, len}, fn (dir, {canvas_acc, chars, curr_length}) ->
normalize_length(write_canvas(canvas_acc, curr_length, chars, dir, environment), curr_length) end)
# list - var - var(s)
Functions.is_iterable(len) and Functions.is_single?(characters) and String.length(characters) == 1 and is_single_direction_list?(direction) ->
len |> Enum.reduce({canvas, characters, nil}, fn (curr_len, {canvas_acc, chars, _}) -> write_canvas(canvas_acc, curr_len, chars, direction, environment) end)
# list - vars - var(s)
Functions.is_iterable(len) and Functions.is_single?(characters) and is_single_direction_list?(direction) ->
len |> Enum.reduce({canvas, String.graphemes(characters), nil}, fn (curr_len, {canvas_acc, chars, _}) -> write_canvas(canvas_acc, curr_len, chars, direction, environment) end)
# list - var - list
Functions.is_iterable(len) and Functions.is_single?(characters) and not is_single_direction_list?(direction) ->
Stream.zip(len, Stream.cycle(direction)) |> Enum.to_list |> Enum.reduce({canvas, characters, nil}, fn ({curr_len, curr_dir}, {canvas_acc, chars, _}) -> write_canvas(canvas_acc, curr_len, chars, curr_dir, environment) end)
# list - list - var(s)
Functions.is_iterable(len) and Functions.is_iterable(characters) and is_single_direction_list?(direction) ->
characters |> Enum.reduce({canvas, direction, nil}, fn (curr_char, {canvas_acc, _, _}) -> write_canvas(canvas_acc, len, curr_char, direction, environment) end)
# list - list - list
Functions.is_iterable(len) and Functions.is_iterable(characters) and not is_single_direction_list?(direction) ->
Stream.zip([len, characters, Stream.cycle(direction)]) |> Enum.to_list |> Enum.reduce({canvas, characters, nil}, fn ({curr_len, curr_char, curr_dir}, {canvas_acc, _, _}) ->
write_canvas(canvas_acc, curr_len, curr_char, curr_dir, environment) end)
end
end
defp at(list, x, y), do: Enum.at(Enum.at(list, y), x)
def canvas_to_string(canvas) do
keys = Map.keys(canvas.canvas)
if keys == [] do
""
else
min_x = Enum.reduce(keys, at(keys, 0, 0), fn ([x, _], acc) -> min(x, acc) end)
max_x = Enum.reduce(keys, at(keys, 0, 0), fn ([x, _], acc) -> max(x, acc) end)
min_y = Enum.reduce(keys, at(keys, 1, 0), fn ([_, y], acc) -> min(y, acc) end)
max_y = Enum.reduce(keys, at(keys, 1, 0), fn ([_, y], acc) -> max(y, acc) end)
canvas_array = List.duplicate(List.duplicate(" ", max_x - min_x + 1), max_y - min_y + 1)
Enum.reduce(keys, canvas_array, fn (key, acc) ->
# Get the position of the current key where [0, 0] represents the [min_x, max_y] coordinate.
x = Enum.at(key, 0) - min_x
y = Enum.at(key, 1) - min_y
inner_list = List.replace_at(Enum.at(acc, y), x, Map.get(canvas.canvas, key))
List.replace_at(acc, y, inner_list)
end)
|> Enum.reverse
|> Enum.map(fn x -> Enum.join(x, "") end) |> Enum.join("\n")
end
end
defp move_cursor(canvas, [:direction, direction]) do
x = Enum.at(canvas.cursor, 0)
y = Enum.at(canvas.cursor, 1)
# Directions:
# 7 0 1
# \|/
# 6-x-2
# /|\
# 5 4 3
new_cursor = case direction do
:up -> [x, y + 1]
:up_right -> [x + 1, y + 1]
:right -> [x + 1, y]
:down_right -> [x + 1, y - 1]
:down -> [x, y - 1]
:down_left -> [x - 1, y - 1]
:left -> [x - 1, y]
:up_left -> [x - 1, y + 1]
_ -> [x, y]
end
%{canvas | cursor: new_cursor}
end
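# e.g. move_cursor(%Interp.Canvas{cursor: [0, 0]}, [:direction, :up_right])
# moves the cursor to [1, 1] (direction 1 in the diagram above).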
defp write_char(canvas, char) do
%{canvas | canvas: Map.put(canvas.canvas, canvas.cursor, char)}
end
end
|
lib/interp/canvas.ex
| 0.577019 | 0.576453 |
canvas.ex
|
starcoder
|
defmodule Snor.Parser do
@moduledoc """
Convert a string into an intermediate representation - a list of nodes.
A node could be one of (mainly) -
- A plaintext node
- A function node
- A block node
- An interpolation node
"""
@typedoc """
A parsed node
"""
@type parsed_node :: plaintext_node() | interpolation_node() | block_node() | function_node()
@typedoc "A grapheme from the template that was passed in"
@type token :: String.grapheme()
@typedoc "Represents remaining tokens after a node was parsed and extracted"
@type remaining_tokens :: [token]
@typedoc "A block node"
@type block_node :: %{with_scope: String.t(), children: [parsed_node()], negative: boolean()}
@typedoc "A plaintext node"
@type plaintext_node :: %{plaintext: String.t()}
@typedoc "An interpolation node"
@type interpolation_node :: %{interpolation: String.t()}
@typedoc "An argument pair"
@type argument_pair :: %{key: String.t(), value: [parsed_node]}
@typedoc "A function node"
@type function_node :: %{function: String.t(), arguments: [argument_pair]}
@typedoc "The result of a `parse_*` operation"
@type parse_result :: {:error, String.t()} | {:ok, parsed_node | any, binary()}
@typedoc "A parser"
@type parser :: function()
@doc ~S"""
Parse a string into a list of nodes
## Examples
iex> Snor.Parser.parse("Hello")
[%{plaintext: "Hello"}]
iex> Snor.Parser.parse("{{name}}")
[%{interpolation: "name", escape: true}]
iex> Snor.Parser.parse("{{{name}}}")
[%{interpolation: "name", escape: false}]
iex> Snor.Parser.parse("{{upcase item='Jane'}}")
[%{arguments: [%{key: "item", value: [%{plaintext: "Jane"}]}], function: "upcase"}]
iex> Snor.Parser.parse("{{#person}}{{name}}{{/person}}")
[%{children: [%{interpolation: "name", escape: true}], negative: false, with_scope: "person"}]
"""
@spec parse(binary()) :: [parsed_node()]
def parse(input) do
parser = nodes()
case parser.(input) do
{:ok, nodes, <<>>} -> nodes
{:ok, _, _} -> raise ArgumentError, "Couldn't parse"
{:error, error} -> raise ArgumentError, "Error #{error}"
end
end
@spec nodes() :: parser()
defp nodes,
do:
any_node()
|> many()
|> non_zero()
@spec any_node() :: parser()
defp any_node,
do:
[
interpolation(),
block(),
function(),
comment_node(),
plaintext_node()
]
|> choice()
|> non_zero()
@spec plaintext_node() :: parser()
defp plaintext_node do
fn input ->
with {:ok, contents, rest} <- plaintext().(input),
<<_::utf8, _::binary>> <- contents do
{:ok, %{plaintext: contents}, rest}
else
_ -> {:error, "Was expecting a plaintext node"}
end
end
end
@spec function() :: parser()
defp function do
fn input ->
with {:ok, <<?{, ?{>>, rest} <- nchars(2).(input),
{:ok, function_name, rest} <- plaintext_apart_from([?\s]).(rest),
{:ok, arguments, rest} <- argument_pairs().(rest),
{:ok, <<?}, ?}>>, rest} <- nchars(2).(rest) do
{:ok, %{function: function_name, arguments: arguments}, rest}
else
_ -> {:error, "Expected function"}
end
end
end
@spec argument_pairs() :: parser()
defp argument_pairs,
do:
argument_pair_with_leading_space()
|> many
|> non_zero()
@spec argument_pair_with_leading_space() :: parser()
defp argument_pair_with_leading_space do
fn input ->
with {:ok, ?\s, rest} <- char().(input),
{:ok, pair, rest} <- argument_pair().(rest) do
{:ok, pair, rest}
else
_ -> {:error, "Could not parse argument pair with leading space"}
end
end
end
@spec argument_pair() :: parser()
defp argument_pair do
fn input ->
with {:ok, key, rest} <- plaintext_apart_from([?=]).(input),
{:ok, ?=, rest} <- char().(rest),
{:ok, value, rest} <- argument_value().(rest) do
{:ok, %{key: key, value: value}, rest}
else
_ ->
{:error, "Was expecting an argument pair"}
end
end
end
@spec argument_value() :: parser()
defp argument_value do
fn input ->
with {:ok, ?', rest} <- char().(input),
{:ok, contents, rest} <- argument_contents().(rest),
{:ok, ?', rest} <- char().(rest),
{:ok, inner_nodes, <<>>} <- nodes().(contents) do
{:ok, inner_nodes, rest}
else
_ -> {:error, "Expected a quoted argument"}
end
end
end
@spec argument_contents() :: parser()
defp argument_contents,
do:
char()
|> satisfy(&(&1 != ?'), "anything but a '")
|> many
|> non_zero
|> map(&to_string/1)
@spec comment_node() :: parser()
defp comment_node do
fn input ->
with {:ok, <<?{, ?{, ?!>>, rest} <- nchars(3).(input),
{:ok, _, rest} <- plaintext_chars().(rest),
{:ok, <<?}, ?}>>, rest} <- nchars(2).(rest) do
{:ok, %{plaintext: ""}, rest}
else
_ ->
{:error, "Weird comment"}
end
end
end
@spec interpolation() :: parser()
defp interpolation do
fn input ->
with {:ok, %{escape: e, type: type}, rest} <- opening_braces().(input),
{:ok, _, rest} <- whitespaces().(rest),
{:ok, tag_name, rest} <- tag_name().(rest),
{:ok, _, rest} <- whitespaces().(rest),
{:ok, %{type: ^type}, rest} <- closing_braces().(rest) do
{:ok, %{interpolation: tag_name, escape: e}, rest}
else
_ ->
{:error, "Expected an interpolation"}
end
end
end
@spec opening_braces :: parser
defp opening_braces do
fn input ->
case input do
<<?{, ?{, ?{, rest::binary>> -> {:ok, %{escape: false, type: :triple}, rest}
<<?{, ?{, ?&, rest::binary>> -> {:ok, %{escape: false, type: :double}, rest}
<<?{, ?{, rest::binary>> -> {:ok, %{escape: true, type: :double}, rest}
_ -> {:error, "Opening braces expected"}
end
end
end
@spec closing_braces :: parser
defp closing_braces do
fn input ->
case input do
<<?}, ?}, ?}, rest::binary>> -> {:ok, %{type: :triple}, rest}
<<?}, ?}, rest::binary>> -> {:ok, %{type: :double}, rest}
_ -> {:error, "Opening braces expected"}
end
end
end
@spec whitespaces :: parser
defp whitespaces,
do: char() |> satisfy(&(&1 == ?\s)) |> many
@spec tag_name() :: parser()
defp tag_name do
char()
|> satisfy(&(&1 in ?a..?z || &1 in ?A..?Z || &1 in ?0..?9 || &1 in [?_, ?.]))
|> many()
|> non_zero()
|> map(&to_string/1)
end
@spec nodes_until(any) :: parser()
defp nodes_until(needle),
do:
any_node()
|> satisfy(&(&1 != needle), "anything but #{inspect(needle)}")
|> many()
|> non_zero()
@spec block() :: parser()
defp block do
fn input ->
with {:ok, %{opening_scope: scope, negative: negative}, rest} <-
open_scope().(input),
{:ok, contents, rest} <- nodes_until(%{closing_scope: scope}).(rest),
{:ok, %{closing_scope: ^scope}, rest} <- close_scope().(rest) do
{:ok, %{with_scope: scope, children: contents, negative: negative}, rest}
else
{:ok, %{closing_scope: scope}, _} ->
{:error, "Closed #{scope}, but it was not opened"}
_ ->
{:error, "Was expecting a block"}
end
end
end
@spec block_type_specifier :: parser
defp block_type_specifier do
fn input ->
case input do
<<?#, rest::binary>> -> {:ok, :positive, rest}
<<?^, rest::binary>> -> {:ok, :negative, rest}
<<c::utf8, _::binary>> -> {:error, "A block cannot start with #{c}"}
end
end
end
@spec open_scope() :: parser()
defp open_scope do
fn input ->
with {:ok, <<?{, ?{>>, rest} <- nchars(2).(input),
{:ok, block_type, rest} <- block_type_specifier().(rest),
{:ok, _, rest} <- whitespaces().(rest),
{:ok, tag_name, rest} <- plaintext().(rest),
{:ok, _, rest} <- whitespaces().(rest),
{:ok, <<?}, ?}>>, rest} <- nchars(2).(rest) do
{:ok, %{opening_scope: tag_name, negative: block_type == :negative}, rest}
else
_ -> {:error, "Expected a block"}
end
end
end
@spec close_scope() :: parser()
defp close_scope do
fn input ->
with {:ok, <<?{, ?{, ?/>>, rest} <- nchars(3).(input),
{:ok, _, rest} <- whitespaces().(rest),
{:ok, tag_name, rest} <- plaintext().(rest),
{:ok, _, rest} <- whitespaces().(rest),
{:ok, <<?}, ?}>>, rest} <- nchars(2).(rest) do
{:ok, %{closing_scope: tag_name}, rest}
else
_ -> {:error, "Expected a block close"}
end
end
end
@spec map(parser(), fun()) :: parser()
defp map(parser, mapper) do
fn input ->
with {:ok, term, rest} <- parser.(input),
do: {:ok, mapper.(term), rest}
end
end
@spec plaintext() :: parser()
defp plaintext,
do:
plaintext_chars()
|> map(&to_string/1)
@spec plaintext_apart_from([byte()]) :: parser()
defp plaintext_apart_from(chars),
do:
plaintext_char()
|> satisfy(&(&1 not in chars), "anything but #{chars}")
|> many()
|> non_zero()
|> map(&to_string/1)
@spec non_zero(parser()) :: parser()
defp non_zero(parser) do
fn input ->
with {:ok, [], _rest} <- parser.(input),
do: {:error, "Wasn't expecting nothing"}
end
end
@spec choice([parser()]) :: parser()
defp choice(parsers) do
fn input ->
case parsers do
[] ->
{:error, "No way to parse - #{input}"}
[h | t] ->
with {:error, _} <- h.(input),
do: choice(t).(input)
end
end
end
@spec many(parser()) :: parser()
defp many(parser) do
fn input ->
case parser.(input) do
{:error, _error} ->
{:ok, [], input}
{:ok, term, rest} ->
with {:ok, other_terms, rest} <- many(parser).(rest),
do: {:ok, [term | other_terms], rest}
end
end
end
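# A quick sketch of the combinator in action (using the private char/0
# parser defined below; ?a and ?b are codepoints):
#
#   many(char()).("ab") #=> {:ok, [?a, ?b], ""}
#   many(char()).("")   #=> {:ok, [], ""}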
@spec satisfy(parser(), fun(), String.t()) :: parser()
defp satisfy(parser, predicate, expectation \\ "unexpected input") do
fn input ->
with {:ok, term, rest} <- parser.(input) do
if predicate.(term),
do: {:ok, term, rest},
else: {:error, "Expected #{expectation} before #{rest}"}
end
end
end
@spec consume_plaintext(binary, [byte], binary) :: {:ok, [byte], binary}
defp consume_plaintext(<<>>, buf, rest), do: {:ok, Enum.reverse(buf), rest}
defp consume_plaintext(<<?}::utf8, ?}::utf8, _::binary>>, buf, rest),
do: {:ok, Enum.reverse(buf), rest}
defp consume_plaintext(<<?{::utf8, ?{::utf8, _::binary>>, buf, rest),
do: {:ok, Enum.reverse(buf), rest}
defp consume_plaintext(<<char::utf8, rest::binary>>, buf, _),
do: consume_plaintext(rest, [char | buf], rest)
@spec plaintext_chars() :: parser()
defp plaintext_chars,
do: &consume_plaintext(&1, [], &1)
@spec plaintext_char() :: parser()
defp plaintext_char do
fn input ->
case input do
<<>> ->
{:error, "Unexpected end of input"}
<<?}::utf8, ?}::utf8, _::binary>> ->
{:error, "reached closing braces"}
<<?{::utf8, ?{::utf8, _::binary>> ->
{:error, "reached opening braces"}
<<char::utf8, rest::binary>> ->
{:ok, char, rest}
end
end
end
@spec maybe(parser) :: parser
defp maybe(parser) do
fn input ->
case parser.(input) do
{:ok, term, rest} -> {:ok, term, rest}
_ -> {:ok, nil, input}
end
end
end
@spec char(char()) :: parser
defp char(c) do
fn input ->
case input do
<<^c::utf8, rest::binary>> -> {:ok, c, rest}
_ -> {:error, "Was expecting #{c}"}
end
end
end
@spec char() :: parser()
defp char do
fn input ->
case input do
"" -> {:error, "Unexpected end of input"}
<<char::utf8, rest::binary>> -> {:ok, char, rest}
end
end
end
@spec nchars(non_neg_integer()) :: parser()
defp nchars(n) do
fn input ->
case input do
<<chars::binary-size(n), rest::binary>> -> {:ok, chars, rest}
_ -> {:error, "Unexpected end of input"}
end
end
end
end
|
lib/snor/parser.ex
| 0.887125 | 0.6081 |
parser.ex
|
starcoder
|
defmodule Chronic.Tokenizer do
@moduledoc false
@day_names ["sunday", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday"]
@abbr_months ["jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec"]
def tokenize([token | remainder]) do
[tokenize(token) | tokenize(remainder)]
end
def tokenize([]) do
[]
end
def tokenize(token) do
token = String.downcase(token)
ordinal_regex = ~r/\A(?<number>\d+)(st|nd|rd|th)\Z/
# credo:disable-for-next-line
time_regex = ~r/(?<hour>\d{1,2}):?(?<minute>\d{1,2})?:?(?<second>\d{1,2})?\.?(?<microsecond>\d{1,6})?(?<am_or_pm>am|pm)?/
cond do
Enum.any?(@abbr_months, fn (month) -> matches_month?(token, month) end) ->
{:month, month_number(token)}
Enum.any?(@day_names, fn (dotw) -> matches_day_of_the_week?(token, dotw) end) ->
{:day_of_the_week, day_of_the_week_number(token)}
Regex.match?(ordinal_regex, token) ->
%{"number" => number} = Regex.named_captures(ordinal_regex, token)
{:number, String.to_integer(number)}
Regex.match?(~r/\A\d+\Z/, token) ->
{:number, String.to_integer(token)}
Regex.match?(time_regex, token) ->
process_time(time_regex, token)
Regex.match?(~r/\A\w+\Z/, token) ->
{:word, token }
true ->
nil
end
end
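# Examples of individual token classification (per the clauses above):
#
#   tokenize("aug") #=> {:month, 8}
#   tokenize("3rd") #=> {:number, 3}
#   tokenize("5pm") #=> {:time, [hour: 17, minute: 0, second: 0, microsecond: {0, 6}]}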
def matches_month?(token, month) do
String.starts_with?(token, month)
end
defp month_number(token) do
Enum.find_index(@abbr_months, fn (month) -> matches_month?(token, month) end) + 1
end
defp matches_day_of_the_week?(token, day_of_the_week) do
day_of_the_week == token || String.starts_with?(day_of_the_week, token)
end
defp day_of_the_week_number(token) do
Enum.find_index(@day_names, fn (day_of_the_week) -> matches_day_of_the_week?(token, day_of_the_week) end)
end
defp process_time(time_regex, token) do
%{
"hour" => hour,
"minute" => minute,
"second" => second,
"microsecond" => microsecond,
"am_or_pm" => am_or_pm
} = Regex.named_captures(time_regex, token)
hour = String.to_integer(hour)
hour = shift_hour(hour, am_or_pm)
minute = if minute == "", do: 0, else: String.to_integer(minute)
second = if second == "", do: 0, else: String.to_integer(second)
microsecond = if microsecond == "", do: 0, else: String.to_integer(microsecond)
{:time, [hour: hour, minute: minute, second: second, microsecond: {microsecond, 6}]}
end
defp shift_hour(12, "am"), do: 0
defp shift_hour(12, "pm"), do: 12
defp shift_hour(hr, "pm"), do: hr + 12
defp shift_hour(hr, _ ), do: hr
end
|
lib/chronic/tokenizer.ex
| 0.535584 | 0.434701 |
tokenizer.ex
|
starcoder
|
defmodule Base85.Decode do
@moduledoc """
Implements decoding functionality for Base85 encoding.
"""
import Base85.{Charsets, Padding}
@typedoc "available character sets"
@type charset() :: Base85.Charsets.charset()
@typedoc "available padding techniques"
@type padding() :: Base85.Padding.padding()
@typedoc "options for decoding"
@type decoding_opts() :: [charset: charset(), padding: padding()]
@typedoc "decoding errors"
@type decoding_error() ::
:unrecognized_character_set
| :unrecognized_padding
| :invalid_encoded_length
| :invalid_character_for_character_set
| :invalid_padding_data
| :internal_error
@doc """
Decodes binary data from a Base85-encoded string.
This version returns the value or raises an error.
## Examples
iex> Base85.Decode.decode!("N.Xx21Kf++HD3`AI>AZp$Aer7", charset: :safe85, padding: :pkcs7)
"some binary data"
iex> Base85.Decode.decode!("f!$Kwf!$Kwf!$Kw", charset: :zeromq, padding: :none)
"123412341234"
iex> Base85.Decode.decode!("123", charset: :safe85, padding: :none)
** (Base85.InvalidEncodedLength) encoded data had invalid encoded length, expected multiple of 5 characters
## Options
* `binary` - the binary data to decode; its length must be a multiple of
5 characters;
* `:charset` - an atom indicating the character set to use for decoding;
* `:padding` - an atom indicating which padding technique to use;
Padding methods and encodings may use additional options.
"""
@spec decode!(binary(), decoding_opts()) :: binary()
def decode!(bin, opts \\ []) when is_binary(bin) do
dec_fun = get_dec_fun(opts)
unpad_fun = get_unpad_fun(opts)
if rem(byte_size(bin), 5) != 0 do
raise Base85.InvalidEncodedLength, hint: "multiple of 5 characters"
end
if bin == <<>> do
# special case for all encodings (for now)
<<>>
else
bin
|> decode_chunks(dec_fun, opts)
|> IO.iodata_to_binary()
|> unpad_fun.(opts)
end
end
@doc """
Decodes binary data from a Base85-encoded string.
This version returns an `:ok`-tuple or `:error`-tuple.
## Examples
iex> Base85.Decode.decode("N.Xx21Kf++HD3`AI>AZp$Aer7", charset: :safe85, padding: :pkcs7)
{:ok, "some binary data"}
iex> Base85.Decode.decode("f!$Kwf!$Kwf!$Kw", charset: :zeromq, padding: :none)
{:ok, "123412341234"}
iex> Base85.Decode.decode("123", charset: :safe85, padding: :none)
{:error, :invalid_encoded_length}
## Options
* `binary` - the binary data to decode; its length must be a multiple of
5 characters;
* `:charset` - an atom indicating the character set to use for decoding;
* `:padding` - an atom indicating which padding technique to use;
Padding methods and encodings may use additional options.
"""
@spec decode(binary(), decoding_opts()) :: {:ok, binary()} | {:error, decoding_error()}
def decode(bin, opts) when is_binary(bin) do
{:ok, decode!(bin, opts)}
rescue
Base85.UnrecognizedCharacterSet ->
{:error, :unrecognized_character_set}
Base85.UnrecognizedPadding ->
{:error, :unrecognized_padding}
Base85.InvalidEncodedLength ->
{:error, :invalid_encoded_length}
Base85.InvalidCharacterForCharacterSet ->
{:error, :invalid_character_for_character_set}
Base85.InvalidPaddingData ->
{:error, :invalid_padding_data}
Base85.InternalError ->
{:error, :internal_error}
end
# private functions
defp decode_chunks(bin, dec_fun, opts, decoded \\ [])
defp decode_chunks(<<>>, _dec_fun, _opts, decoded) do
Enum.reverse(decoded)
end
defp decode_chunks(<<d1::8, d2::8, d3::8, d4::8, d5::8, rest::binary>>, dec_fun, opts, decoded) do
val = dec_fun.(d1)
val = val * 85 + dec_fun.(d2)
val = val * 85 + dec_fun.(d3)
val = val * 85 + dec_fun.(d4)
val = val * 85 + dec_fun.(d5)
decode_chunks(rest, dec_fun, opts, [<<val::integer-big-unsigned-32>> | decoded])
end
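# Worked example: a five-character chunk whose digit values are
# [d1, d2, d3, d4, d5] folds into
# (((d1 * 85 + d2) * 85 + d3) * 85 + d4) * 85 + d5,
# i.e. the chunk's base-85 value, emitted as one big-endian unsigned
# 32-bit word.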
defp get_dec_fun(opts) do
charset = Keyword.get(opts, :charset, :safe85)
if not Map.has_key?(charsets(), charset) do
raise Base85.UnrecognizedCharacterSet, charset: charset, operation: :decoding
end
dec_map = charsets()[charset] |> Enum.with_index() |> Map.new()
&case dec_map[&1] do
nil ->
raise Base85.InvalidCharacterForCharacterSet, charset: charset, character: &1
val ->
val
end
end
end
|
lib/base85/decode.ex
| 0.947308 | 0.451145 |
decode.ex
|
starcoder
|
defmodule OliWeb.Common.ManualModal do
@moduledoc """
A reusable LiveComponent for a Bootstrap modal, one that is manually controlled via JavaScript and that
must be added to and removed from a parent LiveView programmatically when it needs to be shown.
This component should be used in place of `OliWeb.Common.Modal` when there is a need to programmatically
vary the content that the modal displays from invocation to invocation. The only way to do this
is to completely remove the component from the DOM and remount it with the new content. Doing this, however,
forces the launching of the modal to be done via JavaScript (notice the phx-hook that is present on
the root div of this modal)
Minimal example usage specifying only the required properties:
```
<%= live_component ManualModal, title: "Confirm your request", modal_id: "my_unique_modal_id", ok_action: "confirm" do %>
<p class="mb-4">Are you sure you want to do this?</p>
<% end %>
```
Required properties:
`title`: The string title that the modal will display
`modal_id`: The DOM id that will be attached to this modal. This is the id that another part of the UI needs
to target to trigger the modal
`ok_action`: The phx-click action to invoke upon clicking the 'Ok' button
Optional properties:
`ok_label`: The label to use for the 'Ok' button, defaults to 'Ok'
`ok_style`: The Bootstrap button style to use for the 'Ok' button, defaults to `btn-primary`
"""
use Phoenix.LiveComponent
def mount(socket) do
# Default property values
{:ok,
assign(socket,
ok_label: "Ok",
ok_style: "btn-primary"
)}
end
def render(assigns) do
~L"""
<div class="modal fade show" style="display: block" id="<%= @modal_id %>" tabindex="-1" role="dialog" aria-hidden="true" phx-hook="ModalLaunch">
<div class="modal-dialog modal-dialog-centered modal-lg" role="document">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title"><%= @title %></h5>
<button type="button" class="close" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">
<%= render_block(@inner_block) %>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-secondary" data-dismiss="modal" phx-click="cancel_modal">Cancel</button>
<button type="button" class="btn <%= @ok_style %>" data-dismiss="modal" phx-click="<%= @ok_action %>"><%= @ok_label %></button>
</div>
</div>
</div>
</div>
"""
end
end
|
lib/oli_web/live/common/manual_modal.ex
| 0.772917 | 0.636819 |
manual_modal.ex
|
starcoder
|
defmodule Sanbase.PricePair.MetricAdapter do
@behaviour Sanbase.Metric.Behaviour
alias Sanbase.PricePair
@aggregations [:any, :sum, :avg, :min, :max, :last, :first, :median, :ohlc]
@default_aggregation :last
@timeseries_metrics ["price_usd", "price_btc"]
@histogram_metrics []
@table_metrics []
@metrics @histogram_metrics ++ @timeseries_metrics ++ @table_metrics
@access_map Enum.into(@metrics, %{}, fn metric -> {metric, :free} end)
@min_plan_map Enum.into(@metrics, %{}, fn metric -> {metric, :free} end)
@free_metrics Enum.filter(@access_map, fn {_, level} -> level == :free end)
|> Enum.map(&elem(&1, 0))
@restricted_metrics Enum.filter(@access_map, fn {_, level} -> level == :restricted end)
|> Enum.map(&elem(&1, 0))
@required_selectors Enum.into(@metrics, %{}, &{&1, []})
@default_complexity_weight 0.3
@impl Sanbase.Metric.Behaviour
def has_incomplete_data?(_), do: false
@impl Sanbase.Metric.Behaviour
def complexity_weight(_), do: @default_complexity_weight
@impl Sanbase.Metric.Behaviour
def required_selectors(), do: @required_selectors
@impl Sanbase.Metric.Behaviour
def broken_data(_metric, _selector, _from, _to), do: {:ok, []}
@impl Sanbase.Metric.Behaviour
def timeseries_data(metric, %{slug: slug}, from, to, interval, opts) do
quote_asset = metric_to_quote_asset(metric)
opts = update_opts(opts)
PricePair.timeseries_data(slug, quote_asset, from, to, interval, opts)
end
@impl Sanbase.Metric.Behaviour
def timeseries_data_per_slug(metric, %{slug: slug}, from, to, interval, opts) do
quote_asset = metric_to_quote_asset(metric)
opts = update_opts(opts)
PricePair.timeseries_data_per_slug(slug, quote_asset, from, to, interval, opts)
end
@impl Sanbase.Metric.Behaviour
def aggregated_timeseries_data(metric, %{slug: slug}, from, to, opts) do
quote_asset = metric_to_quote_asset(metric)
opts = update_opts(opts)
PricePair.aggregated_timeseries_data(slug, quote_asset, from, to, update_opts(opts))
end
@impl Sanbase.Metric.Behaviour
def slugs_by_filter(metric, from, to, operator, threshold, opts) do
quote_asset = metric_to_quote_asset(metric)
opts = update_opts(opts)
PricePair.slugs_by_filter(quote_asset, from, to, operator, threshold, opts)
end
@impl Sanbase.Metric.Behaviour
def slugs_order(metric, from, to, direction, opts) do
quote_asset = metric_to_quote_asset(metric)
opts = update_opts(opts)
PricePair.slugs_order(quote_asset, from, to, direction, opts)
end
@impl Sanbase.Metric.Behaviour
def first_datetime(metric, %{slug: slug}) do
quote_asset = metric_to_quote_asset(metric)
opts = update_opts([])
PricePair.first_datetime(slug, quote_asset, opts)
end
@impl Sanbase.Metric.Behaviour
def last_datetime_computed_at(metric, %{slug: slug}) do
quote_asset = metric_to_quote_asset(metric)
opts = update_opts([])
PricePair.last_datetime_computed_at(slug, quote_asset, opts)
end
@impl Sanbase.Metric.Behaviour
def metadata(metric) do
{:ok,
%{
metric: metric,
min_interval: "1s",
default_aggregation: @default_aggregation,
available_aggregations: @aggregations,
available_selectors: [:slug],
data_type: :timeseries,
complexity_weight: @default_complexity_weight
}}
end
@impl Sanbase.Metric.Behaviour
def human_readable_name(metric) do
case metric do
"price_usd" -> {:ok, "Price in USD"}
"price_btc" -> {:ok, "Price in BTC"}
end
end
@impl Sanbase.Metric.Behaviour
def available_aggregations(), do: @aggregations
@impl Sanbase.Metric.Behaviour
def available_timeseries_metrics(), do: @timeseries_metrics
@impl Sanbase.Metric.Behaviour
def available_histogram_metrics(), do: @histogram_metrics
@impl Sanbase.Metric.Behaviour
def available_table_metrics(), do: @table_metrics
@impl Sanbase.Metric.Behaviour
def available_metrics(), do: @metrics
@impl Sanbase.Metric.Behaviour
def available_metrics(%{slug: slug}) do
cache_key = {__MODULE__, :has_price_data?, slug} |> Sanbase.Cache.hash()
cache_key_with_ttl = {cache_key, 600}
case Sanbase.Cache.get_or_store(cache_key_with_ttl, fn ->
PricePair.available_quote_assets(slug)
end) do
{:ok, quote_assets} ->
metrics =
if("BTC" in quote_assets, do: ["price_btc"], else: []) ++
if "USD" in quote_assets, do: ["price_usd"], else: []
{:ok, metrics}
{:error, error} ->
{:error, error}
end
end
@impl Sanbase.Metric.Behaviour
def available_slugs() do
cache_key = {__MODULE__, :slugs_with_prices} |> Sanbase.Cache.hash()
Sanbase.Cache.get_or_store({cache_key, 600}, fn -> PricePair.available_slugs() end)
end
@impl Sanbase.Metric.Behaviour
def available_slugs(metric) when metric in @metrics do
quote_asset = metric_to_quote_asset(metric)
opts = update_opts([])
cache_key = {__MODULE__, :available_slugs_for_metric, quote_asset} |> Sanbase.Cache.hash()
Sanbase.Cache.get_or_store({cache_key, 600}, fn ->
PricePair.available_slugs(quote_asset, opts)
end)
end
@impl Sanbase.Metric.Behaviour
def free_metrics(), do: @free_metrics
@impl Sanbase.Metric.Behaviour
def restricted_metrics(), do: @restricted_metrics
@impl Sanbase.Metric.Behaviour
def access_map(), do: @access_map
@impl Sanbase.Metric.Behaviour
def min_plan_map(), do: @min_plan_map
# Private functions
defp update_opts(opts) do
Keyword.update(opts, :aggregation, @default_aggregation, &(&1 || @default_aggregation))
end
defp metric_to_quote_asset("price_btc"), do: "BTC"
defp metric_to_quote_asset("price_usd"), do: "USD"
end
|
lib/sanbase/prices/price_pair/metric_adapter.ex
| 0.803868 | 0.402803 |
metric_adapter.ex
|
starcoder
|
defmodule ReceiptDecoder.Verifier do
@moduledoc """
Verify receipt
"""
alias ReceiptDecoder.Extractor
alias ReceiptDecoder.Verifier.PublicKey
alias ReceiptDecoder.Verifier.AppleRootCertificate
require PublicKey
@wwdr_cert_policies_extension_oid {1, 2, 840, 113_635, 100, 6, 2, 1}
@itunes_cert_marker_extension_oid {1, 2, 840, 113_635, 100, 6, 11, 1}
@doc """
Verify the receipt
"""
@spec verify(Extractor.receipt_t()) :: :ok | {:error, any}
def verify(receipt) do
{[itunes_cert, wwdr_cert, root_cert], signer} = destruct_receipt(receipt)
with :ok <- verify_root_cert_fingerprint(root_cert),
:ok <- verify_wwdr_cert(wwdr_cert),
:ok <- verify_wwdr_cert_policies_extension_oid(wwdr_cert),
:ok <- verify_itunes_cert(itunes_cert, wwdr_cert),
:ok <- verify_itunes_cert_marker_extension_oid(itunes_cert),
:ok <- verify_signature(signer, receipt, itunes_cert) do
:ok
else
{:error, msg} ->
{:error, msg}
end
end
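# Typical usage, assuming a receipt term of type Extractor.receipt_t() is
# already at hand (obtain_receipt/1 below is a hypothetical placeholder,
# not part of the Extractor API shown here):
#
#   {:ok, receipt} = obtain_receipt(base64_data)
#   :ok = ReceiptDecoder.Verifier.verify(receipt)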
defp verify_root_cert_fingerprint({:certificate, root_cert}) do
cert_bin = :public_key.pkix_encode(:Certificate, root_cert, :plain)
fingerprint = :crypto.hash(:sha256, cert_bin)
case AppleRootCertificate.fingerprint() do
^fingerprint ->
:ok
_ ->
{:error, :invalid_root_cert_fingerprint}
end
end
defp verify_wwdr_cert({:certificate, cert}) do
cert = :public_key.der_encode(:Certificate, cert)
case :public_key.pkix_verify(cert, AppleRootCertificate.public_key()) do
true ->
:ok
false ->
{:error, :invalid_wwdr_cert}
end
end
defp verify_wwdr_cert_policies_extension_oid(wwdr_cert) do
case find_matching_extension(wwdr_cert, @wwdr_cert_policies_extension_oid) do
nil ->
{:error, :wwdr_cert_policies_extension_oid_mismatch}
_extension ->
:ok
end
end
defp verify_itunes_cert_marker_extension_oid(itunes_cert) do
case find_matching_extension(itunes_cert, @itunes_cert_marker_extension_oid) do
nil ->
{:error, :itunes_cert_marker_extension_oid_mismatch}
_extension ->
:ok
end
end
defp verify_itunes_cert({:certificate, cert}, wwdr_cert) do
wwdr_public_key = extract_public_key(wwdr_cert)
cert = :public_key.der_encode(:Certificate, cert)
case :public_key.pkix_verify(cert, wwdr_public_key) do
true ->
:ok
false ->
{:error, :invalid_itunes_cert}
end
end
defp extract_public_key({:certificate, cert}) do
cert
|> PublicKey.certificate(:tbsCertificate)
|> PublicKey.tbs_certificate(:subjectPublicKeyInfo)
|> PublicKey.subject_public_key_info(:subjectPublicKey)
|> decode_public_key()
end
defp decode_public_key(key) do
:public_key.der_decode(:RSAPublicKey, key)
end
defp find_matching_extension({:certificate, cert}, oid) do
cert
|> PublicKey.certificate(:tbsCertificate)
|> PublicKey.tbs_certificate(:extensions)
|> Enum.find(fn ext ->
oid === PublicKey.extension(ext, :extnID)
end)
end
defp verify_signature(signer, receipt, itunes_cert) do
signature = get_signer_signature(signer)
public_key = extract_public_key(itunes_cert)
with {:ok, payload} <- Extractor.extract_payload(receipt),
true <- :public_key.verify(payload, :sha, signature, public_key) do
:ok
else
false ->
{:error, :invalid_signer_signature}
{:error, err} ->
{:error, err}
end
end
defp get_signer_signature(signer) do
{
:SignerInfo,
:siVer1,
_signer_identifier = {
:IssuerAndSerialNumber,
_issuer,
_serial_number
},
_digest_algorithm,
_authenticated_attributes,
_digest_encryption_algorithm,
encrypted_digest,
_unauthenticated_attributes
} = signer
encrypted_digest
end
defp destruct_receipt(receipt) do
{
:ContentInfo,
_content_type,
_pkcs7_content = {
:SignedData,
:sdVer1,
_digest_algorithms,
{:ContentInfo, _, _data},
{:certSet, certs},
_cert_revocation_lists,
{:siSet, [signer]}
}
} = receipt
{certs, signer}
end
end
|
lib/receipt_decoder/verifier/verifier.ex
| 0.739328 | 0.424054 |
verifier.ex
|
starcoder
|
defmodule Snitch.Tools.Helper.Order do
@moduledoc """
Helpers to insert variants and line items for handcrafted orders.
"""
@line_item %{
quantity: nil,
unit_price: nil,
product_id: nil,
order_id: nil,
inserted_at: DateTime.utc_now(),
updated_at: DateTime.utc_now()
}
@variant %{
sku: nil,
weight: Decimal.new("0.45"),
height: Decimal.new("0.15"),
depth: Decimal.new("0.1"),
width: Decimal.new("0.4"),
selling_price: nil,
shipping_category_id: nil,
inserted_at: DateTime.utc_now(),
updated_at: DateTime.utc_now(),
slug: "",
max_retail_price: nil
}
@doc """
Returns a list of variant `map`s using the `manifest`.
## Manifest schema
```
[
%{category: %ShippingCategory{}}
]
```
This result is suitable for a `Ecto.Repo.insert_all/3`
"""
@spec variants_with_manifest([map], map) :: [map]
def variants_with_manifest(manifest, context) do
variant_count = Map.get(context, :variant_count, 3)
manifest
|> Stream.with_index()
|> Stream.map(fn {%{category: sc}, index} ->
%{
@variant
| sku: "shoes-nike-#{index}",
shipping_category_id: sc.id,
selling_price: random_price(:USD, 14, 4),
slug: "product_slug-#{index}",
max_retail_price: random_price(:USD, 14, 4)
}
end)
|> Enum.take(variant_count)
end
@doc """
Returns a list of line_item `map`s after zipping `variants` and
`quantities`.
The price fields are not computed. This result is suitable for a
`Ecto.Repo.insert_all/3`
"""
@spec line_items([Variant.t()], [integer], non_neg_integer | nil) :: [map]
def line_items(variants, quantities, order_id \\ nil) do
variants
|> Stream.zip(quantities)
|> Stream.reject(fn {_, q} -> q == 0 end)
|> Enum.map(fn
{v, q} when is_map(v) ->
%{@line_item | quantity: q, product_id: v.id, order_id: order_id}
{v, q} when is_integer(v) ->
%{@line_item | quantity: q, product_id: v, order_id: order_id}
end)
end
@doc """
Returns a list of line_item `map`s after zipping `variants` and
`quantities`, including price fields.
The unit price is taken from each variant's selling price. This result is suitable for a
`Ecto.Repo.insert_all/3`
"""
@spec line_items_with_price([Variant.t()], [integer], non_neg_integer | nil) :: [map]
def line_items_with_price(variants, quantities, order_id \\ nil) do
variants
|> Stream.zip(quantities)
|> Stream.reject(fn {_, q} -> q == 0 end)
|> Enum.map(fn {v, q} when is_map(v) ->
%{
@line_item
| quantity: q,
product_id: v.id,
order_id: order_id,
unit_price: v.selling_price
}
end)
end
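# Sketch (v1 and v2 are hypothetical variant structs with ids 1 and 2):
#
#   line_items_with_price([v1, v2], [2, 0], 42)
#   # drops the zero-quantity pair and returns one map with
#   # quantity: 2, product_id: 1, order_id: 42, unit_price: v1.selling_price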
defp random_price(currency, min, delta) do
Money.new(currency, "#{:rand.uniform(delta) + min}.99")
end
end
|
apps/snitch_core/lib/core/tools/helpers/order.ex
| 0.912893 | 0.83868 |
order.ex
|
starcoder
|
defmodule Liquex.Represent do
@moduledoc """
Helper methods for maps
"""
alias Liquex.Representable
defguard is_lazy_object(value) when is_map(value) or is_list(value)
@doc """
Converts any object, deeply mapping atom keys to strings.
The value `lazy` determines whether nested objects are represented eagerly.
If set to true, a function (thunk) is returned for any nested object that
reports itself as lazy.
Lazy values are automatically evaluated in the Liquid rendering engine.
"""
@spec represent(any, boolean) :: any
def represent(value, lazy \\ false)
def represent(struct, lazy) when is_struct(struct) do
struct
|> Map.from_struct()
|> represent(lazy)
end
def represent(value, lazy) when is_map(value) do
value
|> Enum.map(&to_string_key(&1, lazy))
|> Enum.into(%{})
end
# Walk the list and represent the keys of of any map members
def represent(value, lazy) when is_list(value), do: Enum.map(value, &lazy_represent(&1, lazy))
def represent(value, _), do: value
@spec expand(term) :: term
@doc """
Expands a previously represented object.
Useful when the represented object contains lazy fields. This can be useful
for generating JSON values from a represented object. For example, if you
had a `dump` filter that dumped a value to the page as JSON, you would use
this function so that lazy functions get represented correctly instead of
as functions.
"""
def expand(value) when is_struct(value), do: value
def expand(value) when is_function(value), do: expand(value.())
def expand(value) when is_map(value) do
value
|> Enum.map(fn {k, v} -> {k, expand(v)} end)
|> Enum.into(%{})
end
def expand(value) when is_list(value), do: Enum.map(value, &expand/1)
def expand(value), do: value
defp to_string_key({k, v}, lazy) when is_atom(k),
do: to_string_key({Atom.to_string(k), v}, lazy)
defp to_string_key({k, v}, lazy), do: {k, lazy_represent(v, lazy)}
defp lazy_represent(value, true) do
case Representable.is_lazy(value) do
true -> fn -> Liquex.Representable.represent(value, true) end
_ -> Liquex.Representable.represent(value, true)
end
end
defp lazy_represent(v, false), do: Liquex.Representable.represent(v, false)
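# A minimal sketch, assuming the Liquex.Representable implementations for
# maps and strings (not shown here) delegate back into this module:
#
#   represent(%{user: %{name: "Ada"}})
#   #=> %{"user" => %{"name" => "Ada"}}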
end
|
lib/liquex/represent.ex
| 0.841533 | 0.594375 |
represent.ex
|
starcoder
|
defmodule Quarry.Sort do
@moduledoc false
require Ecto.Query
alias Quarry.{Join, From}
@sort_direction [:asc, :desc]
@spec build({Ecto.Query.t(), [Quarry.error()]}, Quarry.sort()) ::
{Ecto.Query.t(), [Quarry.error()]}
def build({query, errors}, keys, load_path \\ []) do
root_binding = From.get_root_binding(query)
schema = From.get_root_schema(query)
state = [
schema: schema,
binding: root_binding,
load_path: load_path
]
sort({query, errors}, [], keys, state)
end
defp sort(acc, join_deps, keys, state) when is_list(keys) do
Enum.reduce(
keys,
acc,
fn entry, {query, errors} ->
sort_key(entry, join_deps,
query: query,
schema: state[:schema],
binding: state[:binding],
load_path: state[:load_path],
errors: errors
)
end
)
end
defp sort(acc, join_deps, key, state),
do: sort(acc, join_deps, [key], state)
defp sort_key({dir, path}, join_deps, state), do: sort_key(path, dir, join_deps, state)
defp sort_key(path, join_deps, state), do: sort_key(path, :asc, join_deps, state)
defp sort_key([field_name], dir, join_deps, state),
do: sort_key(field_name, dir, join_deps, state)
defp sort_key([assoc | path], dir, join_deps, state) do
schema = state[:schema]
associations = schema.__schema__(:associations)
if assoc in associations do
child_schema = schema.__schema__(:association, assoc).related
state = Keyword.put(state, :schema, child_schema)
sort_key(path, dir, [assoc | join_deps], state)
else
error = build_error(assoc, join_deps, state)
{state[:query], [error | state[:errors]]}
end
end
defp sort_key(field_name, dir, join_deps, state) when is_atom(field_name) do
if field_name in state[:schema].__schema__(:fields) and dir in @sort_direction do
{query, join_binding} = Join.join_dependencies(state[:query], state[:binding], join_deps)
query = Ecto.Query.order_by(query, [{^dir, field(as(^join_binding), ^field_name)}])
{query, state[:errors]}
else
error = build_error(field_name, join_deps, state)
{state[:query], [error | state[:errors]]}
end
end
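# Examples of accepted sort keys (each resolves through the clauses above):
#
#   build({query, []}, :title)                  # :title, ascending (default)
#   build({query, []}, desc: :inserted_at)      # explicit direction
#   build({query, []}, asc: [:author, :name])   # joins :author, sorts on its :name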
defp build_error(field, path, state) do
%{
type: :sort,
path: Enum.reverse([field | path]),
load_path: Enum.reverse(state[:load_path]),
message: "Quarry couldn't find field \"#{field}\" on Ecto schema \"#{state[:schema]}\""
}
end
end
|
lib/quarry/sort.ex
| 0.627381 | 0.409988 |
sort.ex
|
starcoder
|
defmodule Geometry.WKB.Parser do
@moduledoc false
alias Geometry.{
GeometryCollection,
GeometryCollectionM,
GeometryCollectionZ,
GeometryCollectionZM,
Hex,
LineString,
LineStringM,
LineStringZ,
LineStringZM,
MultiLineString,
MultiLineStringM,
MultiLineStringZ,
MultiLineStringZM,
MultiPoint,
MultiPointM,
MultiPointZ,
MultiPointZM,
MultiPolygon,
MultiPolygonM,
MultiPolygonZ,
MultiPolygonZM,
Point,
PointM,
PointZ,
PointZM,
Polygon,
PolygonM,
PolygonZ,
PolygonZM
}
@codes %{
0x00000001 => {Point, false},
0x20000001 => {Point, true},
0x40000001 => {PointM, false},
0x60000001 => {PointM, true},
0x80000001 => {PointZ, false},
0xA0000001 => {PointZ, true},
0xC0000001 => {PointZM, false},
0xE0000001 => {PointZM, true},
0x00000002 => {LineString, false},
0x20000002 => {LineString, true},
0x40000002 => {LineStringM, false},
0x60000002 => {LineStringM, true},
0x80000002 => {LineStringZ, false},
0xA0000002 => {LineStringZ, true},
0xC0000002 => {LineStringZM, false},
0xE0000002 => {LineStringZM, true},
0x00000003 => {Polygon, false},
0x20000003 => {Polygon, true},
0x40000003 => {PolygonM, false},
0x60000003 => {PolygonM, true},
0x80000003 => {PolygonZ, false},
0xA0000003 => {PolygonZ, true},
0xC0000003 => {PolygonZM, false},
0xE0000003 => {PolygonZM, true},
0x00000004 => {MultiPoint, false},
0x20000004 => {MultiPoint, true},
0x40000004 => {MultiPointM, false},
0x60000004 => {MultiPointM, true},
0x80000004 => {MultiPointZ, false},
0xA0000004 => {MultiPointZ, true},
0xC0000004 => {MultiPointZM, false},
0xE0000004 => {MultiPointZM, true},
0x00000005 => {MultiLineString, false},
0x20000005 => {MultiLineString, true},
0x40000005 => {MultiLineStringM, false},
0x60000005 => {MultiLineStringM, true},
0x80000005 => {MultiLineStringZ, false},
0xA0000005 => {MultiLineStringZ, true},
0xC0000005 => {MultiLineStringZM, false},
0xE0000005 => {MultiLineStringZM, true},
0x00000006 => {MultiPolygon, false},
0x20000006 => {MultiPolygon, true},
0x40000006 => {MultiPolygonM, false},
0x60000006 => {MultiPolygonM, true},
0x80000006 => {MultiPolygonZ, false},
0xA0000006 => {MultiPolygonZ, true},
0xC0000006 => {MultiPolygonZM, false},
0xE0000006 => {MultiPolygonZM, true},
0x00000007 => {GeometryCollection, false},
0x20000007 => {GeometryCollection, true},
0x40000007 => {GeometryCollectionM, false},
0x60000007 => {GeometryCollectionM, true},
0x80000007 => {GeometryCollectionZ, false},
0xA0000007 => {GeometryCollectionZ, true},
0xC0000007 => {GeometryCollectionZM, false},
0xE0000007 => {GeometryCollectionZM, true}
}
@spec parse(wkb, mode) :: {:ok, geometry | {geometry, srid}} | {:error, message, rest, offset}
when wkb: Geometry.wkb(),
mode: Geometry.mode(),
geometry: Geometry.t(),
srid: Geometry.srid(),
message: String.t(),
rest: binary(),
offset: non_neg_integer()
def parse(wkb, mode) do
with {:ok, {module, endian, srid?}, rest, offset} <-
geometry(wkb, 0, mode),
{:ok, srid, rest, offset} <- srid(rest, offset, srid?, endian, mode),
{:ok, geometry, rest, offset} <- geometry_body(module, rest, offset, endian, mode),
:ok <- eos(rest, offset) do
case srid? do
true -> {:ok, {geometry, srid}}
false -> {:ok, geometry}
end
end
end
# compile-time helpers
to_atoms = fn lists ->
Enum.map(lists, fn list -> Enum.map(list, &String.to_atom/1) end)
end
extend = fn xs, ys ->
Enum.map(ys, fn y ->
Enum.map(xs, fn x -> x <> y end)
end)
end
args = fn list ->
list |> extend.(["", "_z", "_m", "_zm"]) |> to_atoms.()
end
# combinators/parsers
defp geometry(str, offset, mode) do
with {:ok, endian, rest, offset} <- endian(str, offset, mode),
{:ok, {module, srid?}, rest, offset} <- code(rest, offset, endian, mode) do
{:ok, {module, endian, srid?}, rest, offset}
end
end
defp endian(<<"00", rest::binary()>>, offset, :hex) do
{:ok, :xdr, rest, offset + 2}
end
defp endian(<<"01", rest::binary()>>, offset, :hex) do
{:ok, :ndr, rest, offset + 2}
end
defp endian(<<got::binary-size(2), rest::binary()>>, offset, :hex) do
{:error, ~s(expected endian flag "00" or "01", got #{inspect(got)}), rest, offset}
end
defp endian(str, offset, :hex) do
{:error, ~s(expected endian flag "00" or "01"), str, offset}
end
defp endian(<<0::8, rest::binary()>>, offset, :binary) do
{:ok, :xdr, rest, offset + 1}
end
defp endian(<<1::8, rest::binary()>>, offset, :binary) do
{:ok, :ndr, rest, offset + 1}
end
defp endian(str, offset, :binary) do
{:error, "expected endian flag", str, offset}
end
defp check_endian(<<endian::binary-size(2), rest::binary()>>, offset, expected, :hex) do
case {endian, expected} do
{"00", :xdr} ->
{:ok, rest, offset + 2}
{"01", :ndr} ->
{:ok, rest, offset + 2}
{got, :ndr} ->
{:error, ~s(expected endian flag "01", got #{inspect(got)}), rest, offset}
{got, :xdr} ->
{:error, ~s(expected endian flag "00", got #{inspect(got)}), rest, offset}
end
end
defp check_endian(<<0::8, rest::binary()>>, offset, :xdr, :binary) do
{:ok, rest, offset + 1}
end
defp check_endian(<<1::8, rest::binary()>>, offset, :ndr, :binary) do
{:ok, rest, offset + 1}
end
defp check_endian(rest, offset, endian, _mode) do
{:error, "expected endian #{inspect(endian)}", rest, offset}
end
defp code(<<code::binary-size(8), rest::binary()>>, offset, endian, :hex) do
case fetch_info(code, endian) do
{:ok, info} ->
{:ok, info, rest, offset + 8}
{:error, :invalid} ->
{:error, "invalid geomtry code: #{inspect(code)}", rest, offset}
:error ->
{:error, "unknown geomtry code: #{inspect(code)}", rest, offset}
end
end
defp code(<<code::big-integer-size(32), rest::binary()>>, offset, :xdr, :binary) do
code(code, rest, offset)
end
defp code(<<code::little-integer-size(32), rest::binary()>>, offset, :ndr, :binary) do
code(code, rest, offset)
end
defp code(str, offset, _endian, _mode) do
{:error, "expected geometry code", str, offset}
end
defp code(code, rest, offset) do
case Map.fetch(@codes, code) do
{:ok, info} ->
{:ok, info, rest, offset + 4}
:error ->
{:error, "unknown geomtry code: #{inspect(code)}", rest, offset}
end
end
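# e.g. the 32-bit code 0x00000001 (big-endian / :xdr encoding) maps via
# @codes to {Point, false}, i.e. a 2D point with no embedded SRID.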
defp check_code(<<code::binary-size(8), rest::binary()>>, offset, endian, :hex, type) do
case fetch_info(code, endian) do
{:ok, {module, _srid?}} ->
case Macro.underscore(module) == "geometry/#{type}" do
true -> {:ok, rest, offset + 8}
false -> {:error, "unexpected code #{inspect(code)} for sub-geometry", rest, offset}
end
{:error, :invalid} ->
{:error, "invalid sub-geomtry code: #{inspect(code)}", rest, offset}
:error ->
{:error, "unknown sub-geomtry code: #{inspect(code)}", rest, offset}
end
end
defp check_code(<<code::big-integer-size(32), rest::binary()>>, offset, :xdr, :binary, type) do
check_code(code, rest, offset, type)
end
defp check_code(<<code::little-integer-size(32), rest::binary()>>, offset, :ndr, :binary, type) do
check_code(code, rest, offset, type)
end
defp check_code(str, offset, _endian, _mode, _type) do
{:error, "expected geometry code", str, offset}
end
defp check_code(code, rest, offset, type) do
case Map.fetch(@codes, code) do
{:ok, {module, _srid?}} ->
case Macro.underscore(module) == "geometry/#{type}" do
true -> {:ok, rest, offset + 4}
false -> {:error, "unexpected code #{inspect(code)} for sub-geometry", rest, offset}
end
:error ->
{:error, "unknown sub-geomtry code: #{inspect(code)}", rest, offset}
end
end
[
"point",
"line_string",
"polygon",
"multi_point",
"multi_line_string",
"multi_polygon"
]
|> Enum.flat_map(fn geometry ->
module = Macro.camelize(geometry)
[
["Elixir.Geometry.#{module}", geometry],
["Elixir.Geometry.#{module}M", "#{geometry}_m"],
["Elixir.Geometry.#{module}Z", "#{geometry}_z"],
["Elixir.Geometry.#{module}ZM", "#{geometry}_zm"]
]
end)
|> to_atoms.()
|> Enum.each(fn [module, parser] ->
defp geometry_body(unquote(module), str, offset, endian, mode) do
with {:ok, coordinates, rest, offset} <- unquote(parser)(str, offset, endian, mode) do
{:ok, unquote(module).from_coordinates(coordinates), rest, offset}
end
end
end)
["geometry_collection"]
|> Enum.flat_map(fn geometry ->
module = Macro.camelize(geometry)
[
["Elixir.Geometry.#{module}", geometry],
["Elixir.Geometry.#{module}M", "#{geometry}_m"],
["Elixir.Geometry.#{module}Z", "#{geometry}_z"],
["Elixir.Geometry.#{module}ZM", "#{geometry}_zm"]
]
end)
|> to_atoms.()
|> Enum.each(fn [module, parser] ->
defp geometry_body(unquote(module), str, offset, endian, mode) do
with {:ok, geometries, rest, offset} <- unquote(parser)(str, offset, endian, mode) do
{:ok, unquote(module).new(geometries), rest, offset}
end
end
end)
["point", "coordinate"]
|> args.()
|> Enum.each(fn [point, coordinate] ->
defp unquote(point)(str, offset, endian, mode) do
unquote(coordinate)(str, offset, endian, mode)
end
end)
["line_string", "coordinates"]
|> args.()
|> Enum.each(fn [line_string, coordinates] ->
defp unquote(line_string)(str, offset, endian, mode) do
with {:ok, length, rest, offset} <- length(str, offset, endian, mode),
{:ok, acc, rest, offset} <- unquote(coordinates)(length, rest, offset, endian, mode) do
{:ok, acc, rest, offset}
end
end
end)
["polygon", "rings"]
|> args.()
|> Enum.each(fn [polygon, rings] ->
defp unquote(polygon)(str, offset, endian, mode) do
with {:ok, length, rest, offset} <- length(str, offset, endian, mode),
{:ok, acc, rest, offset} <- unquote(rings)(length, rest, offset, endian, mode) do
{:ok, acc, rest, offset}
end
end
end)
["multi_point", "points"]
|> args.()
|> Enum.each(fn [multi_point, points] ->
defp unquote(multi_point)(str, offset, endian, mode) do
with {:ok, length, rest, offset} <- length(str, offset, endian, mode),
{:ok, acc, rest, offset} <- unquote(points)(length, rest, offset, endian, mode) do
{:ok, acc, rest, offset}
end
end
end)
["multi_line_string", "line_strings"]
|> args.()
|> Enum.each(fn [multi_line_string, line_strings] ->
defp unquote(multi_line_string)(str, offset, endian, mode) do
with {:ok, length, rest, offset} <- length(str, offset, endian, mode),
{:ok, acc, rest, offset} <- unquote(line_strings)(length, rest, offset, endian, mode) do
{:ok, acc, rest, offset}
end
end
end)
["multi_polygon", "polygons"]
|> args.()
|> Enum.each(fn [multi_polygon, polygons] ->
defp unquote(multi_polygon)(str, offset, endian, mode) do
with {:ok, length, rest, offset} <- length(str, offset, endian, mode),
{:ok, acc, rest, offset} <- unquote(polygons)(length, rest, offset, endian, mode) do
{:ok, acc, rest, offset}
end
end
end)
["geometry_collection", "geometry_collection_items"]
|> args.()
|> Enum.each(fn [geometry_collection, geometry_collection_items] ->
defp unquote(geometry_collection)(str, offset, endian, mode) do
with {:ok, length, rest, offset} <- length(str, offset, endian, mode),
{:ok, acc, rest, offset} <-
unquote(geometry_collection_items)(length, rest, offset, endian, mode) do
{:ok, acc, rest, offset}
end
end
end)
["geometry_collection_items"]
|> args.()
|> Enum.each(fn [geometry_collection_items] ->
defp unquote(geometry_collection_items)(n, str, offset, endian, mode, acc \\ [])
defp unquote(geometry_collection_items)(0, str, offset, _endian, _mode, acc) do
{:ok, acc, str, offset}
end
defp unquote(geometry_collection_items)(n, str, offset, endian, mode, acc) do
with {:ok, {module, _endian, srid?}, rest, offset} <- geometry(str, offset, mode),
{:ok, srid, rest, offset} <- srid(rest, offset, srid?, endian, mode),
{:ok, geometry, rest, offset} <- geometry_body(module, rest, offset, endian, mode) do
case srid == nil do
true ->
unquote(geometry_collection_items)(n - 1, rest, offset, endian, mode, [geometry | acc])
false ->
{:error, "unexpected SRID in sub-geometry", str, offset}
end
end
end
end)
["rings", "coordinates"]
|> args.()
|> Enum.each(fn [rings, coordinates] ->
defp unquote(rings)(n, str, offset, endian, mode, acc \\ [])
defp unquote(rings)(0, str, offset, _endian, _mode, acc) do
{:ok, Enum.reverse(acc), str, offset}
end
defp unquote(rings)(n, str, offset, endian, mode, acc) do
with {:ok, length, rest, offset} <- length(str, offset, endian, mode),
{:ok, coordinates, rest, offset} <-
unquote(coordinates)(length, rest, offset, endian, mode) do
unquote(rings)(n - 1, rest, offset, endian, mode, [coordinates | acc])
end
end
end)
["points", "point", "coordinate"]
|> args.()
|> Enum.each(fn [points, point, coordinate] ->
defp unquote(points)(n, str, offset, endian, mode, acc \\ [])
defp unquote(points)(0, str, offset, _endian, _mode, acc) do
{:ok, Enum.reverse(acc), str, offset}
end
defp unquote(points)(n, str, offset, endian, mode, acc) do
with {:ok, rest, offset} <- check_endian(str, offset, endian, mode),
{:ok, rest, offset} <- check_code(rest, offset, endian, mode, unquote(point)),
{:ok, coordinates, rest, offset} <- unquote(coordinate)(rest, offset, endian, mode) do
unquote(points)(n - 1, rest, offset, endian, mode, [coordinates | acc])
end
end
end)
["polygons", "polygon"]
|> args.()
|> Enum.each(fn [polygons, polygon] ->
defp unquote(polygons)(n, str, offset, endian, mode, acc \\ [])
defp unquote(polygons)(0, str, offset, _endian, _mode, acc) do
{:ok, Enum.reverse(acc), str, offset}
end
defp unquote(polygons)(n, str, offset, endian, mode, acc) do
with {:ok, rest, offset} <- check_endian(str, offset, endian, mode),
{:ok, rest, offset} <- check_code(rest, offset, endian, mode, unquote(polygon)),
{:ok, coordinates, rest, offset} <- unquote(polygon)(rest, offset, endian, mode) do
unquote(polygons)(n - 1, rest, offset, endian, mode, [coordinates | acc])
end
end
end)
["line_strings", "line_string"]
|> args.()
|> Enum.each(fn [line_strings, line_string] ->
defp unquote(line_strings)(n, str, offset, endian, mode, acc \\ [])
defp unquote(line_strings)(0, str, offset, _endian, _mode, acc) do
{:ok, Enum.reverse(acc), str, offset}
end
defp unquote(line_strings)(n, str, offset, endian, mode, acc) do
with {:ok, rest, offset} <- check_endian(str, offset, endian, mode),
{:ok, rest, offset} <- check_code(rest, offset, endian, mode, unquote(line_string)),
{:ok, coordinates, rest, offset} <- unquote(line_string)(rest, offset, endian, mode) do
unquote(line_strings)(n - 1, rest, offset, endian, mode, [coordinates | acc])
end
end
end)
["coordinates", "coordinate"]
|> args.()
|> Enum.each(fn [coordinates, coordinate] ->
defp unquote(coordinates)(n, str, offset, endian, mode, acc \\ [])
defp unquote(coordinates)(0, str, offset, _endian, _mode, acc) do
{:ok, Enum.reverse(acc), str, offset}
end
defp unquote(coordinates)(n, str, offset, endian, mode, acc) do
with {:ok, coordinate, rest, offset} <- unquote(coordinate)(str, offset, endian, mode) do
unquote(coordinates)(n - 1, rest, offset, endian, mode, [coordinate | acc])
end
end
end)
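  # A payload length is a 32-bit unsigned integer: eight hex characters in
  # :hex mode or four raw bytes in :binary mode, read in the declared byte
  # order (:xdr = big-endian, :ndr = little-endian).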
defp length(<<hex::binary-size(8), rest::binary()>>, offset, endian, :hex) do
case Hex.to_integer(hex, endian) do
{:ok, length} -> {:ok, length, rest, offset + 8}
:error -> {:error, "invalid length #{inspect(hex)}", rest, offset}
end
end
defp length(<<length::big-integer-size(32), rest::binary()>>, offset, :xdr, :binary) do
{:ok, length, rest, offset + 4}
end
defp length(<<length::little-integer-size(32), rest::binary()>>, offset, :ndr, :binary) do
{:ok, length, rest, offset + 4}
end
defp length(str, offset, _endian, _mode) do
{:error, "expected length, got #{inspect(str)}", str, offset}
end
defp coordinate_zm(
<<x::binary-size(16), y::binary-size(16), z::binary-size(16), m::binary-size(16),
rest::binary()>>,
offset,
endian,
:hex
) do
with {:x, {:ok, x}} <- {:x, Hex.to_float(x, endian)},
{:y, {:ok, y}} <- {:y, Hex.to_float(y, endian)},
{:z, {:ok, z}} <- {:z, Hex.to_float(z, endian)},
{:m, {:ok, m}} <- {:m, Hex.to_float(m, endian)} do
{:ok, [x, y, z, m], rest, offset + 64}
else
{:x, :error} -> {:error, "expected float, got #{inspect(x)}", rest, offset}
{:y, :error} -> {:error, "expected float, got #{inspect(y)}", rest, offset + 16}
{:z, :error} -> {:error, "expected float, got #{inspect(z)}", rest, offset + 32}
{:m, :error} -> {:error, "expected float, got #{inspect(m)}", rest, offset + 48}
end
end
defp coordinate_zm(
<<x::big-float-size(64), y::big-float-size(64), z::big-float-size(64),
m::big-float-size(64), rest::binary()>>,
offset,
:xdr,
:binary
) do
{:ok, [x, y, z, m], rest, offset + 32}
end
defp coordinate_zm(
<<x::little-float-size(64), y::little-float-size(64), z::little-float-size(64),
m::little-float-size(64), rest::binary()>>,
offset,
:ndr,
:binary
) do
{:ok, [x, y, z, m], rest, offset + 32}
end
defp coordinate_z(
<<x::binary-size(16), y::binary-size(16), z::binary-size(16), rest::binary()>>,
offset,
endian,
:hex
) do
with {:x, {:ok, x}} <- {:x, Hex.to_float(x, endian)},
{:y, {:ok, y}} <- {:y, Hex.to_float(y, endian)},
{:z, {:ok, z}} <- {:z, Hex.to_float(z, endian)} do
{:ok, [x, y, z], rest, offset + 48}
else
{:x, :error} -> {:error, "expected float, got #{inspect(x)}", rest, offset}
{:y, :error} -> {:error, "expected float, got #{inspect(y)}", rest, offset + 16}
{:z, :error} -> {:error, "expected float, got #{inspect(z)}", rest, offset + 32}
end
end
defp coordinate_z(
<<x::big-float-size(64), y::big-float-size(64), z::big-float-size(64), rest::binary()>>,
offset,
:xdr,
:binary
) do
{:ok, [x, y, z], rest, offset + 24}
end
defp coordinate_z(
<<x::little-float-size(64), y::little-float-size(64), z::little-float-size(64),
rest::binary()>>,
offset,
:ndr,
:binary
) do
{:ok, [x, y, z], rest, offset + 24}
end
defp coordinate_m(
<<x::binary-size(16), y::binary-size(16), m::binary-size(16), rest::binary()>>,
offset,
endian,
:hex
) do
with {:x, {:ok, x}} <- {:x, Hex.to_float(x, endian)},
{:y, {:ok, y}} <- {:y, Hex.to_float(y, endian)},
{:m, {:ok, m}} <- {:m, Hex.to_float(m, endian)} do
{:ok, [x, y, m], rest, offset + 48}
else
{:x, :error} -> {:error, "expected float, got #{inspect(x)}", rest, offset}
{:y, :error} -> {:error, "expected float, got #{inspect(y)}", rest, offset + 16}
{:m, :error} -> {:error, "expected float, got #{inspect(m)}", rest, offset + 32}
end
end
defp coordinate_m(
<<x::big-float-size(64), y::big-float-size(64), m::big-float-size(64), rest::binary()>>,
offset,
:xdr,
:binary
) do
{:ok, [x, y, m], rest, offset + 24}
end
defp coordinate_m(
<<x::little-float-size(64), y::little-float-size(64), m::little-float-size(64),
rest::binary()>>,
offset,
:ndr,
:binary
) do
{:ok, [x, y, m], rest, offset + 24}
end
defp coordinate(
<<x::binary-size(16), y::binary-size(16), rest::binary()>>,
offset,
endian,
:hex
) do
with {:x, {:ok, x}} <- {:x, Hex.to_float(x, endian)},
{:y, {:ok, y}} <- {:y, Hex.to_float(y, endian)} do
{:ok, [x, y], rest, offset + 32}
else
{:x, :error} -> {:error, "expected float, got #{inspect(x)}", rest, offset}
{:y, :error} -> {:error, "expected float, got #{inspect(y)}", rest, offset + 16}
end
end
defp coordinate(
<<x::big-float-size(64), y::big-float-size(64), rest::binary()>>,
offset,
:xdr,
:binary
) do
{:ok, [x, y], rest, offset + 16}
end
defp coordinate(
<<x::little-float-size(64), y::little-float-size(64), rest::binary()>>,
offset,
:ndr,
:binary
) do
{:ok, [x, y], rest, offset + 16}
end
["coordinate"]
|> args.()
|> Enum.each(fn [coordinate] ->
defp unquote(coordinate)(rest, offset, _endian, _mode) do
{:error, "invalid coordinate", rest, offset}
end
end)
defp srid(str, offset, false = _srid?, _endian, _mode), do: {:ok, nil, str, offset}
defp srid(<<srid::binary-size(8), rest::binary()>>, offset, _srid?, endian, :hex) do
case Hex.to_integer(srid, endian) do
{:ok, srid} -> {:ok, srid, rest, offset + 8}
:error -> {:error, "invalid SRID #{inspect(srid)}", rest, offset}
end
end
defp srid(<<srid::big-integer-size(32), rest::binary()>>, offset, _srid?, :xdr, :binary) do
{:ok, srid, rest, offset + 4}
end
defp srid(<<srid::little-integer-size(32), rest::binary()>>, offset, _srid?, :ndr, :binary) do
{:ok, srid, rest, offset + 4}
end
defp srid(rest, offset, _srid?, _endian, _mode) do
{:error, "expected SRID, got #{inspect(rest)}", rest, offset}
end
defp eos("", _offset), do: :ok
defp eos(eos, offset) do
    if Regex.match?(~r/^[\s\n]*$/, eos) do
      :ok
    else
      {:error, "expected EOS", eos, offset}
    end
end
defp fetch_info(code, endian) do
case Hex.to_integer(code, endian) do
{:ok, int} -> Map.fetch(@codes, int)
:error -> {:error, :invalid}
end
end
end
|
lib/geometry/wkb/parser.ex
| 0.630571 | 0.588505 |
parser.ex
|
starcoder
|
defprotocol Contex.Scale do
@moduledoc """
Provides a common interface for scales generating plotting coordinates.
This enables Log & Linear scales, for example, to be handled exactly
the same way in plot generation code.
Example:
```
# It doesn't matter if the x & y scales are log, linear, or discretizing scales
x_tx_fn = Scale.domain_to_range_fn(x_scale)
y_tx_fn = Scale.domain_to_range_fn(y_scale)
points_to_plot = Enum.map(big_load_of_data, fn %{x: x, y: y}=_row ->
{x_tx_fn.(x), y_tx_fn.(y)}
end)
```
"""
@doc """
Returns a list of tick values in the domain of the scale
Typically these are used to label the tick
"""
@spec ticks_domain(t()) :: list(any())
def ticks_domain(scale)
@doc """
Returns a list of tick locations in the range of the scale
Typically these are used to plot the location of the tick
"""
@spec ticks_range(t()) :: list(number())
def ticks_range(scale)
@doc """
Returns a transform function to convert values within the domain to the
range.
Typically this function is used to calculate plotting coordinates for input data.
"""
@spec domain_to_range_fn(t()) :: fun()
def domain_to_range_fn(scale)
@doc """
Transforms a value in the domain to a plotting coordinate within the range
"""
@spec domain_to_range(t(), any()) :: number()
def domain_to_range(scale, domain_val)
@doc """
Returns the plotting range set for the scale
Note that there is not an equivalent for the domain, as the domain is specific to
the type of scale.
"""
@spec get_range(t()) :: {number(), number()}
def get_range(scale)
@doc """
Applies a plotting range set for the scale
"""
@spec set_range(t(), number(), number()) :: t()
def set_range(scale, start, finish)
@doc """
Formats a domain value according to formatting rules calculated for the scale.
For example, timescales will have formatting rules calculated based on the
overall time period being plotted. Numeric scales may calculate number of
decimal places to show based on the range of data being plotted.
"""
@spec get_formatted_tick(t(), number()) :: String.t()
def get_formatted_tick(scale, tick_val)
end
|
lib/chart/scale.ex
| 0.940024 | 0.94887 |
scale.ex
|
starcoder
|
defmodule LcdDisplay.HD44780.GPIO do
@moduledoc """
Knows how to communicate with an HD44780-type display using GPIO pins directly.
Supports the 4-bit mode only.
You can turn on/off the backlight.
## Usage
```
config = %{
pin_rs: 2,
pin_rw: 3,
pin_en: 4,
pin_d4: 23,
pin_d5: 24,
pin_d6: 25,
pin_d7: 26,
pin_led: 12,
}
# Start the LCD driver and get the initial display state.
{:ok, display} = LcdDisplay.HD44780.GPIO.start(config)
# Run a command and the display state will be updated.
{:ok, display} = LcdDisplay.HD44780.GPIO.execute(display, {:print, "Hello world"})
```
"""
use LcdDisplay.HD44780.Driver
alias LcdDisplay.GPIO, as: ParallelBus
@required_config_keys [
:pin_rs,
:pin_en,
:pin_d4,
:pin_d5,
:pin_d6,
:pin_d7
]
@optional_config_keys [:rows, :cols, :font_size, :pin_rw, :pin_led]
@type display_driver :: LcdDisplay.HD44780.Driver.t()
@typedoc """
The configuration options.
"""
@type config :: %{
required(:pin_rs) => pos_integer,
required(:pin_rw) => pos_integer,
required(:pin_en) => pos_integer,
required(:pin_d4) => pos_integer,
required(:pin_d5) => pos_integer,
required(:pin_d6) => pos_integer,
required(:pin_d7) => pos_integer,
required(:pin_led) => pos_integer,
optional(:rows) => 1..4,
optional(:cols) => 8..20,
optional(:font_size) => pos_integer
}
@doc """
Initializes the LCD driver and returns the initial display state.
"""
@impl LcdDisplay.HD44780.Driver
@spec start(config) :: {:ok, display_driver} | {:error, any}
def start(config) do
number_of_lines = if config[:rows] == 1, do: @number_of_lines_1, else: @number_of_lines_2
font_size = if config[:font_size] == "5x10", do: @font_size_5x10, else: @font_size_5x8
{:ok,
config
|> initial_state()
|> set_backlight(true)
|> initialize_display(function_set: @cmd_function_set ||| @mode_4bit ||| font_size ||| number_of_lines)}
rescue
e -> {:error, e}
end
@spec initial_state(config) :: display_driver | no_return
defp initial_state(opts) do
# Raise an error when required key is missing.
Enum.each(@required_config_keys, &Map.fetch!(opts, &1))
opts
# Ensure that the datatype is map and remove garbage keys.
|> Map.take(@required_config_keys ++ @optional_config_keys)
|> Map.merge(%{
driver_module: __MODULE__,
rows: opts[:rows] || @default_rows,
cols: opts[:cols] || @default_cols,
# Initial values for features that we can change later.
entry_mode: @cmd_entry_mode_set ||| @entry_left,
display_control: @cmd_display_control ||| @display_on
})
|> open_gpio_pins()
end
# Initializes the display for 4-bit interface. See Hitachi HD44780 datasheet page 46 for details.
@spec initialize_display(display_driver, list) :: display_driver | no_return
defp initialize_display(display, function_set: function_set) do
display
# Function set (8-bit mode; Interface is 8 bits long)
|> write_four_bits(0x03)
|> delay(5)
|> write_four_bits(0x03)
|> delay(5)
|> write_four_bits(0x03)
|> delay(1)
# Function set (4-bit mode; Interface is 8 bits long)
|> write_four_bits(0x02)
# Function set (4-bit mode; Interface is 4 bits long)
# The number of display lines and character font cannot be changed after this point.
|> write_instruction(function_set)
|> write_feature(:display_control)
|> clear()
|> write_feature(:entry_mode)
end
# Setup GPIO output pins, merge the refs to the config map.
@spec open_gpio_pins(map) :: map | no_return
defp open_gpio_pins(config) do
refs =
config
|> Enum.filter(fn {key, _} -> String.starts_with?("#{key}", "pin_") end)
|> Enum.map(fn {pin_name, pin_number} ->
{:ok, gpio_ref} = ParallelBus.open(pin_number, :output)
key = String.replace("#{pin_name}", "pin", "ref") |> String.to_atom()
{key, gpio_ref}
end)
|> Enum.into(%{})
Map.merge(config, refs)
end
@doc """
Executes the specified command and returns a new display state.
"""
@impl LcdDisplay.HD44780.Driver
def execute(display, :clear), do: {:ok, clear(display)}
def execute(display, :home), do: {:ok, home(display)}
def execute(display, {:print, text}), do: {:ok, print(display, text)}
def execute(display, {:set_cursor, row, col}), do: {:ok, set_cursor(display, row, col)}
def execute(display, {:cursor, on_off_bool}), do: {:ok, set_display_control_flag(display, @cursor_on, on_off_bool)}
def execute(display, {:blink, on_off_bool}), do: {:ok, set_display_control_flag(display, @blink_on, on_off_bool)}
def execute(display, {:display, on_off_bool}), do: {:ok, set_display_control_flag(display, @display_on, on_off_bool)}
def execute(display, {:autoscroll, on_off_bool}), do: {:ok, set_entry_mode_flag(display, @autoscroll, on_off_bool)}
def execute(display, {:text_direction, :right_to_left}), do: {:ok, set_entry_mode_flag(display, @entry_left, false)}
def execute(display, {:text_direction, :left_to_right}), do: {:ok, set_entry_mode_flag(display, @entry_left, true)}
def execute(display, {:scroll, cols}), do: {:ok, scroll(display, cols)}
def execute(display, {:right, cols}), do: {:ok, right(display, cols)}
def execute(display, {:left, cols}), do: {:ok, left(display, cols)}
def execute(display, {:backlight, on_off_bool}), do: {:ok, set_backlight(display, on_off_bool)}
def execute(_display, command), do: {:error, {:unsupported, command}}
defp clear(display), do: display |> write_instruction(@cmd_clear_display) |> delay(2)
defp home(display), do: display |> write_instruction(@cmd_return_home) |> delay(2)
defp print(display, char) when is_integer(char), do: write_data(display, char)
defp print(display, text) when is_binary(text) do
# Translates a text to a charlist (list of bytes).
text |> to_charlist() |> Enum.each(&write_data(display, &1))
display
end
# Set the DDRAM address corresponding to the specified cursor position.
@spec set_cursor(display_driver, pos_integer, pos_integer) :: display_driver
defp set_cursor(display, row, col) when row >= 0 and col >= 0 do
ddram_address = determine_ddram_address({row, col}, Map.take(display, [:rows, :cols]))
write_instruction(display, @cmd_set_ddram_address ||| ddram_address)
end
@spec set_entry_mode_flag(display_driver, byte, boolean) :: display_driver
defp set_entry_mode_flag(display, flag, on_off_bool) do
entry_mode =
if on_off_bool,
do: display.entry_mode ||| flag,
else: display.entry_mode &&& ~~~flag
write_feature(%{display | entry_mode: entry_mode}, :entry_mode)
end
@spec set_display_control_flag(display_driver, byte, boolean) :: display_driver
defp set_display_control_flag(display, flag, on_off_bool) do
display_control =
if on_off_bool,
do: display.display_control ||| flag,
else: display.display_control &&& ~~~flag
write_feature(%{display | display_control: display_control}, :display_control)
end
# Write a feature based on the display state.
@spec write_feature(display_driver, LcdDisplay.HD44780.Driver.feature()) :: display_driver
defp write_feature(display, feature_key) when is_atom(feature_key) do
display |> write_instruction(Map.fetch!(display, feature_key))
end
defp scroll(display, 0), do: display
# Scroll the entire display left
defp scroll(display, cols) when cols < 0 do
write_instruction(display, @cmd_cursor_shift_control ||| @shift_display)
scroll(display, cols + 1)
end
# Scroll the entire display right
defp scroll(display, cols) when cols > 0 do
write_instruction(display, @cmd_cursor_shift_control ||| @shift_display ||| @shift_right)
scroll(display, cols - 1)
end
# Move cursor right
defp right(display, 0), do: display
defp right(display, cols) do
write_instruction(display, @cmd_cursor_shift_control ||| @shift_right)
right(display, cols - 1)
end
# Move cursor left
defp left(display, 0), do: display
defp left(display, cols) do
write_instruction(display, @cmd_cursor_shift_control)
left(display, cols - 1)
end
@spec set_backlight(display_driver, boolean) :: display_driver
defp set_backlight(display, flag) when is_boolean(flag) do
:ok = ParallelBus.write(display.ref_led, if(flag, do: 1, else: 0))
display
end
@impl LcdDisplay.HD44780.Driver
def write_instruction(display, byte), do: write_byte(display, byte, 0)
@impl LcdDisplay.HD44780.Driver
def write_data(display, byte), do: write_byte(display, byte, 1)
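  # In 4-bit mode each byte is transferred as two nibbles over D4-D7:
  # the high nibble first, then the low nibble, with RS selecting the
  # instruction (0) or data (1) register.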
@spec write_byte(display_driver, byte, 0 | 1) :: display_driver
defp write_byte(display, byte, mode) when is_integer(byte) and byte in 0..255 and mode in 0..1 do
<<first::4, second::4>> = <<byte>>
display
|> register_select(mode)
|> delay(1)
|> write_four_bits(first)
|> write_four_bits(second)
end
@spec write_four_bits(display_driver, 0..15) :: display_driver
defp write_four_bits(display, bits) when is_integer(bits) and bits in 0..15 do
<<bit1::1, bit2::1, bit3::1, bit4::1>> = <<bits::4>>
:ok = ParallelBus.write(display.ref_d4, bit4)
:ok = ParallelBus.write(display.ref_d5, bit3)
:ok = ParallelBus.write(display.ref_d6, bit2)
:ok = ParallelBus.write(display.ref_d7, bit1)
pulse_enable(display)
end
@spec register_select(display_driver, 0 | 1) :: display_driver
defp register_select(display, flag) when flag in 0..1 do
:ok = ParallelBus.write(display.ref_rs, flag)
display
end
@spec enable(display_driver, 0 | 1) :: display_driver
defp enable(display, flag) when flag in 0..1 do
:ok = ParallelBus.write(display.ref_en, flag)
display
end
@spec pulse_enable(display_driver) :: display_driver
defp pulse_enable(display) do
display
|> enable(1)
|> enable(0)
end
end
|
lib/lcd_display/driver/hd44780_gpio.ex
| 0.811527 | 0.756852 |
hd44780_gpio.ex
|
starcoder
|
defmodule Membrane.Core.Element.PadModel do
@moduledoc false
  # Utility functions for verifying and manipulating pads and their data.
alias Membrane.Element.Pad
alias Membrane.Core.Element.State
use Bunch
@type pad_data_t :: map
@type pad_info_t :: map
@type pads_t :: %{data: pad_data_t, info: pad_info_t, dynamic_currently_linking: [Pad.name_t()]}
@type unknown_pad_error_t :: {:error, {:unknown_pad, Pad.name_t()}}
@spec assert_instance(Pad.name_t(), State.t()) :: :ok | unknown_pad_error_t
def assert_instance(pad_name, state) do
if state.pads.data |> Map.has_key?(pad_name) do
:ok
else
{:error, {:unknown_pad, pad_name}}
end
end
@spec assert_instance!(Pad.name_t(), State.t()) :: :ok
def assert_instance!(pad_name, state) do
:ok = assert_instance(pad_name, state)
end
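  # Asserts that the data of pad `pad_name` matches `pattern`, e.g.
  # `assert_data(:input, %{direction: :input}, state)` returns :ok when the
  # pad exists and its data matches the pattern.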
defmacro assert_data(pad_name, pattern, state) do
quote do
with {:ok, data} <- unquote(__MODULE__).get_data(unquote(pad_name), unquote(state)) do
if match?(unquote(pattern), data) do
:ok
else
{:error,
{:invalid_pad_data, name: unquote(pad_name), pattern: unquote(pattern), data: data}}
end
end
end
end
defmacro assert_data!(pad_name, pattern, state) do
quote do
:ok = unquote(__MODULE__).assert_data(unquote(pad_name), unquote(pattern), unquote(state))
end
end
@spec filter_names_by_data(constraints :: map, State.t()) :: [Pad.name_t()]
def filter_names_by_data(constraints \\ %{}, state)
def filter_names_by_data(constraints, state) when constraints == %{} do
state.pads.data |> Map.keys()
end
def filter_names_by_data(constraints, state) do
state.pads.data
|> Enum.filter(fn {_name, data} -> data |> constraints_met?(constraints) end)
|> Keyword.keys()
end
@spec filter_data(constraints :: map, State.t()) :: %{atom => pad_data_t}
def filter_data(constraints \\ %{}, state)
def filter_data(constraints, state) when constraints == %{} do
state.pads.data
end
def filter_data(constraints, state) do
state.pads.data
|> Enum.filter(fn {_name, data} -> data |> constraints_met?(constraints) end)
|> Map.new()
end
@spec get_data(Pad.name_t(), keys :: atom | [atom], State.t()) ::
{:ok, pad_data_t | any} | unknown_pad_error_t
def get_data(pad_name, keys \\ [], state) do
with :ok <- assert_instance(pad_name, state) do
state
|> Bunch.Struct.get_in(data_keys(pad_name, keys))
~> {:ok, &1}
end
end
@spec get_data!(Pad.name_t(), keys :: atom | [atom], State.t()) :: pad_data_t | any
def get_data!(pad_name, keys \\ [], state) do
{:ok, pad_data} = get_data(pad_name, keys, state)
pad_data
end
@spec set_data(Pad.name_t(), keys :: atom | [atom], State.t()) ::
State.stateful_t(:ok | unknown_pad_error_t)
def set_data(pad_name, keys \\ [], v, state) do
with {:ok, state} <- {assert_instance(pad_name, state), state} do
state
|> Bunch.Struct.put_in(data_keys(pad_name, keys), v)
~> {:ok, &1}
end
end
@spec set_data!(Pad.name_t(), keys :: atom | [atom], State.t()) ::
State.stateful_t(:ok | unknown_pad_error_t)
def set_data!(pad_name, keys \\ [], v, state) do
{:ok, state} = set_data(pad_name, keys, v, state)
state
end
@spec update_data(Pad.name_t(), keys :: atom | [atom], (data -> {:ok | error, data}), State.t()) ::
State.stateful_t(:ok | error | unknown_pad_error_t)
when data: pad_data_t | any, error: {:error, reason :: any}
def update_data(pad_name, keys \\ [], f, state) do
with {:ok, state} <- {assert_instance(pad_name, state), state},
{:ok, state} <-
state
|> Bunch.Struct.get_and_update_in(data_keys(pad_name, keys), f) do
{:ok, state}
else
{{:error, reason}, state} -> {{:error, reason}, state}
end
end
@spec update_data!(Pad.name_t(), keys :: atom | [atom], (data -> data), State.t()) :: State.t()
when data: pad_data_t | any
def update_data!(pad_name, keys \\ [], f, state) do
:ok = assert_instance(pad_name, state)
state
|> Bunch.Struct.update_in(data_keys(pad_name, keys), f)
end
@spec get_and_update_data(
Pad.name_t(),
keys :: atom | [atom],
(data -> {success | error, data}),
State.t()
) :: State.stateful_t(success | error | unknown_pad_error_t)
when data: pad_data_t | any, success: {:ok, data}, error: {:error, reason :: any}
def get_and_update_data(pad_name, keys \\ [], f, state) do
with {:ok, state} <- {assert_instance(pad_name, state), state},
{{:ok, out}, state} <-
state
|> Bunch.Struct.get_and_update_in(data_keys(pad_name, keys), f) do
{{:ok, out}, state}
else
{{:error, reason}, state} -> {{:error, reason}, state}
end
end
@spec get_and_update_data!(
Pad.name_t(),
keys :: atom | [atom],
(data -> {data, data}),
State.t()
) :: State.stateful_t(data)
when data: pad_data_t | any
def get_and_update_data!(pad_name, keys \\ [], f, state) do
:ok = assert_instance(pad_name, state)
state
|> Bunch.Struct.get_and_update_in(data_keys(pad_name, keys), f)
end
@spec pop_data(Pad.name_t(), State.t()) ::
State.stateful_t({:ok, pad_data_t | any} | unknown_pad_error_t)
def pop_data(pad_name, state) do
with {:ok, state} <- {assert_instance(pad_name, state), state} do
state
|> Bunch.Struct.pop_in(data_keys(pad_name))
~> {:ok, &1}
end
end
@spec pop_data!(Pad.name_t(), State.t()) :: State.stateful_t(pad_data_t | any)
def pop_data!(pad_name, state) do
{{:ok, pad_data}, state} = pop_data(pad_name, state)
{pad_data, state}
end
@spec delete_data(Pad.name_t(), State.t()) :: State.stateful_t(:ok | unknown_pad_error_t)
def delete_data(pad_name, state) do
with {:ok, {_out, state}} <- pop_data(pad_name, state) do
{:ok, state}
end
end
@spec delete_data!(Pad.name_t(), State.t()) :: State.t()
def delete_data!(pad_name, state) do
{:ok, state} = delete_data(pad_name, state)
state
end
@spec constraints_met?(pad_data_t, map) :: boolean
defp constraints_met?(data, constraints) do
constraints |> Enum.all?(fn {k, v} -> data[k] === v end)
end
@spec data_keys(Pad.name_t(), keys :: atom | [atom]) :: [atom]
defp data_keys(pad_name, keys \\ []) do
[:pads, :data, pad_name | Bunch.listify(keys)]
end
end
|
lib/membrane/core/element/pad_model.ex
| 0.861844 | 0.582461 |
pad_model.ex
|
starcoder
|
defmodule Aecore.Oracle.OracleStateTree do
@moduledoc """
Top level oracle state tree.
"""
alias Aecore.Chain.{Chainstate, Identifier}
alias Aecore.Oracle.{Oracle, OracleQuery}
alias Aecore.Oracle.Tx.OracleQueryTx
alias Aeutil.{PatriciaMerkleTree, Serialization}
alias MerklePatriciaTree.Trie
@typedoc "Hash of the tree"
@type hash :: binary()
@typedoc "Structure that holds Oracles Tree and Oracles Cache Tree"
@type oracles_state :: %{oracle_tree: Trie.t(), oracle_cache_tree: Trie.t()}
@dummy_val <<0>>
@spec init_empty() :: oracles_state()
def init_empty do
%{
oracle_tree: PatriciaMerkleTree.new(:oracles),
oracle_cache_tree: PatriciaMerkleTree.new(:oracles_cache)
}
end
@spec prune(Chainstate.t(), non_neg_integer()) :: Chainstate.t()
def prune(chainstate, block_height) do
{new_oracles_state, new_accounts_state} =
initialize_deletion({chainstate.oracles, chainstate.accounts}, block_height - 1)
%{chainstate | oracles: new_oracles_state, accounts: new_accounts_state}
end
@spec enter_oracle(oracles_state(), Oracle.t()) :: oracles_state()
def enter_oracle(oracles_state, oracle) do
add_oracle(oracles_state, oracle, :enter)
end
@spec insert_oracle(oracles_state(), Oracle.t()) :: oracles_state()
def insert_oracle(oracles_state, oracle) do
add_oracle(oracles_state, oracle, :insert)
end
  @spec get_oracle(oracles_state(), binary()) :: Oracle.t() | :none
def get_oracle(oracles_state, key) do
get(oracles_state.oracle_tree, key)
end
@spec exists_oracle?(oracles_state(), binary()) :: boolean()
def exists_oracle?(oracles_state, key) do
exists?(oracles_state, key, :oracle)
end
@spec enter_query(oracles_state(), OracleQuery.t()) :: oracles_state()
def enter_query(oracles_state, query) do
add_query(oracles_state, query, :enter)
end
@spec insert_query(oracles_state(), OracleQuery.t()) :: oracles_state()
def insert_query(oracles_state, query) do
add_query(oracles_state, query, :insert)
end
  @spec get_query(oracles_state(), binary()) :: OracleQuery.t() | :none
def get_query(oracles_state, key) do
get(oracles_state.oracle_tree, key)
end
@spec exists_query?(oracles_state(), binary()) :: boolean()
def exists_query?(oracles_state, key) do
exists?(oracles_state, key, :oracle_query)
end
@spec root_hash(oracles_state()) :: hash()
def root_hash(oracles_state) do
PatriciaMerkleTree.root_hash(oracles_state.oracle_tree)
end
defp initialize_deletion({oracles_state, _accounts_state} = trees, expires) do
oracles_state.oracle_cache_tree
|> PatriciaMerkleTree.all_keys()
|> Enum.reduce(trees, fn cache_key_encoded, new_trees_state ->
cache_key_encoded
|> Serialization.cache_key_decode()
|> filter_expired(expires, cache_key_encoded, new_trees_state)
end)
end
defp filter_expired({expires, data}, expires, cache_key_encoded, trees) do
{new_oracles_state, new_accounts_state} = delete_expired(data, trees)
{
%{
new_oracles_state
| oracle_cache_tree: delete(new_oracles_state.oracle_cache_tree, cache_key_encoded)
},
new_accounts_state
}
end
defp filter_expired(_, _, _, trees), do: trees
defp delete_expired({:oracle, oracle_id}, {oracles_state, accounts_state}) do
{
Map.put(oracles_state, :oracle_tree, delete(oracles_state.oracle_tree, oracle_id.value)),
accounts_state
}
end
defp delete_expired({:query, oracle_id, id}, {oracles_state, accounts_state}) do
query_id = oracle_id <> id
query = get_query(oracles_state, query_id)
new_accounts_state = Oracle.refund_sender(query, accounts_state)
{
%{oracles_state | oracle_tree: delete(oracles_state.oracle_tree, query_id)},
new_accounts_state
}
end
defp add_oracle(oracles_state, oracle, how) do
id = oracle.owner
expires = oracle.expires
serialized = Oracle.rlp_encode(oracle)
new_oracle_tree =
case how do
:insert ->
insert(oracles_state.oracle_tree, id.value, serialized)
:enter ->
enter(oracles_state.oracle_tree, id.value, serialized)
end
new_oracle_cache_tree =
%{oracles_state | oracle_tree: new_oracle_tree}
|> init_expired_cache_key_removal()
|> cache_push({:oracle, id}, expires)
%{oracle_tree: new_oracle_tree, oracle_cache_tree: new_oracle_cache_tree}
end
defp add_query(tree, query, how) do
oracle_id = query.oracle_address
id =
OracleQueryTx.id(
query.sender_address,
query.sender_nonce,
oracle_id
)
tree_id = oracle_id <> id
expires = query.expires
serialized = OracleQuery.rlp_encode(query)
new_oracle_tree =
case how do
:insert ->
insert(tree.oracle_tree, tree_id, serialized)
:enter ->
enter(tree.oracle_tree, tree_id, serialized)
end
new_oracle_cache_tree =
%{tree | oracle_tree: new_oracle_tree}
|> init_expired_cache_key_removal()
|> cache_push({:query, oracle_id, id}, expires)
%{oracle_tree: new_oracle_tree, oracle_cache_tree: new_oracle_cache_tree}
end
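  # Note: insert/3 and enter/3 below currently both delegate to
  # PatriciaMerkleTree.enter/3, so an existing key is overwritten either way.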
defp insert(tree, key, value) do
PatriciaMerkleTree.enter(tree, key, value)
end
defp enter(tree, key, value) do
PatriciaMerkleTree.enter(tree, key, value)
end
defp delete(tree, key) do
PatriciaMerkleTree.delete(tree, key)
end
defp exists?(oracles_state, key, where) do
oracles_state
|> which_tree(where)
|> get(key) !== :none
end
defp get(tree, key) do
case PatriciaMerkleTree.lookup(tree, key) do
{:ok, serialized} ->
{:ok, deserialized} = Serialization.rlp_decode_anything(serialized)
case deserialized do
%Oracle{} ->
%{deserialized | owner: Identifier.create_identity(key, :oracle)}
_ ->
deserialized
end
_ ->
:none
end
end
defp which_tree(oracles_state, :oracle), do: oracles_state.oracle_tree
defp which_tree(oracles_state, :oracle_query), do: oracles_state.oracle_tree
defp which_tree(oracles_state, _where), do: oracles_state.oracle_tree
defp cache_push(oracle_cache_tree, key, expires) do
encoded = Serialization.cache_key_encode(key, expires)
enter(oracle_cache_tree, encoded, @dummy_val)
end
defp init_expired_cache_key_removal(oracles_state) do
%{oracle_cache_tree: cache_tree} =
oracles_state.oracle_cache_tree
|> PatriciaMerkleTree.all_keys()
|> Enum.reduce(oracles_state, fn key, new_state ->
new_cache_tree =
key
|> Serialization.cache_key_decode()
|> remove_expired_cache_key(key, new_state)
%{new_state | oracle_cache_tree: new_cache_tree}
end)
cache_tree
end
defp remove_expired_cache_key({exp, data}, expired_cache_key, oracles_state) do
record_key = extract_record_key(data)
record = get(oracles_state.oracle_tree, record_key)
if record.expires > exp do
delete(oracles_state.oracle_cache_tree, expired_cache_key)
else
oracles_state.oracle_cache_tree
end
end
defp extract_record_key({:oracle, id}), do: id.value
defp extract_record_key({:query, oracle_id, id}), do: oracle_id <> id
end
|
apps/aecore/lib/aecore/oracle/oracle_state_tree.ex
| 0.829871 | 0.449997 |
oracle_state_tree.ex
|
starcoder
|
defmodule ExLTTB do
@moduledoc """
Documentation for ExLTTB.
"""
alias ExLTTB.SampleUtils
@doc """
Downsamples a sample list using [LTTB](https://skemman.is/bitstream/1946/15343/3/SS_MSthesis.pdf).
## Arguments
  * `sample_list`: a `List` of samples. These can have any representation, as long as access functions are provided (see Options). The samples are assumed to be sorted by the `x` coordinate.
* `threshold`: the number of required output samples. Must be >= 0.
* `opts`: a keyword list of options.
## Options
* `sample_to_x_fun`: a function that takes as argument a sample and returns its x coordinate. Defaults to `sample[:x]`
* `sample_to_y_fun`: a function that takes as argument a sample and returns its y coordinate. Defaults to `sample[:y]`
* `xy_to_sample_fun`: a function that takes as argument `x` and `y` and returns a sample with these coordinates. Defaults to `%{x: x, y: y}`
## Return
* `sample_list`: a downsampled list of samples.
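  ## Example
  A minimal sketch using the default `%{x: x, y: y}` representation:
      iex> samples = for x <- 0..9, do: %{x: x, y: x * x}
      iex> ExLTTB.downsample_to(samples, 4) |> length()
      4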
"""
def downsample_to(sample_list, threshold, opts \\ [])
def downsample_to(sample_list, threshold, _opts)
when (threshold >= 0 and threshold < 2) or length(sample_list) <= 2 do
Enum.take(sample_list, threshold)
end
def downsample_to([first_sample | _tail] = sample_list, threshold, _opts) when threshold == 2 do
[first_sample, List.last(sample_list)]
end
def downsample_to(sample_list, threshold, _opts) when threshold > length(sample_list) do
sample_list
end
def downsample_to(sample_list, threshold, opts) do
make_buckets(sample_list, threshold)
|> select_samples(opts)
end
defp make_buckets(sample_list, buckets_number) when buckets_number > length(sample_list) do
Enum.map(sample_list, fn el -> [el] end)
end
defp make_buckets([first_sample, second_sample | tail] = sample_list, buckets_number) do
# We subtract 2 since the first and last buckets are fixed,
# containing the first and last sample
avg = (length(sample_list) - 2) / (buckets_number - 2)
# The acc is populated from right to left and reversed at the end
do_make_buckets(tail, 1, avg, avg, [[second_sample], [first_sample]])
end
defp do_make_buckets([head | []], _current_index, _avg, _avg_acc, buckets_acc) do
Enum.reverse([[head] | buckets_acc])
end
defp do_make_buckets(
[head | tail],
current_index,
avg,
avg_acc,
[bucket_head | bucket_tail] = buckets_acc
) do
next_index = current_index + 1
if current_index > avg_acc do
do_make_buckets(tail, next_index, avg, avg_acc + avg, [[head] | buckets_acc])
else
do_make_buckets(tail, next_index, avg, avg_acc, [[head | bucket_head] | bucket_tail])
end
end
defp select_samples([[first_sample] | tail] = _buckets, opts) do
do_select_samples(tail, [first_sample], opts)
end
defp do_select_samples([[last_sample] | []], acc, _opts) do
Enum.reverse([last_sample | acc])
end
defp do_select_samples([candidates, next_bucket | tail], [prev_sample | _acc_tail] = acc, opts) do
next_sample = SampleUtils.average_sample(next_bucket, opts)
[initial_candidate | _tail] = candidates
initial_area = SampleUtils.triangle_area(prev_sample, initial_candidate, next_sample, opts)
{selected_sample, _area} =
Enum.reduce(candidates, {initial_candidate, initial_area}, fn candidate_sample,
{best_sample, best_area} ->
candidate_area =
SampleUtils.triangle_area(prev_sample, candidate_sample, next_sample, opts)
if candidate_area > best_area do
{candidate_sample, candidate_area}
else
{best_sample, best_area}
end
end)
do_select_samples([next_bucket | tail], [selected_sample | acc], opts)
end
end
|
lib/ex_lttb.ex
| 0.881239 | 0.784979 |
ex_lttb.ex
|
starcoder
|
defmodule ExUnitSpan.Track do
@moduledoc false
defstruct [:lanes, :free_lanes, :started_at]
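  # :lanes maps a lane id to the spans currently running in that lane (the
  # active module span at the head); :free_lanes holds lanes whose module has
  # finished and that can be reused; :started_at anchors all timestamps.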
def from_events(events) do
Enum.reduce(events, %__MODULE__{}, fn event, track -> build(track, event) end)
end
defp build(%__MODULE__{}, {ts, {:suite_started, _}}) do
%__MODULE__{lanes: %{}, free_lanes: %{}, started_at: ts}
end
defp build(
%__MODULE__{lanes: lanes, free_lanes: free_lanes},
{_ts, {:suite_finished, _}}
)
when map_size(lanes) == 0 do
Enum.to_list(free_lanes)
|> Enum.sort_by(fn {lane_id, _} -> lane_id end)
|> Enum.map(fn {_, lane} -> Enum.reverse(lane) end)
end
defp build(
%__MODULE__{lanes: lanes, free_lanes: free_lanes} = track,
{ts, {:module_started, test_module}}
) do
parent = %{name: test_module.name, children: [], started_at: ts - track.started_at}
if Enum.empty?(free_lanes) do
lanes = Map.put(lanes, map_size(lanes), [parent])
%{track | lanes: lanes}
else
lane_id = Enum.min(Map.keys(free_lanes))
{lane, free_lanes} = Map.pop(free_lanes, lane_id)
lanes = Map.put(lanes, lane_id, [parent | lane])
%{track | lanes: lanes, free_lanes: free_lanes}
end
end
defp build(
%__MODULE__{lanes: lanes, free_lanes: free_lanes} = track,
{ts, {:module_finished, test_module}}
) do
{lane_id, lane} =
Enum.find(lanes, fn {_, [parent | _]} -> parent.name == test_module.name end)
[parent | rest] = lane
parent =
Map.put(parent, :finished_at, ts - track.started_at)
|> Map.update!(:children, &Enum.reverse/1)
free_lanes = Map.put(free_lanes, lane_id, [parent | rest])
{_, lanes} = Map.pop(lanes, lane_id)
%{track | lanes: lanes, free_lanes: free_lanes}
end
defp build(
%__MODULE__{lanes: lanes} = track,
{ts, {:test_started, test}}
) do
{lane_id, [parent | rest]} =
Enum.find(lanes, fn {_, [parent | _]} -> parent.name == test.module end)
child = %{name: test.name, started_at: ts - track.started_at}
parent = %{parent | children: [child | parent.children]}
lanes = Map.put(lanes, lane_id, [parent | rest])
%{track | lanes: lanes}
end
defp build(
%__MODULE__{lanes: lanes} = track,
{ts, {:test_finished, test}}
) do
{lane_id, [parent | rest]} =
Enum.find(lanes, fn {_, [parent | _]} -> parent.name == test.module end)
[child | rest_children] = parent.children
child = Map.put(child, :finished_at, ts - track.started_at)
parent = %{parent | children: [child | rest_children]}
lanes = Map.put(lanes, lane_id, [parent | rest])
%{track | lanes: lanes}
end
end
|
lib/ex_unit_span/track.ex
| 0.775392 | 0.436202 |
track.ex
|
starcoder
|
defmodule RMQ.RPC do
@moduledoc """
RPC via RabbitMQ.
In short, it's a `GenServer` which implements a publisher and a consumer at once.
You can read more about how this works in the
[tutorial](https://www.rabbitmq.com/tutorials/tutorial-six-python.html).
## Configuration
* `:connection` - the connection module which implements `RMQ.Connection` behaviour;
* `:queue` - the queue name to which the module will be subscribed for consuming responses.
Also can be a tuple `{queue, options}`. See the options for `AMQP.Queue.declare/3`.
Defaults to `""` which means the broker will assign a name to the newly created queue by itself;
* `:exchange` - the exchange name to which `:queue` will be bound.
  Please make sure the exchange exists. Defaults to `""` - the default exchange;
* `:consumer_tag` - a consumer tag for `:queue`. Defaults to the current module name;
* `:publishing_options` - any valid options for `AMQP.Basic.publish/5` except
`reply_to`, `correlation_id` - these will be set automatically and cannot be changed.
Defaults to `[]`;
* `:reconnect_interval` - a reconnect interval in milliseconds. It can be also a function that
accepts the current connection attempt as a number and returns a new interval.
Defaults to `5000`;
* `:filter_parameters` - a list of parameters that may contain sensitive data and have
to be filtered out when logging. Defaults to `["password"]`.
## Example
Application 1:
defmodule MyApp.RemoteResource do
use RMQ.RPC, publishing_options: [app_id: "MyApp"]
def find_by_id(id) do
call("remote-resource-finder", %{id: id})
end
end
Application 2:
defmodule MyOtherApp.Consumer do
use RMQ.Consumer, queue: "remote-resource-finder"
@impl RMQ.Consumer
def consume(chan, payload, meta) do
response =
payload
|> Jason.decode!()
|> Map.fetch!("id")
|> MyOtherApp.Resource.get()
|> Jason.encode!()
reply(chan, meta, response)
ack(chan, meta)
end
end
"""
use RMQ.Logger
import RMQ.Utils
@defaults [
connection: RMQ.Connection,
queue: "",
exchange: "",
publishing_options: [],
reconnect_interval: 5000,
filter_parameters: ["password"]
]
@doc """
A callback for dynamic configuration.
Will be called before `c:setup_queue/2`.
"""
@callback config() :: keyword()
@doc """
Does all the job on preparing the queue.
Whenever you need full control over configuring the queue you can implement this callback and
use `AMQP` library directly.
See `setup_queue/2` for the default implementation.
"""
@callback setup_queue(chan :: AMQP.Channel.t(), config :: keyword()) :: binary()
@doc false
def start_link(module, opts) do
GenServer.start_link(module, module, Keyword.put_new(opts, :name, module))
end
@doc """
Performs a call against the given module.
Here `options` is a keyword list which will be merged into `:publishing_options` during the call
and `timeout` is the timeout in milliseconds for the inner GenServer call.
The payload will be encoded by using `Jason.encode!/1`.
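  For example (module and queue names are illustrative):
      {:ok, response} = RMQ.RPC.call(MyApp.RemoteResource, "remote-resource-finder", %{id: 1})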
"""
@spec call(
module :: module(),
queue :: binary(),
payload :: any(),
options :: keyword(),
timeout :: integer()
) :: {:ok, any()} | {:error, :not_connected} | {:error, :timeout} | {:error, any()}
def call(module, queue, payload \\ %{}, options \\ [], timeout \\ 5000) do
GenServer.call(module, {:publish, queue, payload, options}, timeout)
catch
:exit, {:timeout, _} -> {:error, :timeout}
end
@doc """
The default implementation for `c:setup_queue/2` callback.
"""
@spec setup_queue(chan :: AMQP.Channel.t(), config :: keyword()) :: binary()
def setup_queue(chan, conf) do
{q, opts} = normalize_queue(conf[:queue])
{:ok, %{queue: queue}} =
AMQP.Queue.declare(chan, q, Keyword.merge([exclusive: true, auto_delete: true], opts))
unless conf[:exchange] == "" do
:ok = AMQP.Queue.bind(chan, queue, conf[:exchange], routing_key: queue)
end
{:ok, _} = AMQP.Basic.consume(chan, queue, nil, consumer_tag: conf[:consumer_tag])
queue
end
@doc false
def init(_module, _arg) do
Process.flag(:trap_exit, true)
send(self(), {:init, 1})
{:ok, %{chan: nil, queue: nil, pids: %{}, config: %{}}}
end
@doc false
def handle_call(_module, {:publish, _queue, _payload, _options}, _from, %{chan: nil} = state) do
{:reply, {:error, :not_connected}, state}
end
def handle_call(module, {:publish, queue, payload, options}, from, %{config: config} = state) do
correlation_id = UUID.uuid1()
options =
config[:publishing_options]
|> Keyword.merge(options)
|> Keyword.put(:reply_to, state.queue)
|> Keyword.put(:correlation_id, correlation_id)
log_debug(module, [
"Publishing >>> ",
"queue=#{inspect(queue)} ",
"payload=#{inspect(filter_values(payload, config[:filter_parameters]))}"
])
payload = encode_message(payload)
case AMQP.Basic.publish(state.chan, config[:exchange], queue, payload, options) do
:ok -> {:noreply, put_in(state.pids[correlation_id], from)}
{:error, reason} -> {:reply, {:error, reason}, state}
end
end
@doc false
def handle_info(module, {:init, attempt}, state) do
config = module_config(module)
with {:ok, conn} <- config[:connection].get_connection(),
{:ok, chan} <- AMQP.Channel.open(conn) do
Process.monitor(chan.pid)
queue = module.setup_queue(chan, config)
log_info("Ready")
{:noreply, %{state | chan: chan, queue: queue, config: config}}
else
error ->
time = reconnect_interval(config[:reconnect_interval], attempt)
log_error("No connection: #{inspect(error)}. Retrying in #{time}ms")
Process.send_after(self(), {:init, attempt + 1}, time)
{:noreply, %{state | config: config}}
end
end
# Confirmation sent by the broker after registering this process as a consumer
def handle_info(_module, {:basic_consume_ok, _meta}, state) do
{:noreply, state}
end
# Sent by the broker when the consumer is unexpectedly cancelled (such as after a queue deletion)
def handle_info(_module, {:basic_cancel, _meta}, state) do
{:stop, :normal, state}
end
# Confirmation sent by the broker to the consumer process after a Basic.cancel
def handle_info(_module, {:basic_cancel_ok, _meta}, state) do
{:noreply, state}
end
def handle_info(module, {:basic_deliver, payload, meta}, %{config: config} = state) do
{pid, state} = pop_in(state.pids[meta.correlation_id])
unless is_nil(pid) do
payload = decode_message(payload)
log_debug(module, [
"Consuming <<< ",
"payload=#{inspect(filter_values(payload, config[:filter_parameters]))}"
])
GenServer.reply(pid, {:ok, payload})
AMQP.Basic.ack(state.chan, meta.delivery_tag)
end
{:noreply, state}
end
def handle_info(module, {:DOWN, _ref, :process, _pid, reason}, state) do
log_error(module, "Connection lost: #{inspect(reason)}. Reconnecting...")
send(self(), {:init, 1})
{:noreply, %{state | chan: nil}}
end
def handle_info(module, :shutdown = reason, state) do
terminate(module, reason, state)
{:noreply, state}
end
@doc false
def terminate(_module, _reason, %{chan: chan}) do
close_channel(chan)
end
defp module_config(module) do
@defaults
|> Keyword.merge(module.config())
|> Keyword.put_new(:consumer_tag, to_string(module))
end
defmacro __using__(opts \\ []) do
quote location: :keep do
use GenServer
@behaviour RMQ.RPC
@config unquote(opts)
def start_link(opts), do: RMQ.RPC.start_link(__MODULE__, opts)
def call(queue, payload \\ %{}, options \\ [], timeout \\ 5000) do
RMQ.RPC.call(__MODULE__, queue, payload, options, timeout)
end
@impl RMQ.RPC
def config, do: @config
@impl RMQ.RPC
def setup_queue(chan, config), do: RMQ.RPC.setup_queue(chan, config)
@impl GenServer
def init(arg), do: RMQ.RPC.init(__MODULE__, arg)
@impl GenServer
def handle_call(msg, from, state), do: RMQ.RPC.handle_call(__MODULE__, msg, from, state)
@impl GenServer
def handle_info(msg, state), do: RMQ.RPC.handle_info(__MODULE__, msg, state)
@impl GenServer
def terminate(reason, state), do: RMQ.RPC.terminate(__MODULE__, reason, state)
defoverridable config: 0, setup_queue: 2
end
end
end
|
lib/rmq/rpc.ex
| 0.853699 | 0.642096 |
rpc.ex
|
starcoder
|
defmodule Ockam.Router.Protocol.Encoding.Default.Codegen do
alias Ockam.Router.Protocol.Encoding.Default.Encode
alias Ockam.Router.Protocol.Encoding.Default.Decode
def build_kv_iodata(kv, encode_opts, encode_args) do
kv
|> Enum.map(&encode_pair(&1, encode_opts, encode_args))
|> List.flatten()
|> collapse_static()
end
def build_constant_iodata(kv, schema, encode_args) do
Enum.map(kv, &encode_value(&1, schema, encode_args))
end
def build_decoder(_module, value, input, [], _decode_args) do
quote(do: {:ok, unquote(value), unquote(input)})
end
def build_decoder(module, value, input, schema, decode_args) do
decodes = build_decoder(module, value, input, schema, decode_args, [])
quote do
with unquote_splicing(decodes) do
{:ok, unquote(value), unquote(input)}
end
end
end
defp build_decoder(_module, _value, _input, [], _decode_args, acc), do: Enum.reverse(acc)
defp build_decoder(module, value, input, [{key, {type, default}} | schema], decode_args, acc) do
acc = build_value_decoder(module, value, input, key, type, default, decode_args, acc)
build_decoder(module, value, input, schema, decode_args, acc)
end
defp build_decoder(module, value, input, [{key, type} | schema], decode_args, acc) do
acc = build_value_decoder(module, value, input, key, type, nil, decode_args, acc)
build_decoder(module, value, input, schema, decode_args, acc)
end
defp build_value_decoder(_module, value, input, key, type, _default, decode_args, acc) do
binding = {key, [generated: true], __MODULE__}
bound =
quote do
{:ok, unquote(binding), unquote(input)} <-
unquote(Decode).decode(
unquote(type),
unquote_splicing(decode_args)
)
end
applied =
quote do
unquote(value) = Map.put(unquote(value), unquote(key), unquote(binding))
end
[applied, bound | acc]
end
defp encode_pair({key, value}, encode_opts, encode_args) do
key = IO.iodata_to_binary(Encode.atom(key, encode_opts))
[key, quote(do: unquote(Encode).encode_value(unquote(value), unquote_splicing(encode_args)))]
end
defp encode_value({key, value}, schema, encode_args) do
type = Keyword.fetch!(schema, key)
case type do
:i1 ->
quote(do: {:ok, unquote(Encode).i1(unquote(value), unquote_splicing(encode_args))})
_other ->
quote(do: unquote(Encode).encode_value(unquote(value), unquote_splicing(encode_args)))
end
end
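  # Merges adjacent compile-time binaries into a single binary so the
  # generated iodata carries as few fragments as possible at runtime.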
defp collapse_static([bin1, bin2 | rest]) when is_binary(bin1) and is_binary(bin2) do
collapse_static([bin1 <> bin2 | rest])
end
defp collapse_static([other | rest]) do
[other | collapse_static(rest)]
end
defp collapse_static([]) do
[]
end
end
|
implementations/elixir/lib/router/protocol/encoding/default/codegen.ex
| 0.57069 | 0.501282 |
codegen.ex
|
starcoder
|
defmodule AdventOfCode.Day08 do
@type mapping :: %{required(String.t()) => String.t()}
@type line :: {[String.t()], [String.t()]}
@spec part1([binary()]) :: integer()
def part1(args) do
parse_args(args)
|> Enum.flat_map(&elem(&1, 1))
|> Enum.filter(&Enum.member?([2, 3, 4, 7], String.length(&1)))
|> Enum.count()
end
@spec part2([binary()]) :: integer()
def part2(args) do
parse_args(args)
|> Enum.map(&decode_display/1)
|> Enum.sum()
end
# Solves the misconfiguration and returns the corrected displayed number
@spec decode_display(line()) :: integer()
defp decode_display({test_signals, displayed_digits}) do
displayed_digits
|> Enum.map(&correct_display(&1, segment_correction_map(test_signals)))
|> Enum.map(&decode/1)
|> Integer.undigits()
end
# Generate a map that takes an (falsely) illuminated segment, giving
# the segment that should be correctly illuminated, by analysing
# the segment illumination frequencies during the test signals
@spec segment_correction_map([String.t()]) :: mapping()
defp segment_correction_map(test_signals) do
segment_graphemes = Enum.flat_map(test_signals, &String.graphemes/1)
four = Enum.find(test_signals, &(String.length(&1) == 4))
seven = Enum.find(test_signals, &(String.length(&1) == 3))
# By adding two fours and two sevens, the illumination frequencies
# during the test signals become independent from the rest.
# The correct segment letter can then be mapped to the falsely illuminated segment.
frequency_offset = [four, four, seven, seven] |> Enum.flat_map(&String.graphemes/1)
independent_frequencies = Enum.frequencies(frequency_offset ++ segment_graphemes)
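    # For example, segment "a" lights in 8 of the 10 digits; four ("bcdf")
    # does not contain it while seven ("acf") does, so its offset frequency
    # is 8 + 0 + 2 = 10, which frequency_to_segment/1 maps back to "a".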
independent_frequencies
|> Map.to_list()
|> Enum.map(fn {char, frequency} -> {char, frequency_to_segment(frequency)} end)
|> Map.new()
end
# Maps the offset independent frequencies to the correct segment letter
@spec frequency_to_segment(integer()) :: String.t()
defp frequency_to_segment(frequency) do
case frequency do
10 -> "a"
8 -> "b"
12 -> "c"
9 -> "d"
4 -> "e"
13 -> "f"
7 -> "g"
end
end
# Applies the correction map to the signal and returns an
# alphabetically sorted string of illuminated segments
@spec correct_display(String.t(), mapping()) :: String.t()
defp correct_display(segments, correction_map) do
String.graphemes(segments)
|> Enum.map(&Map.get(correction_map, &1))
|> Enum.sort()
|> Enum.join()
end
# Returns the numerical number that corresponds to a given
# set of illuminated segments (in alphabetical order)
@spec decode(String.t()) :: integer()
defp decode(segments) do
case segments do
"abcefg" -> 0
"cf" -> 1
"acdeg" -> 2
"acdfg" -> 3
"bcdf" -> 4
"abdfg" -> 5
"abdefg" -> 6
"acf" -> 7
"abcdefg" -> 8
"abcdfg" -> 9
end
end
@spec parse_args([binary()]) :: [line()]
defp parse_args(args), do: Enum.map(args, &parse_line/1)
@spec parse_line(binary()) :: line()
defp parse_line(line) do
String.split(line, " | ") |> Enum.map(&String.split(&1, " ")) |> List.to_tuple()
end
end
|
lib/advent_of_code/day_08.ex
| 0.886414 | 0.483039 |
day_08.ex
|
starcoder
|
defmodule PassiveSupport.Integer do
@moduledoc """
Functions and guards for working with integers
"""
import Integer, only: [is_odd: 1, is_even: 1]
@doc """
Qualifies if `integer` is an integer less than 0.
"""
defguard is_negative(integer)
when is_integer(integer)
and integer < 0
@doc """
Qualifies if `integer` is a natural number or 0.
"""
defguard is_nonnegative(integer)
when is_integer(integer)
and integer >= 0
@doc """
Qualifies if `integer` is an integer greater than 0.
(adhering to the mathematical principle that 0 is neither positive nor negative)
"""
defguard is_positive(integer)
when is_integer(integer)
and integer > 0
@doc """
Returns the quotient and the remainder of `dividend ÷ divisor`
## Examples
iex> remdiv(3, 2)
{1, 1}
iex> remdiv(-325, 60)
{-5, -25}
iex> remdiv(11, 3)
{3, 2}
"""
@spec remdiv(integer, integer) :: {integer, integer}
def remdiv(dividend, divisor) when is_integer(dividend) and is_integer(divisor),
do: {div(dividend, divisor), rem(dividend, divisor)}
@doc ~S"""
Arbitrary-precision factorial.
## Examples
iex> factorial(5)
120
iex> factorial(6)
720
iex> factorial(-6)
-720
iex> factorial(1)
1
"""
@spec factorial(integer) :: integer
def factorial(0), do: 1
def factorial(integer) when is_negative(integer),
do: -factorial(-integer)
def factorial(integer) when is_integer(integer),
do: 1..integer |> Enum.reduce(fn int, product -> product * int end)
import Bitwise
# Derived from https://stackoverflow.com/questions/32024156/how-do-i-raise-a-number-to-a-power-in-elixir#answer-32030190
@doc ~S"""
Arbitrary-precision exponentiation.
Will be deprecated by Elixir [v1.12.0](https://github.com/elixir-lang/elixir/commit/b11a119f52c882e2ab0f35040ef4a4b4e9d23065)
## Examples
iex> exponential(2, 10)
1024
iex> exponential(3, 3)
27
iex> exponential(2, 100)
1267650600228229401496703205376
iex> exponential(5, -3)
0.008
iex> exponential(9832, 0)
1
iex> exponential(0, 2)
0
iex> exponential(0, 0)
1
"""
@spec exponential(integer, integer) :: number
def exponential(_base, 0), do: 1
def exponential(0, _exponent), do: 0
def exponential(base, 1), do: base
def exponential(base, exponent) when is_negative(exponent), do: 1 / exponential(base, -exponent)
def exponential(base, exponent) do
derivation(base, exponent)
end
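  # Exponentiation by squaring: the base is squared at every step while the
  # exponent is halved; odd exponents fold the current product into `primer`.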
defp derivation(product, primer \\ 1, power)
defp derivation(product, primer, 1), do: product * primer
defp derivation(product, primer, power) when is_odd(power),
do: derivation(product * product, primer * product, (power >>> 1))
defp derivation(product, primer, power) when is_even(power),
do: derivation(product * product, primer, (power >>> 1))
@doc """
Converts an integer to a string, with a `separator` (default `","`)
inserted, starting in from the right side of the number, spaced apart
by the number of digits specified by `spacing` (default `3`).
## Examples
iex> formatted(57468291379)
"57,468,291,379"
iex> formatted(57468291379, separator: "_")
"57_468_291_379"
iex> formatted(57468291379, spacing: 4)
"574,6829,1379"
"""
@spec formatted(integer, separator: String.t, spacing: integer) :: String.t
def formatted(integer, opts \\ []) do
separator = opts[:separator] || ","
spacing = opts[:spacing] || 3
integer
|> Integer.digits
|> Enum.reverse
|> Stream.map(&to_string/1)
|> Stream.chunk_every(spacing)
|> Stream.intersperse(separator)
|> Enum.join
|> String.reverse
end
end
|
lib/passive_support/base/integer.ex
| 0.929336 | 0.570959 |
integer.ex
|
starcoder
|
defmodule PayDayLoan.LoadState do
@moduledoc """
Keeps track of which keys are loaded, requested, and loading
Acts as a state tracker and a queue for the loader.
You shouldn't need to call any of these functions manually but
they can be useful for debugging.
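  For example, assuming `tab` is your cache's load-state ETS table id:
      PayDayLoan.LoadState.peek(tab, key)
      #=> :loaded, :loading, ..., or nil when the key is unknown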
"""
@typedoc """
Load states that a key can have.
* `:requested` - A load has been requested. The load worker should
pick this up and set the state to `:loading`.
* `:reload` - A value is available and a reload has been requested.
* `:loading` - The load worker is in the process of loading this key.
* `:reload_loading` - The load worker is in the process of loading this
key, but it is already loaded and will not block.
* `:loaded` - The key is loaded in cache.
* `:failed` - The key attempted a load or refresh and failed.
"""
@type t :: :requested | :reload | :loading | :loaded | :failed | :reload_loading
# creates the ETS table
@doc false
@spec create_table(atom) :: :ok
def create_table(ets_table_id) do
_ =
:ets.new(
ets_table_id,
[:set, :public, :named_table, {:read_concurrency, true}]
)
:ok
end
@doc """
  Set the load state to `:requested` if the key has no recorded state yet,
  then return the current load state
"""
@spec query(atom, PayDayLoan.key() | [PayDayLoan.key()]) :: t | [t]
def query(ets_table_id, keys) when is_list(keys) do
Enum.map(keys, fn key -> query(ets_table_id, key) end)
end
def query(ets_table_id, key) do
case :ets.lookup(ets_table_id, key) do
[] ->
:requested = request(ets_table_id, key)
[{^key, status}] ->
status
end
end
@doc """
Return load state without modifying; return nil if key is not found
"""
@spec peek(atom, PayDayLoan.key() | [PayDayLoan.key()]) :: t | nil | [t | nil]
def peek(ets_table_id, keys) when is_list(keys) do
Enum.map(keys, fn key -> peek(ets_table_id, key) end)
end
def peek(ets_table_id, key) do
case :ets.lookup(ets_table_id, key) do
[] -> nil
[{^key, status}] -> status
end
end
@doc """
Set state to `:requested`
"""
@spec request(atom, PayDayLoan.key() | [PayDayLoan.key()]) ::
:requested | [:requested]
def request(ets_table_id, keys) when is_list(keys) do
Enum.map(keys, fn key -> request(ets_table_id, key) end)
end
def request(ets_table_id, key) do
set_status(ets_table_id, key, :requested)
end
@doc """
Set the state to `:reload`
"""
@spec reload(atom, PayDayLoan.key() | [PayDayLoan.key()]) :: :reload | [:reload]
def reload(ets_table_id, keys) when is_list(keys) do
Enum.map(keys, fn key -> reload(ets_table_id, key) end)
end
def reload(ets_table_id, key) do
set_status(ets_table_id, key, :reload)
end
@doc """
Set the state to `:reload` if the key is loaded, set it to `:request` if it
is not
"""
@spec request_or_reload(atom, PayDayLoan.key() | [PayDayLoan.key()]) ::
          :requested | :reload | [:requested | :reload]
def request_or_reload(ets_table_id, keys) when is_list(keys) do
Enum.map(keys, fn key -> request_or_reload(ets_table_id, key) end)
end
def request_or_reload(ets_table_id, key) do
if peek(ets_table_id, key) == :loaded do
reload(ets_table_id, key)
else
request(ets_table_id, key)
end
end
@doc """
Set state to `:loaded`
"""
@spec loaded(atom, PayDayLoan.key() | [PayDayLoan.key()]) ::
:loaded | [:loaded]
def loaded(ets_table_id, keys) when is_list(keys) do
Enum.map(keys, fn key -> loaded(ets_table_id, key) end)
end
def loaded(ets_table_id, key) do
set_status(ets_table_id, key, :loaded)
end
@doc """
Set state to `:loading`
"""
@spec loading(atom, PayDayLoan.key() | [PayDayLoan.key()]) ::
:loading | [:loading]
def loading(ets_table_id, keys) when is_list(keys) do
Enum.map(keys, fn key -> loading(ets_table_id, key) end)
end
def loading(ets_table_id, key) do
set_status(ets_table_id, key, :loading)
end
@doc """
Set state to `:reload_loading`
"""
@spec reload_loading(atom, PayDayLoan.key() | [PayDayLoan.key()]) ::
:reload_loading | [:reload_loading]
def reload_loading(ets_table_id, keys) when is_list(keys) do
Enum.map(keys, fn key -> reload_loading(ets_table_id, key) end)
end
def reload_loading(ets_table_id, key) do
set_status(ets_table_id, key, :reload_loading)
end
@doc """
Set state to `:failed`
"""
@spec failed(atom, PayDayLoan.key() | [PayDayLoan.key()]) ::
:failed | [:failed]
def failed(ets_table_id, keys) when is_list(keys) do
Enum.map(keys, fn key -> failed(ets_table_id, key) end)
end
def failed(ets_table_id, key) do
set_status(ets_table_id, key, :failed)
end
@doc """
Remove a key from the load state table
"""
@spec unload(atom, PayDayLoan.key() | [PayDayLoan.key()]) :: :ok | [:ok]
def unload(ets_table_id, keys) when is_list(keys) do
Enum.map(keys, fn key -> unload(ets_table_id, key) end)
end
def unload(ets_table_id, key) do
true = :ets.delete(ets_table_id, key)
:ok
end
@doc """
Returns true if any keys are in the `:requested` or `:reload` states
"""
@spec any_requested?(atom) :: boolean
def any_requested?(ets_table_id) do
    # :ets.fun2ms(fn({_, :requested}) -> true; ({_, :reload}) -> true end)
match_spec = [
{{:_, :requested}, [], [true]},
{{:_, :reload}, [], [true]}
]
case :ets.select(ets_table_id, match_spec, 1) do
{[[]], _} -> false
:"$end_of_table" -> false
_any_other_result -> true
end
end
@doc """
Return the list of requested keys, limited to `limit` elements
"""
@spec requested_keys(atom, pos_integer) :: [PayDayLoan.key()]
def requested_keys(_ets_table_id, 0), do: []
def requested_keys(ets_table_id, limit) do
keys_in_state(ets_table_id, :requested, limit)
end
@doc """
Return the list of keys in the `:reload` state, limited to `limit` elements
"""
@spec reload_keys(atom, pos_integer) :: [PayDayLoan.key()]
def reload_keys(_ets_table_id, 0), do: []
def reload_keys(ets_table_id, limit) do
keys_in_state(ets_table_id, :reload, limit)
end
@doc """
Returns all elements of the table
"""
@spec all(atom) :: [{PayDayLoan.key(), t}]
def all(ets_table_id) do
List.flatten(:ets.match(ets_table_id, :"$1"))
end
defp set_status(ets_table_id, key, status) do
true = :ets.insert(ets_table_id, {key, status})
status
end
defp keys_in_state(ets_table_id, state, limit) do
case :ets.match(ets_table_id, {:"$1", state}, limit) do
:"$end_of_table" -> []
{keys, _continuation} -> List.flatten(keys)
end
end
end
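# Hedged usage sketch (the table name is illustrative, not part of the library):
# the lifecycle a load worker typically drives through this module.
#
#     :ok = PayDayLoan.LoadState.create_table(:pdl_example_load_state)
#     :requested = PayDayLoan.LoadState.query(:pdl_example_load_state, 1)
#     true = PayDayLoan.LoadState.any_requested?(:pdl_example_load_state)
#     :loading = PayDayLoan.LoadState.loading(:pdl_example_load_state, 1)
#     :loaded = PayDayLoan.LoadState.loaded(:pdl_example_load_state, 1)
#     :loaded = PayDayLoan.LoadState.peek(:pdl_example_load_state, 1)
#     false = PayDayLoan.LoadState.any_requested?(:pdl_example_load_state)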
|
lib/pay_day_loan/load_state.ex
| 0.803945 | 0.674081 |
load_state.ex
|
starcoder
|
defmodule DateTimeParser.Parser do
@moduledoc """
Interface for the DateTimeParser to use when parsing a string.
The flow is:
  1. Preflight the string to see if the parser is appropriate. For a simple parser, the parsing can
  sometimes happen at this stage, for example when it can be done with a single regex. Results of the
  preflight, if needed, can be stored in `struct.preflight`.
  2. Parse the string. Use `t:context/0` to check whether you should return a time, date, or datetime,
  and make sure you're honoring the user's options supplied in `struct.opts`.
You may create your own parser and use it with the DateTimeParser by creating a module that
follows this behaviour.
"""
defstruct [:string, :mod, :preflight, :context, opts: []]
@type context :: :datetime | :date | :time
@type t :: %__MODULE__{
string: String.t(),
mod: module(),
context: context(),
preflight: any(),
opts: Keyword.t()
}
alias DateTimeParser.Parser
require Logger
@doc """
Determine if the string is appropriate to parse with this parser. If not, then other parsers will
be attempted.
"""
@callback preflight(t()) :: {:ok, t()} | {:error, :not_compatible}
@doc """
Parse the string.
"""
@callback parse(t()) ::
{:ok, DateTime.t() | NaiveDateTime.t() | Time.t() | Date.t()} | {:error, any()}
@default_parsers [Parser.Epoch, Parser.Serial, Parser.Tokenizer]
@builtin_parsers @default_parsers ++
[Parser.DateTime, Parser.DateTimeUS, Parser.Date, Parser.DateUS, Parser.Time]
@doc false
def builtin_parsers, do: @builtin_parsers
@doc false
def default_parsers, do: @default_parsers
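  # build/3 below walks the configured parsers in order and picks the first one
  # whose preflight/1 succeeds; the parser list comes from the :parsers option,
  # falling back to application config and then @default_parsers.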
@doc false
def build(string, context, opts) do
parser = %__MODULE__{context: context, string: string, opts: opts}
{parsers, opts} =
Keyword.pop(
opts,
:parsers,
Application.get_env(:date_time_parser, :parsers, @default_parsers)
)
parsers
|> Enum.map(&to_parser_mod/1)
|> Enum.find_value({:error, :no_parser}, fn parser_mod ->
case parser_mod.preflight(parser) do
{:ok, parser} -> {:ok, put_new_mod(%{parser | opts: opts}, parser_mod)}
{:error, _} -> false
end
end)
end
defp put_new_mod(%{mod: nil} = parser, mod), do: %{parser | mod: mod}
defp put_new_mod(parser, _), do: parser
defp to_parser_mod(:tokenizer) do
Logger.info("Using :tokenizer is deprecated. Use DateTimeParser.Parser.Tokenizer instead.")
Parser.Tokenizer
end
defp to_parser_mod(:epoch) do
Logger.info("Using :epoch is deprecated. Use DateTimeParser.Parser.Epoch instead.")
Parser.Epoch
end
defp to_parser_mod(:serial) do
Logger.info("Using :serial is deprecated. Use DateTimeParser.Parser.Serial instead.")
Parser.Serial
end
defp to_parser_mod(mod), do: mod
end
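# A minimal sketch of a custom parser implementing the behaviour above. The
# module name and the "today" convention are illustrative, not part of the
# library; wire it in via the :parsers option, e.g.
# `parsers: [MyApp.TodayParser | DateTimeParser.Parser.default_parsers()]`.
defmodule MyApp.TodayParser do
  @behaviour DateTimeParser.Parser
  @impl DateTimeParser.Parser
  def preflight(%DateTimeParser.Parser{string: "today"} = parser), do: {:ok, parser}
  def preflight(_parser), do: {:error, :not_compatible}
  @impl DateTimeParser.Parser
  def parse(%DateTimeParser.Parser{}), do: {:ok, Date.utc_today()}
end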
|
lib/parser.ex
| 0.847385 | 0.528838 |
parser.ex
|
starcoder
|
defmodule Formex.Ecto.Type do
@moduledoc """
Module that must be used in form types that uses Ecto.
# Installation
Just add `use Formex.Ecto.Type`
# Example
```
defmodule App.ArticleType do
use Formex.Type
use Formex.Ecto.Type
def build_form(form) do
form
|> add(:title, :text_input, label: "Title")
# ...
end
# optional
def fields_casted_manually(_form) do
# do some fields will be casted later?
[]
end
# optional
def modify_changeset(changeset, form) do
    # Modify the changeset. If you change some data here, it will be saved to the database.
# You can also add validation rules here
changeset
end
```
# Example with `Arc.Ecto`
```
defmodule App.ArticleType do
# ...
def build_form(form) do
form
|> add(:image, :file_input, label: "Image")
# ...
end
    # Arc.Ecto.cast_attachments doesn't work if we used Ecto.Changeset.cast/3 on :image
    # (Formex.Ecto.Changeset does this automatically), therefore we must indicate that this
    # field will be cast manually
def fields_casted_manually(_form) do
[:image]
end
# manually use `Arc.Ecto.cast_attachment/3`
def modify_changeset(changeset, _form) do
changeset
|> cast_attachments(changeset.params, [:image])
end
end
```
"""
defmacro __using__([]) do
quote do
@behaviour Formex.Ecto.Type
def modify_changeset(changeset, _form) do
changeset
end
def fields_casted_manually(_form) do
[]
end
defoverridable modify_changeset: 2, fields_casted_manually: 1
end
end
@doc """
Callback that will be called after changeset creation.
In this callback you can modify changeset.
Any errors added here will not be displayed together with errors added normally, i.e. using
`Formex.Validator`. Insert/update actions are performed only when Formex.Validator validation
passes. Errors from changeset are added to form after insert/update failure.
"""
@callback modify_changeset(changeset :: Ecto.Changeset.t(), form :: Formex.Form.t()) ::
Ecto.Changeset.t()
@doc """
  Lists the fields that should be cast manually.
  Fields listed here will not be cast automatically by the `Ecto.Changeset.cast/3` function.
  You must cast them yourself in `c:modify_changeset/2`.
"""
@callback fields_casted_manually(form :: Formex.Form.t()) :: List.t()
end
|
lib/type.ex
| 0.863909 | 0.574783 |
type.ex
|
starcoder
|
defmodule Cryptopunk.Derivation do
@moduledoc """
Implements key derivation logic.
See https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki
"""
alias Cryptopunk.Derivation.Path
alias Cryptopunk.Key
alias Cryptopunk.Utils
import Path, only: [is_normal: 1, is_hardened: 1]
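  # Order `n` of the secp256k1 curve group; child private keys are reduced
  # modulo `n`, per BIP-32.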
@order 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
@spec derive(Key.t(), Path.t() | Path.raw_path()) :: Key.t()
def derive(key, %Path{} = path) do
raw_path = Path.to_raw_path(path)
derive(key, raw_path)
end
def derive(%Key{type: :public}, {:private, _}) do
raise ArgumentError, message: "Can not derive private key from public key"
end
def derive(%Key{type: :private} = key, {:public, path}) do
key
|> do_derive(path)
|> Key.public_from_private()
end
def derive(key, {_type, path}) do
do_derive(key, path)
end
def do_derive(key, []), do: key
def do_derive(%Key{chain_code: chain_code, type: :private} = private_key, [idx | tail])
when is_normal(idx) do
ser_public_key =
private_key
|> Key.public_from_private()
|> Utils.compress_public_key()
new_private_key =
chain_code
|> Utils.hmac_sha512(<<ser_public_key::binary, idx::32>>)
|> create_from_private_key(private_key, idx)
do_derive(new_private_key, tail)
end
def do_derive(%Key{chain_code: chain_code, key: key, type: :private} = private_key, [idx | tail])
when is_hardened(idx) do
new_private_key =
chain_code
    |> Utils.hmac_sha512(<<0::8, key::binary, idx::32>>)
|> create_from_private_key(private_key, idx)
do_derive(new_private_key, tail)
end
def do_derive(%Key{chain_code: chain_code, type: :public} = public_key, [idx | tail])
when is_normal(idx) do
ser_public_key = Utils.compress_public_key(public_key)
    new_public_key =
      chain_code
      |> Utils.hmac_sha512(<<ser_public_key::binary, idx::32>>)
      |> create_from_public_key(public_key, idx)
    do_derive(new_public_key, tail)
end
def do_derive(%Key{type: :public}, [idx | _tail]) when is_hardened(idx) do
raise ArgumentError, message: "Can not derive hardened key from public key"
end
defp create_from_public_key(
<<l_l::binary-32, l_r::binary>>,
%Key{key: key, type: :public} = parent_key,
idx
) do
{:ok, new_public_key} = ExSecp256k1.public_key_tweak_add(key, l_l)
Key.new_public(
key: new_public_key,
chain_code: l_r,
parent_key: parent_key,
index: idx
)
end
defp create_from_private_key(
<<new_key::256, new_chain::binary>>,
%Key{key: <<parent_key::256>>, type: :private} = parent_key_struct,
idx
) do
new_private_key =
new_key
|> Kernel.+(parent_key)
|> rem(@order)
|> :binary.encode_unsigned()
|> pad()
Key.new_private(
key: new_private_key,
chain_code: new_chain,
parent_key: parent_key_struct,
index: idx
)
end
defp pad(binary) when byte_size(binary) >= 32, do: binary
defp pad(binary) do
bits = (32 - byte_size(binary)) * 8
<<0::size(bits)>> <> binary
end
end
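# Hedged usage sketch: deriving the BIP-44 path m/44'/0'/0'/0/0 with the raw
# path form accepted by derive/2. `master` is assumed to be a private
# %Cryptopunk.Key{} obtained elsewhere (e.g. from a seed); hardened indices
# carry the 2^31 offset, matching the is_hardened/1 guard.
#
#     hardened = fn i -> i + 0x80000000 end
#     raw_path = {:private, [hardened.(44), hardened.(0), hardened.(0), 0, 0]}
#     child = Cryptopunk.Derivation.derive(master, raw_path)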
|
lib/cryptopunk/derivation.ex
| 0.875887 | 0.512266 |
derivation.ex
|
starcoder
|
defmodule Jeaux.Params do
@moduledoc false
def compare(params, schema) do
params
|> ProperCase.to_snake_case
|> keys_to_atoms
|> apply_defaults(schema)
|> validate_required(schema)
|> parse_into_types(schema)
|> validate_types(schema)
|> validate_min(schema)
|> validate_max(schema)
|> validate_valid(schema)
|> validate_nested(schema)
end
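  # From validate_required/2 onward, each stage receives {:ok, params} or
  # {:error, message} and short-circuits on error, so the first failure found
  # is the one reported.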
defp keys_to_atoms(params) do
keys = Map.keys(params)
convert_all_keys(keys, params)
end
defp convert_all_keys([], _params), do: %{}
defp convert_all_keys([k | tail], params) when is_binary(k) do
if is_map(params[k]) do
Map.put(convert_all_keys(tail, params), String.to_atom(k), keys_to_atoms(params[k]))
else
Map.put(convert_all_keys(tail, params), String.to_atom(k), params[k])
end
end
defp convert_all_keys([k | tail], params) do
if is_map(params[k]) do
Map.put(convert_all_keys(tail, params), k, keys_to_atoms(params[k]))
else
Map.put(convert_all_keys(tail, params), k, params[k])
end
end
defp apply_defaults(params, schema) do
param_keys = Map.keys(params)
default_schema_keys =
schema
|> Enum.filter(fn({_k, v}) ->
case is_map(v) do
true -> false
false -> Keyword.get(v, :default) !== nil
end
end)
|> Keyword.keys
|> Enum.filter(&(!Enum.member?(param_keys, &1) || params[&1] === nil))
add_defaults(params, schema, default_schema_keys)
end
defp validate_required(params, schema) do
param_keys = Map.keys(params)
compared_params =
schema
|> Enum.filter(fn({_k, v}) ->
case is_map(v) do
true -> false
false -> Keyword.get(v, :required) === true
end
end)
|> Keyword.keys
|> Enum.drop_while(fn(required_param) -> Enum.member?(param_keys, required_param) end)
case Enum.empty?(compared_params) do
true -> {:ok, params}
false ->
[first_required_param | _tail] = compared_params
{:error, "#{first_required_param} is required."}
end
end
defp parse_into_types({:error, message}, _schema), do: {:error, message}
defp parse_into_types({:ok, params}, schema) do
params_keys = Map.keys(params)
{:ok, check_and_format_types(params, schema, params_keys)}
end
defp validate_types({:error, message}, _schema), do: {:error, message}
defp validate_types({:ok, params}, schema) do
errors = Enum.reduce params, [], fn {k, v}, error_list ->
type =
case is_map(schema[k]) do
true -> nil
false -> Keyword.get(schema[k] || [], :type)
end
validate_type({k, v}, schema[k], type) ++ error_list
end
case Enum.empty?(errors) do
true -> {:ok, params}
false ->
[first_error | _tail] = errors
first_error
end
end
defp check_and_format_types(params, _schema, []), do: params
defp check_and_format_types(params, schema, [k | tail]) do
expected_type =
case is_map(schema[k]) do
true -> nil
false -> Keyword.get(schema[k] || [], :type)
end
is_expected? =
case expected_type do
:list -> is_list(params[k])
:string -> is_binary(params[k])
:guid -> is_binary(params[k])
:float -> is_float(params[k])
:integer -> is_integer(params[k])
:boolean -> is_boolean(params[k])
nil -> true
end
case is_expected? do
true -> Map.put(check_and_format_types(params, schema, tail), k, params[k])
false ->
parsed_value = try_to_parse(params[k], expected_type)
Map.put(check_and_format_types(params, schema, tail), k, parsed_value)
end
end
defp try_to_parse(value, :string), do: to_string(value)
defp try_to_parse(value, :guid), do: to_string(value)
  defp try_to_parse(value, :float) when is_integer(value), do: value * 1.0
defp try_to_parse(value, :float) when is_binary(value) do
case Float.parse(value) do
{v, _} -> v
:error -> value
end
end
defp try_to_parse(value, :integer) when is_binary(value) do
case Integer.parse(value) do
{v, _} -> v
:error -> value
end
end
defp try_to_parse(value, :integer) when is_float(value), do: round(value)
defp try_to_parse(value, :list) when is_binary(value), do: String.split(value, ",")
defp try_to_parse(value, :list), do: value
defp try_to_parse("true", :boolean), do: true
defp try_to_parse("false", :boolean), do: false
defp try_to_parse(value, :boolean), do: value
defp validate_min({:error, message}, _schema), do: {:error, message}
defp validate_min({:ok, params}, schema) do
minimum_schema_keys =
schema
|> Enum.filter(fn({_k, v}) ->
case is_map(v) do
true -> false
false -> Keyword.get(v, :min) !== nil
end
end)
|> Keyword.keys
errors = Enum.reduce minimum_schema_keys, [], fn k, error_list ->
minimum = Keyword.get(schema[k], :min)
case params[k] >= minimum do
true -> [] ++ error_list
false -> [{:error, "#{k} must be greater than or equal to #{minimum}"}] ++ error_list
end
end
case Enum.empty?(errors) do
true -> {:ok, params}
false ->
[first_error | _tail] = errors
first_error
end
end
defp validate_max({:error, message}, _schema), do: {:error, message}
defp validate_max({:ok, params}, schema) do
maximum_schema_keys =
schema
|> Enum.filter(fn({_k, v}) ->
case is_map(v) do
true -> false
false -> Keyword.get(v, :max) !== nil
end
end)
|> Keyword.keys
errors = Enum.reduce maximum_schema_keys, [], fn k, error_list ->
maximum = Keyword.get(schema[k], :max)
case params[k] <= maximum do
true -> [] ++ error_list
false -> [{:error, "#{k} must be less than or equal to #{maximum}"}] ++ error_list
end
end
case Enum.empty?(errors) do
true -> {:ok, params}
false ->
[first_error | _tail] = errors
first_error
end
end
defp validate_valid({:error, message}, _schema), do: {:error, message}
defp validate_valid({:ok, params}, schema) do
valid_keys =
schema
|> Enum.filter(fn({_k, v}) ->
case is_map(v) do
true -> false
false -> Keyword.get(v, :valid) !== nil
end
end)
|> Keyword.keys
errors = Enum.reduce valid_keys, [], fn k, error_list ->
vals = Keyword.get(schema[k], :valid)
valid_values =
case is_list(vals) do
true -> vals
false -> [vals]
end
case Enum.any?(valid_values, &(&1 === params[k])) do
true -> [] ++ error_list
      false -> [{:error, "#{k} is not a valid value."}] ++ error_list
end
end
case Enum.empty?(errors) do
true -> {:ok, params}
false ->
[first_error | _tail] = errors
first_error
end
end
defp validate_type({k, _v}, nil, _type), do: [{:error, "#{k} is not a valid parameter"}]
defp validate_type(_param, _schema, nil), do: []
defp validate_type({k, v}, _schema, :integer) do
case is_integer(v) do
true -> []
false -> [{:error, "#{k} must be an integer."}]
end
end
defp validate_type({k, v}, _schema, :float) do
case is_float(v) do
true -> []
false -> [{:error, "#{k} must be a float."}]
end
end
defp validate_type({k, v}, _schema, :string) do
case is_binary(v) do
true -> []
false -> [{:error, "#{k} must be a string."}]
end
end
defp validate_type({k, v}, _schema, :list) do
case is_list(v) do
true -> []
false -> [{:error, "#{k} must be a list."}]
end
end
defp validate_type({k, v}, _schema, :guid) do
case guid_match?(v) do
true -> []
false -> [{:error, "#{k} must be in valid guid format."}]
end
end
defp validate_type({k, v}, _schema, :boolean) do
case is_boolean(v) do
true -> []
false -> [{:error, "#{k} must be a boolean."}]
end
end
defp add_defaults(params, _schema, []), do: params
defp add_defaults(params, schema, [k | tail]) do
default = Keyword.get(schema[k], :default)
Map.put(add_defaults(params, schema, tail), k, default)
end
defp validate_nested({:error, message}, _schema), do: {:error, message}
defp validate_nested({:ok, params}, schema) do
keys_with_maps =
schema
|> Enum.filter(fn({_k, v}) -> is_map(v) end)
|> Keyword.keys
case each_nested(keys_with_maps, params, schema) do
{:error, message} -> {:error, message}
new_params -> {:ok, new_params}
end
end
defp each_nested([], params, _schema), do: params
defp each_nested([k | tail], params, schema) do
case is_map(params[k]) do
true ->
case Jeaux.validate(params[k], schema[k]) do
{:ok, new_params} -> Map.put(each_nested(tail, params, schema), k, new_params)
{:error, message} -> {:error, message}
end
false -> {:error, "expected #{k} to be a map"}
end
end
defp guid_match?(v) do
Regex.match?(~r/\A[A-F0-9]{8}(?:-?[A-F0-9]{4}){3}-?[A-F0-9]{12}\z/i, v) ||
Regex.match?(~r/\A\{[A-F0-9]{8}(?:-?[A-F0-9]{4}){3}-?[A-F0-9]{12}\}\z/i, v)
end
end
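# Hedged usage sketch of a schema as consumed by compare/2 above (field rules
# are keyword lists; a nested map validates nested params):
#
#     schema = [name: [required: true, type: :string],
#               age: [type: :integer, min: 0, default: 18]]
#     Jeaux.Params.compare(%{"name" => "Ada", "age" => "36"}, schema)
#     #=> {:ok, %{name: "Ada", age: 36}}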
|
lib/jeaux/params.ex
| 0.546012 | 0.425665 |
params.ex
|
starcoder
|
defmodule Axon.CompilerError do
defexception [:exception, :graph]
@impl true
def message(%{graph: %Axon{op: op}, exception: exception}) do
op_inspect =
if is_atom(op) do
Atom.to_string(op)
else
"#{inspect(op)}"
end
"""
error while building prediction for #{op_inspect}:
** (#{inspect(exception.__struct__)}) #{Exception.message(exception)}
"""
end
end
defmodule Axon.Compiler do
@moduledoc false
require Logger
import Axon.Shared
## Init JIT Compilation
@doc false
def __compile__(graph, opts) do
mode = opts[:mode] || :inference
{compile_init(graph), compile_predict(graph, mode)}
end
@doc false
def __jit_init__(graph, [] = args, opts) do
fun = compile_init(graph)
Nx.Defn.jit_or_apply(fun, args, opts)
end
defp compile_init(%Axon{} = graph) do
init_fn = fn ->
{cache, _} = to_init_fun(graph, {%{}, %{}})
cache
|> Enum.reduce(%{}, fn {_, layer}, layers_acc ->
Map.merge(layer, layers_acc)
end)
end
fn -> Nx.Defn.jit_or_apply(init_fn, []) end
end
defp compile_init(graph) do
raise ArgumentError,
"attempting to compile initialization function from" <>
" an unrecognized graph #{inspect(graph)}, if you" <>
" are attempting to initialize a model with a container" <>
" output, use `Axon.container`"
end
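  # to_init_fun/2 walks the graph depth-first, accumulating one parameter map
  # per layer (keyed by layer id) together with per-op counters that feed the
  # default layer-naming functions.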
defp to_init_fun(
%Axon{
id: id,
parent: parent,
op: op,
name: name_fn,
params: params,
policy: %{params: dtype},
hooks: hooks
},
cache_and_counts
) do
{cache, op_counts} =
case {op, parent} do
{:container, [parent]} ->
deep_reduce(parent, cache_and_counts, &to_init_fun/2)
{_, nil} ->
cache_and_counts
{_, parents} when is_list(parents) ->
Enum.reduce(parents, cache_and_counts, &to_init_fun/2)
{_, parents} when is_tuple(parents) ->
deep_reduce(parents, cache_and_counts, &to_init_fun/2)
end
case cache do
%{^id => _} ->
{cache, op_counts}
%{} ->
if Enum.empty?(params) do
{cache, op_counts}
else
layer_params =
Enum.reduce(params, %{}, fn {key, param}, layer_params ->
init_param(key, param, layer_params, dtype)
end)
layer_params = apply_hooks(layer_params, :initialize, nil, hooks)
name = name_fn.(op, op_counts)
params = %{name => layer_params}
{
Map.put(cache, id, params),
Map.update(op_counts, op, 1, fn x -> x + 1 end)
}
end
end
end
defp init_param(key, param, layer_params, dtype) do
case param do
%{name: name, shape: shape, initializer: initializer} ->
fun = apply(Axon.Initializers, initializer, [[type: dtype, shape: shape]])
Map.put(layer_params, name, fun)
params when is_tuple(params) ->
params
|> Tuple.to_list()
|> Enum.map(fn %{shape: shape, initializer: initializer} ->
apply(Axon.Initializers, initializer, [[type: dtype, shape: shape]])
end)
|> List.to_tuple()
|> then(&Map.put(layer_params, key, &1))
end
end
## Model JIT Compilation
@doc false
def __jit_predict__(graph, args, opts) do
{mode, opts} = Keyword.pop(opts, :mode, :inference)
fun = compile_predict(graph, mode)
Nx.Defn.jit_or_apply(fun, args, opts)
end
defp compile_predict(%Axon{} = graph, mode) do
{root_id, {cache, _op_counts}} = to_predict_fun(graph, {%{}, %{}}, mode)
predict_fn = fn params, inputs ->
try do
case mode do
:train ->
{pred_expr, {state_expr, _}} = cache[root_id].(params, inputs, %{}, cache, %{})
%{prediction: pred_expr, state: state_expr}
:inference ->
{pred_expr, _} = cache[root_id].(params, inputs, %{}, cache, %{})
pred_expr
end
rescue
e -> reraise Axon.CompilerError.exception(graph: graph, exception: e), __STACKTRACE__
end
end
&Nx.Defn.jit_or_apply(predict_fn, [&1, &2])
end
defp compile_predict(graph, _mode) do
raise ArgumentError,
"attempting to compile predict function from" <>
" an unrecognized graph #{inspect(graph)}, if you" <>
" are attempting to initialize a model with a container" <>
" output, use `Axon.container`"
end
defp to_predict_fun(%{id: id} = graph, {cache, op_counts}, mode) do
case cache do
%{^id => _} ->
{id, {cache, op_counts}}
%{} ->
try do
recur_predict_fun(graph, {cache, op_counts}, mode)
rescue
e -> reraise Axon.CompilerError.exception(graph: graph, exception: e), __STACKTRACE__
end
end
end
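  # Compiled layer funs live in `cache`, keyed by layer id. At execution time,
  # `result_cache` memoizes each layer's evaluated expression (and state), so a
  # layer shared by several consumers is only computed once per call.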
defp call_cache(parent_id, params, inputs, state, cache, result_cache) do
key = {:cache, parent_id}
case result_cache do
%{^key => {expr, state}} ->
{expr, {state, result_cache}}
%{} ->
{expr, {state, result_cache}} =
cache[parent_id].(params, inputs, state, cache, result_cache)
{expr, {state, Map.put(result_cache, key, {expr, state})}}
end
end
defp recur_predict_fun(
%Axon{id: id, op: :container, parent: [parents]},
cache_and_counts,
mode
) do
{parent_ids, {cache, op_counts}} =
deep_map_reduce(parents, cache_and_counts, &to_predict_fun(&1, &2, mode))
op_counts = Map.update(op_counts, :container, 1, fn x -> x + 1 end)
fun = fn params, inputs, state, cache, result_cache ->
deep_map_reduce(parent_ids, {state, result_cache}, fn parent_id, {state, result_cache} ->
call_cache(parent_id, params, inputs, state, cache, result_cache)
end)
end
{id, {Map.put(cache, id, fun), op_counts}}
end
## Custom Layers
defp recur_predict_fun(
%Axon{
id: id,
name: name_fn,
op: op,
parent: parents,
params: layer_params,
opts: opts,
policy: %{compute: compute, output: output},
hooks: hooks
},
cache_and_counts,
mode
)
when is_function(op) and is_list(parents) do
{parent_ids, {cache, op_counts}} =
Enum.map_reduce(
parents,
cache_and_counts,
&to_predict_fun(&1, &2, mode)
)
{_, opts} = Keyword.pop(opts, :layer_op)
name = name_fn.(op, op_counts)
op_counts = Map.update(op_counts, op, 1, fn x -> x + 1 end)
layer_params =
Enum.map(layer_params, fn {k, %{name: v, frozen: frz}} ->
{k, {v, frz}}
end)
fun = fn params, inputs, state, cache, result_cache ->
{res, {state, result_cache}} =
parent_ids
|> Enum.map_reduce({state, result_cache}, fn parent_id, {state, result_cache} ->
call_cache(parent_id, params, inputs, state, cache, result_cache)
end)
inp_params =
Map.new(layer_params, fn {k, {v, frz}} ->
{k, maybe_freeze(params[name][v], frz)}
end)
inputs =
res
|> Enum.map(&safe_as_type(&1, compute))
|> Enum.map(&apply_hooks(&1, :pre_forward, mode, hooks))
args =
case opts do
[] ->
inputs ++ [inp_params]
[_ | _] ->
inputs ++ [inp_params, opts]
end
out =
args
|> then(&apply(op, &1))
|> apply_hooks(:forward, mode, hooks)
|> apply_hooks(:backward, mode, hooks)
|> safe_as_type(output)
{out, {state, result_cache}}
end
{id, {Map.put(cache, id, fun), op_counts}}
end
## Activation Layers
@activation_layers [:celu, :elu, :exp, :gelu, :hard_sigmoid, :hard_silu, :hard_tanh] ++
[:leaky_relu, :linear, :log_sigmoid, :mish, :relu, :relu6] ++
[:sigmoid, :silu, :selu, :softmax, :softplus, :softsign, :tanh] ++
[:log_softmax]
defp recur_predict_fun(
%Axon{
id: id,
op: op,
parent: [parent],
policy: %{compute: compute, output: output},
opts: opts,
hooks: hooks
},
cache_and_counts,
mode
)
when op in @activation_layers do
{parent_id, {cache, op_counts}} = to_predict_fun(parent, cache_and_counts, mode)
op_counts = Map.update(op_counts, op, 1, fn x -> x + 1 end)
fun = fn params, inputs, state, cache, result_cache ->
{res, {state, result_cache}} =
call_cache(parent_id, params, inputs, state, cache, result_cache)
res =
res
|> safe_as_type(compute)
|> apply_hooks(:pre_forward, mode, hooks)
args =
case opts do
[] ->
[res]
[_ | _] ->
[res, opts]
end
res =
args
|> then(&apply(Axon.Activations, op, &1))
|> safe_as_type(output)
|> apply_hooks(:forward, mode, hooks)
|> apply_hooks(:backward, mode, hooks)
{res, {state, result_cache}}
end
{id, {Map.put(cache, id, fun), op_counts}}
end
## Linear Layers
@linear_layers [:dense, :bilinear, :conv, :depthwise_conv, :conv_transpose]
defp recur_predict_fun(
%Axon{
id: id,
op: op,
name: name_fn,
parent: parent,
params: layer_params,
policy: %{compute: compute, output: output},
opts: opts,
hooks: hooks
},
cache_and_counts,
mode
)
when op in @linear_layers do
{parent_ids, {cache, op_counts}} =
Enum.map_reduce(
parent,
cache_and_counts,
&to_predict_fun(&1, &2, mode)
)
layer_name = name_fn.(op, op_counts)
op_counts = Map.update(op_counts, op, 1, fn x -> x + 1 end)
{use_bias, opts} = Keyword.pop(opts, :use_bias, true)
%{frozen: w_frz} = layer_params["kernel"]
%{frozen: b_frz} = if use_bias, do: layer_params["bias"], else: %{frozen: false}
fun = fn params, inputs, state, cache, result_cache ->
{res, {state, result_cache}} =
parent_ids
|> Enum.map_reduce({state, result_cache}, fn parent_id, {state, result_cache} ->
call_cache(parent_id, params, inputs, state, cache, result_cache)
end)
inputs =
res
|> Enum.map(&safe_as_type(&1, compute))
|> Enum.map(&apply_hooks(&1, :pre_forward, mode, hooks))
w = get_param(params, layer_name, "kernel", w_frz, compute)
b =
if use_bias do
get_param(params, layer_name, "bias", b_frz, compute)
else
Nx.tensor(0.0, type: compute)
end
args =
case opts do
[] ->
inputs ++ [w, b]
[_ | _] ->
inputs ++ [w, b, opts]
end
res =
args
|> then(&apply(Axon.Layers, op, &1))
|> safe_as_type(output)
|> apply_hooks(:forward, mode, hooks)
|> apply_hooks(:backward, mode, hooks)
{res, {state, result_cache}}
end
{id, {Map.put(cache, id, fun), op_counts}}
end
defp recur_predict_fun(
%Axon{
id: id,
name: name_fn,
op: :bias,
parent: parent,
params: layer_params,
policy: %{compute: compute, output: output},
hooks: hooks
},
cache_and_counts,
mode
) do
{parent_ids, {cache, op_counts}} =
Enum.map_reduce(
parent,
cache_and_counts,
&to_predict_fun(&1, &2, mode)
)
layer_name = name_fn.(:bias, op_counts)
op_counts = Map.update(op_counts, :bias, 1, fn x -> x + 1 end)
%{frozen: b_frz} = layer_params["bias"]
fun = fn params, inputs, state, cache, result_cache ->
{res, {state, result_cache}} =
parent_ids
|> Enum.map_reduce({state, result_cache}, fn parent_id, {state, result_cache} ->
call_cache(parent_id, params, inputs, state, cache, result_cache)
end)
b = get_param(params, layer_name, "bias", b_frz, compute)
inputs =
res
|> Enum.map(&safe_as_type(&1, compute))
|> Enum.map(&apply_hooks(&1, :pre_forward, mode, hooks))
args = inputs ++ [b]
res =
args
|> then(&apply(Axon.Layers, :bias, &1))
|> safe_as_type(output)
|> apply_hooks(:forward, mode, hooks)
|> apply_hooks(:backward, mode, hooks)
{res, {state, result_cache}}
end
{id, {Map.put(cache, id, fun), op_counts}}
end
## Sparse Layers
defp recur_predict_fun(
%Axon{
id: id,
name: name_fn,
op: :embedding,
parent: [parent],
params: layer_params,
policy: %{compute: compute, output: output},
hooks: hooks
},
cache_and_counts,
mode
) do
{parent_id, {cache, op_counts}} = to_predict_fun(parent, cache_and_counts, mode)
layer_name = name_fn.(:embedding, op_counts)
op_counts = Map.update(op_counts, :embedding, 1, fn x -> x + 1 end)
%{frozen: w_frz} = layer_params["kernel"]
fun = fn params, inputs, state, cache, result_cache ->
{res, {state, result_cache}} =
call_cache(parent_id, params, inputs, state, cache, result_cache)
w = get_param(params, layer_name, "kernel", w_frz, compute)
res =
res
|> apply_hooks(:pre_forward, :inference, hooks)
|> safe_as_type({:s, 64})
|> Axon.Layers.embedding(w)
|> safe_as_type(output)
|> apply_hooks(:forward, :inference, hooks)
|> apply_hooks(:backward, :inference, hooks)
{res, {state, result_cache}}
end
{id, {Map.put(cache, id, fun), op_counts}}
end
## Pooling Layers
@pooling_layers [:max_pool, :avg_pool, :adaptive_avg_pool] ++
[:adaptive_max_pool, :adaptive_lp_pool, :lp_pool] ++
[:global_lp_pool, :global_max_pool, :global_avg_pool]
defp recur_predict_fun(
%Axon{
id: id,
op: op,
parent: [parent],
opts: opts,
policy: %{compute: compute, output: output},
hooks: hooks
},
cache_and_counts,
mode
)
when op in @pooling_layers do
{parent_id, {cache, op_counts}} = to_predict_fun(parent, cache_and_counts, mode)
op_counts = Map.update(op_counts, op, 1, fn x -> x + 1 end)
fun = fn params, inputs, state, cache, result_cache ->
{res, {state, result_cache}} =
call_cache(parent_id, params, inputs, state, cache, result_cache)
res =
res
|> safe_as_type(compute)
|> apply_hooks(:pre_forward, :inference, hooks)
|> then(&apply(Axon.Layers, op, [&1, opts]))
|> safe_as_type(output)
|> apply_hooks(:forward, :inference, hooks)
|> apply_hooks(:backward, :inference, hooks)
{res, {state, result_cache}}
end
{id, {Map.put(cache, id, fun), op_counts}}
end
## Dropout Layers
@dropout_layers [:dropout, :feature_alpha_dropout, :spatial_dropout, :alpha_dropout]
defp recur_predict_fun(
%Axon{
id: id,
op: op,
parent: [parent],
opts: opts,
policy: %{compute: compute, output: output},
hooks: hooks
},
cache_and_counts,
mode
)
when op in @dropout_layers do
{parent_id, {cache, op_counts}} = to_predict_fun(parent, cache_and_counts, mode)
op_counts = Map.update(op_counts, op, 1, fn x -> x + 1 end)
fun = fn params, inputs, state, cache, result_cache ->
{inputs, {state, result_cache}} =
call_cache(parent_id, params, inputs, state, cache, result_cache)
res =
case mode do
:train ->
inputs
|> safe_as_type(compute)
|> apply_hooks(:pre_forward, :train, hooks)
|> then(&apply(Axon.Layers, op, [&1, opts]))
|> safe_as_type(output)
|> apply_hooks(:forward, :train, hooks)
|> apply_hooks(:backward, :train, hooks)
:inference ->
# Skip dropout in inference mode
safe_as_type(inputs, output)
end
{res, {state, result_cache}}
end
{id, {Map.put(cache, id, fun), op_counts}}
end
defp recur_predict_fun(
%Axon{
id: id,
name: name_fn,
op: :separable_conv2d,
parent: [parent],
opts: opts,
params: layer_params,
policy: %{compute: compute, output: output},
hooks: hooks
},
cache_and_counts,
mode
) do
{parent_id, {cache, op_counts}} = to_predict_fun(parent, cache_and_counts, mode)
name = name_fn.(:separable_conv2d, op_counts)
op_counts = Map.update(op_counts, :separable_conv2d, 1, fn x -> x + 1 end)
{use_bias, opts} = Keyword.pop!(opts, :use_bias)
%{frozen: k1_frz} = layer_params["k1"]
%{frozen: k2_frz} = layer_params["k2"]
%{frozen: b1_frz} = if use_bias, do: layer_params["b1"], else: %{frozen: false}
%{frozen: b2_frz} = if use_bias, do: layer_params["b2"], else: %{frozen: false}
fun = fn params, inputs, state, cache, result_cache ->
{inputs, {state, result_cache}} =
call_cache(parent_id, params, inputs, state, cache, result_cache)
k1 = get_param(params, name, "kernel_1", k1_frz, compute)
k2 = get_param(params, name, "kernel_2", k2_frz, compute)
{b1, b2} =
if use_bias do
{get_param(params, name, "bias_1", b1_frz, compute),
get_param(params, name, "bias_2", b2_frz, compute)}
else
{Nx.tensor(0, type: compute), Nx.tensor(0, type: compute)}
end
res =
inputs
|> safe_as_type(compute)
|> apply_hooks(:pre_forward, mode, hooks)
|> Axon.Layers.separable_conv2d(k1, b1, k2, b2, opts)
|> safe_as_type(output)
|> apply_hooks(:forward, mode, hooks)
|> apply_hooks(:backward, mode, hooks)
{res, {state, result_cache}}
end
{id, {Map.put(cache, id, fun), op_counts}}
end
defp recur_predict_fun(
%Axon{
id: id,
name: name_fn,
op: :separable_conv3d,
parent: [parent],
opts: opts,
params: layer_params,
policy: %{compute: compute, output: output},
hooks: hooks
},
cache_and_counts,
mode
) do
{parent_id, {cache, op_counts}} = to_predict_fun(parent, cache_and_counts, mode)
name = name_fn.(:separable_conv3d, op_counts)
op_counts = Map.update(op_counts, :separable_conv3d, 1, fn x -> x + 1 end)
{use_bias, opts} = Keyword.pop!(opts, :use_bias)
%{frozen: k1_frz} = layer_params["k1"]
%{frozen: k2_frz} = layer_params["k2"]
%{frozen: k3_frz} = layer_params["k3"]
%{frozen: b1_frz} = if use_bias, do: layer_params["b1"], else: %{frozen: false}
%{frozen: b2_frz} = if use_bias, do: layer_params["b2"], else: %{frozen: false}
%{frozen: b3_frz} = if use_bias, do: layer_params["b3"], else: %{frozen: false}
fun = fn params, inputs, state, cache, result_cache ->
{inputs, {state, result_cache}} =
call_cache(parent_id, params, inputs, state, cache, result_cache)
k1 = get_param(params, name, "kernel_1", k1_frz, compute)
k2 = get_param(params, name, "kernel_2", k2_frz, compute)
k3 = get_param(params, name, "kernel_3", k3_frz, compute)
{b1, b2, b3} =
if use_bias do
{get_param(params, name, "bias_1", b1_frz, compute),
get_param(params, name, "bias_2", b2_frz, compute),
get_param(params, name, "bias_3", b3_frz, compute)}
else
{Nx.tensor(0, type: compute), Nx.tensor(0, type: compute), Nx.tensor(0, type: compute)}
end
res =
inputs
|> safe_as_type(compute)
|> apply_hooks(:pre_forward, mode, hooks)
|> Axon.Layers.separable_conv3d(k1, b1, k2, b2, k3, b3, opts)
|> safe_as_type(output)
|> apply_hooks(:forward, mode, hooks)
|> apply_hooks(:backward, mode, hooks)
{res, {state, result_cache}}
end
{id, {Map.put(cache, id, fun), op_counts}}
end
## Normalization Layers
@normalization_with_stats [:batch_norm, :instance_norm]
defp recur_predict_fun(
%Axon{
id: id,
name: name_fn,
op: op,
parent: [parent],
opts: [epsilon: epsilon, channel_index: channel_index, momentum: momentum],
params: layer_params,
policy: %{compute: compute, output: output},
hooks: hooks
},
cache_and_counts,
mode
)
when op in @normalization_with_stats do
{parent_id, {cache, op_counts}} = to_predict_fun(parent, cache_and_counts, mode)
name = name_fn.(op, op_counts)
op_counts = Map.update(op_counts, op, 1, fn x -> x + 1 end)
training? = mode == :train
norm_opts = [
epsilon: epsilon,
channel_index: channel_index,
momentum: momentum,
training?: training?
]
%{frozen: g_frz} = layer_params["gamma"]
%{frozen: b_frz} = layer_params["beta"]
%{frozen: mean_frz} = layer_params["mean"]
%{frozen: var_frz} = layer_params["var"]
fun = fn params, inputs, state, cache, result_cache ->
{inputs, {state, result_cache}} =
call_cache(parent_id, params, inputs, state, cache, result_cache)
g = get_param(params, name, "gamma", g_frz, compute)
b = get_param(params, name, "beta", b_frz, compute)
mean = get_param(params, name, "mean", mean_frz, compute)
var = get_param(params, name, "var", var_frz, compute)
case mode do
:train ->
{out, ra_mean, ra_var} =
inputs
|> safe_as_type(compute)
|> apply_hooks(:pre_forward, :train, hooks)
|> then(&apply(Axon.Layers, op, [&1, g, b, mean, var, norm_opts]))
|> then(fn {y, m, v} -> {safe_as_type(y, output), m, v} end)
|> apply_hooks(:forward, :train, hooks)
|> apply_hooks(:backward, :train, hooks)
res = safe_as_type(out, output)
state = Map.put(state, name, %{"mean" => ra_mean, "var" => ra_var})
{res, {state, result_cache}}
:inference ->
res =
inputs
|> safe_as_type(compute)
|> apply_hooks(:pre_forward, :inference, hooks)
|> then(&apply(Axon.Layers, op, [&1, g, b, mean, var, norm_opts]))
|> safe_as_type(output)
|> apply_hooks(:forward, :inference, hooks)
|> apply_hooks(:backward, :inference, hooks)
{res, {state, result_cache}}
end
end
{id, {Map.put(cache, id, fun), op_counts}}
end
@normalization_layers [:layer_norm, :group_norm]
defp recur_predict_fun(
%Axon{
id: id,
name: name_fn,
op: op,
parent: [parent],
opts: opts,
params: layer_params,
policy: %{compute: compute, output: output},
hooks: hooks
},
cache_and_counts,
mode
)
when op in @normalization_layers do
{parent_id, {cache, op_counts}} = to_predict_fun(parent, cache_and_counts, mode)
name = name_fn.(op, op_counts)
op_counts = Map.update(op_counts, op, 1, fn x -> x + 1 end)
%{frozen: g_frz} = layer_params["gamma"]
%{frozen: b_frz} = layer_params["beta"]
fun = fn params, inputs, state, cache, result_cache ->
{inputs, {state, result_cache}} =
call_cache(parent_id, params, inputs, state, cache, result_cache)
g = get_param(params, name, "gamma", g_frz, compute)
b = get_param(params, name, "beta", b_frz, compute)
res =
inputs
|> safe_as_type(compute)
|> apply_hooks(:pre_forward, mode, hooks)
|> then(&apply(Axon.Layers, op, [&1, g, b, opts]))
|> safe_as_type(output)
|> apply_hooks(:forward, mode, hooks)
|> apply_hooks(:backward, mode, hooks)
{res, {state, result_cache}}
end
{id, {Map.put(cache, id, fun), op_counts}}
end
## Recurrent Layers
@recurrent_layers [:gru, :lstm, :conv_lstm]
defp recur_predict_fun(
%Axon{
id: id,
name: name_fn,
op: op,
parent: parents,
params: layer_params,
policy: %{compute: compute, output: output},
opts: opts,
hooks: hooks
},
cache_and_counts,
mode
)
when op in @recurrent_layers do
{[input_id, hidden_state_id], {cache, op_counts}} =
Enum.map_reduce(parents, cache_and_counts, &to_predict_fun(&1, &2, mode))
num_bias = if op == :conv_lstm, do: 1, else: 4
{activation, opts} = Keyword.pop(opts, :activation)
{gate, opts} = Keyword.pop(opts, :gate)
{use_bias, opts} = Keyword.pop(opts, :use_bias)
{unroll, conv_opts} = Keyword.pop(opts, :unroll)
name = name_fn.(op, op_counts)
op_counts = Map.update(op_counts, op, 1, fn x -> x + 1 end)
input_kernel = layer_params["input_kernel"]
hidden_kernel = layer_params["hidden_kernel"]
bias =
if use_bias,
do: layer_params["bias"],
else: List.to_tuple(List.duplicate(%{frozen: false}, num_bias))
input_kernel_frozen =
input_kernel
|> Tuple.to_list()
|> Enum.map(fn %{frozen: frz} -> frz end)
|> List.to_tuple()
hidden_kernel_frozen =
hidden_kernel
|> Tuple.to_list()
|> Enum.map(fn %{frozen: frz} -> frz end)
|> List.to_tuple()
bias_frozen =
bias
|> Tuple.to_list()
|> Enum.map(fn %{frozen: frz} -> frz end)
|> List.to_tuple()
fun = fn params, inputs, state, cache, result_cache ->
{input, {state, result_cache}} =
call_cache(input_id, params, inputs, state, cache, result_cache)
{hidden_state, {state, result_cache}} =
call_cache(hidden_state_id, params, inputs, state, cache, result_cache)
input_kernel = get_param(params, name, "input_kernel", input_kernel_frozen, compute)
hidden_kernel = get_param(params, name, "hidden_kernel", hidden_kernel_frozen, compute)
bias =
if use_bias do
get_param(params, name, "bias", bias_frozen, compute)
else
List.duplicate(Nx.tensor(0, type: compute), num_bias)
|> List.to_tuple()
end
input = safe_as_type(input, compute)
carry = deep_new(hidden_state, &safe_as_type(&1, compute))
# TODO: Should these be hooked together? Not at all?
{input, carry} = apply_hooks({input, carry}, :pre_forward, mode, hooks)
cell_fn = get_cell_fn(op, gate, activation, conv_opts)
{carry, out} =
case unroll do
:static ->
Axon.Recurrent.static_unroll(
cell_fn,
input,
carry,
input_kernel,
hidden_kernel,
bias
)
:dynamic ->
Axon.Recurrent.dynamic_unroll(
cell_fn,
input,
carry,
input_kernel,
hidden_kernel,
bias
)
end
res = {deep_new(carry, &safe_as_type(&1, output)), safe_as_type(out, output)}
res = apply_hooks(res, :forward, mode, hooks)
res = apply_hooks(res, :backward, mode, hooks)
{res, {state, result_cache}}
end
{id, {Map.put(cache, id, fun), op_counts}}
end
## Element-wise layers
@element_wise_layers [:add, :subtract, :multiply]
defp recur_predict_fun(
%Axon{
id: id,
op: op,
parent: parents,
policy: %{compute: compute, output: output},
hooks: hooks
},
cache_and_counts,
mode
)
when op in @element_wise_layers do
{parent_ids, {cache, op_counts}} =
Enum.map_reduce(
parents,
cache_and_counts,
&to_predict_fun(&1, &2, mode)
)
op_counts = Map.update(op_counts, op, 1, fn x -> x + 1 end)
fun = fn params, inputs, state, cache, result_cache ->
{[expr | exprs], {state, result_cache}} =
Enum.map_reduce(parent_ids, {state, result_cache}, fn parent_id, {state, result_cache} ->
call_cache(parent_id, params, inputs, state, cache, result_cache)
end)
[expr | exprs] =
[expr | exprs]
|> List.to_tuple()
|> apply_hooks(:pre_forward, mode, hooks)
|> Tuple.to_list()
res =
Enum.reduce(exprs, expr, fn next_expr, acc ->
input = safe_as_type(next_expr, compute)
acc = safe_as_type(acc, compute)
safe_as_type(apply(Nx, op, [acc, input]), output)
end)
res = apply_hooks(res, :forward, mode, hooks)
res = apply_hooks(res, :backward, mode, hooks)
{res, {state, result_cache}}
end
{id, {Map.put(cache, id, fun), op_counts}}
end
## Shape Layers
@shape_layers [:resize, :flatten, :reshape, :transpose, :pad]
defp recur_predict_fun(
%Axon{
id: id,
op: op,
parent: [parent],
policy: %{compute: compute, output: output},
hooks: hooks,
opts: opts
},
cache_and_counts,
mode
)
when op in @shape_layers do
{parent_id, {cache, op_counts}} = to_predict_fun(parent, cache_and_counts, mode)
op_counts = Map.update(op_counts, op, 1, fn x -> x + 1 end)
opts =
case op do
:resize ->
{shape, opts} = Keyword.pop(opts, :resize_shape)
Keyword.put(opts, :shape, shape)
:reshape ->
{shape, opts} = Keyword.pop(opts, :reshape_shape)
Keyword.put(opts, :shape, shape)
_ ->
opts
end
fun = fn params, inputs, state, cache, result_cache ->
{res, {state, result_cache}} =
call_cache(parent_id, params, inputs, state, cache, result_cache)
res =
res
|> safe_as_type(compute)
|> apply_hooks(:pre_forward, mode, hooks)
|> then(&apply(Axon.Layers, op, [&1, opts]))
|> apply_hooks(:forward, mode, hooks)
|> apply_hooks(:backward, mode, hooks)
|> safe_as_type(output)
{res, {state, result_cache}}
end
{id, {Map.put(cache, id, fun), op_counts}}
end
defp recur_predict_fun(
%Axon{
id: id,
op: :concatenate,
parent: parents,
opts: [axis: axis],
policy: %{compute: compute, output: output},
hooks: hooks
},
cache_and_counts,
mode
) do
{parent_ids, {cache, op_counts}} =
Enum.map_reduce(
parents,
cache_and_counts,
&to_predict_fun(&1, &2, mode)
)
op_counts = Map.update(op_counts, :concatenate, 1, fn x -> x + 1 end)
fun = fn params, inputs, state, cache, result_cache ->
{exprs, {state, result_cache}} =
Enum.map_reduce(parent_ids, {state, result_cache}, fn parent_id, {state, result_cache} ->
call_cache(parent_id, params, inputs, state, cache, result_cache)
end)
inps = Enum.map(exprs, &safe_as_type(&1, compute))
inps =
inps
|> List.to_tuple()
|> apply_hooks(:pre_forward, mode, hooks)
|> Tuple.to_list()
res =
inps
|> Nx.concatenate(axis: axis)
|> safe_as_type(output)
|> apply_hooks(:forward, mode, hooks)
|> apply_hooks(:backward, mode, hooks)
{res, {state, result_cache}}
end
{id, {Map.put(cache, id, fun), op_counts}}
end
defp recur_predict_fun(
%Axon{
id: id,
op: :cond,
parent: parents,
opts: [cond: cond_fn],
policy: %{compute: compute, output: output},
hooks: hooks
},
cache_and_counts,
mode
) do
{parent_ids, {cache, op_counts}} =
Enum.map_reduce(
parents,
cache_and_counts,
&to_predict_fun(&1, &2, mode)
)
op_counts = Map.update(op_counts, :cond, 1, fn x -> x + 1 end)
fun = fn params, inputs, state, cache, result_cache ->
{exprs, {state, result_cache}} =
Enum.map_reduce(parent_ids, {state, result_cache}, fn parent_id, {state, result_cache} ->
call_cache(parent_id, params, inputs, state, cache, result_cache)
end)
[cond_input_expr, true_expr, false_expr] = exprs
cond_expr = cond_fn.(cond_input_expr)
cond_rank = Nx.rank(cond_expr)
cond_type = Nx.type(cond_expr)
unless cond_rank == 0 and cond_type == {:u, 8} do
        raise ArgumentError,
"cond_fn must return a scalar-boolean tensor" <>
" got result with rank #{inspect(cond_rank)} and" <>
" type #{inspect(cond_type)}"
end
{cond_expr, on_true, on_false} =
[cond_expr, true_expr, false_expr]
|> List.to_tuple()
|> apply_hooks(:pre_forward, mode, hooks)
res =
Axon.Layers.cond(
Nx.all(cond_expr),
safe_as_type(on_true, compute),
safe_as_type(on_false, compute)
)
res = safe_as_type(res, output)
res = apply_hooks(res, :forward, mode, hooks)
res = apply_hooks(res, :backward, mode, hooks)
{res, {state, result_cache}}
end
{id, {Map.put(cache, id, fun), op_counts}}
end
## Special Layers
defp recur_predict_fun(
%Axon{
id: id,
op: :nx,
parent: [parent],
opts: [fun: nx_fun],
policy: %{compute: compute, output: output},
hooks: hooks
},
cache_and_counts,
mode
) do
{parent_id, {cache, op_counts}} = to_predict_fun(parent, cache_and_counts, mode)
op_counts = Map.update(op_counts, :nx, 1, fn x -> x + 1 end)
fun = fn params, inputs, state, cache, result_cache ->
{res, {state, result_cache}} =
call_cache(parent_id, params, inputs, state, cache, result_cache)
res =
res
|> safe_as_type(compute)
|> apply_hooks(:pre_forward, mode, hooks)
|> nx_fun.()
|> safe_as_type(output)
|> apply_hooks(:forward, mode, hooks)
|> apply_hooks(:backward, mode, hooks)
{res, {state, result_cache}}
end
{id, {Map.put(cache, id, fun), op_counts}}
end
defp recur_predict_fun(
%Axon{id: id, op: :constant, opts: [value: tensor], policy: %{output: output}},
{cache, op_counts},
_
) do
fun = fn _params, _inputs, state, _cache, result_cache ->
out = safe_as_type(tensor, output)
{out, {state, result_cache}}
end
op_counts = Map.update(op_counts, :constant, 1, fn x -> x + 1 end)
{id, {Map.put(cache, id, fun), op_counts}}
end
defp recur_predict_fun(
%Axon{id: id, op: :input, output_shape: shape, hooks: hooks, name: name_fn},
{cache, op_counts},
mode
) do
name = name_fn.(:input, op_counts)
op_counts = Map.update(op_counts, :input, 1, fn x -> x + 1 end)
fun = fn _params, inputs, state, _cache, result_cache ->
res =
case inputs do
%Nx.Tensor{} = inputs ->
inputs
%{} = inputs ->
inputs[name]
_ ->
raise ArgumentError,
"invalid input given to model, expected input" <>
" expected input to be a tensor or a map" <>
" corresponding to correct input names"
end
unless res do
raise ArgumentError,
"unable to find input #{name} for model given to predict," <>
" you must provide an input tensor for every input" <>
" specified in the graph"
end
unless Axon.Shape.compatible?(Nx.shape(res), shape) do
raise ArgumentError,
"invalid input shape given to model, expected input" <>
" with shape #{inspect(shape)}, but got input with" <>
" shape #{inspect(Nx.shape(res))}"
end
res =
res
|> apply_hooks(:forward, mode, hooks)
|> apply_hooks(:backward, mode, hooks)
{res, {state, result_cache}}
end
{id, {Map.put(cache, id, fun), op_counts}}
end
## Helpers
defp maybe_freeze(param, true), do: Nx.Defn.Kernel.stop_grad(param)
defp maybe_freeze(param, false), do: param
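  # Hooks are stored most-recent-first, so they are reversed before being
  # applied. A :backward hook is attached via custom_grad, firing on the
  # gradient instead of the forward value.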
defp apply_hooks(res, event, mode, hooks) do
hooks
|> Enum.reverse()
|> Enum.reduce(res, fn {on_event, on_mode, hook_fn}, expr ->
event? = on_event == event or on_event == :all
mode? = on_mode == mode or on_mode == :both or mode == nil
if event? and mode? do
if on_event == :backward do
Nx.Defn.Kernel.custom_grad(expr, fn _ans, g ->
hooked_g = Nx.Defn.Kernel.hook(g, hook_fn)
[{expr, hooked_g}]
end)
else
Nx.Defn.Kernel.hook(expr, hook_fn)
end
else
expr
end
end)
end
defp get_cell_fn(op, gate, activation, conv_opts) do
case op do
:lstm ->
gate_fn = &apply(Axon.Activations, gate, [&1])
activation_fn = &apply(Axon.Activations, activation, [&1])
&Axon.Recurrent.lstm_cell(&1, &2, &3, &4, &5, gate_fn, activation_fn)
:gru ->
gate_fn = &apply(Axon.Activations, gate, [&1])
activation_fn = &apply(Axon.Activations, activation, [&1])
&Axon.Recurrent.gru_cell(&1, &2, &3, &4, &5, gate_fn, activation_fn)
:conv_lstm ->
&Axon.Recurrent.conv_lstm_cell(&1, &2, &3, &4, &5, conv_opts)
end
end
defp get_param(params, layer_name, param_name, frozen?, type) do
case params[layer_name][param_name] do
tuple when is_tuple(tuple) ->
tuple
|> Tuple.to_list()
|> Enum.zip_with(Tuple.to_list(frozen?), &maybe_freeze/2)
|> List.to_tuple()
|> safe_as_type(type)
param ->
param
|> maybe_freeze(frozen?)
|> safe_as_type(type)
end
end
defp safe_as_type(container_or_tensor, type) do
case container_or_tensor do
%Nx.Tensor{} = tensor ->
Nx.as_type(tensor, type)
container ->
deep_new(container, &Nx.as_type(&1, type))
end
end
end
|
lib/axon/compiler.ex
| 0.645455 | 0.413862 |
compiler.ex
|
starcoder
|
defmodule ExDiceRoller.Compilers.Math do
@moduledoc """
Handles compiling expressions using common mathematical operators.
iex> {:ok, tokens} = ExDiceRoller.Tokenizer.tokenize("1+x")
{:ok, [{:int, 1, '1'}, {:basic_operator, 1, '+'}, {:var, 1, 'x'}]}
iex> {:ok, parse_tree} = ExDiceRoller.Parser.parse(tokens)
{:ok, {{:operator, '+'}, 1, {:var, 'x'}}}
iex> fun = ExDiceRoller.Compilers.Math.compile(parse_tree)
iex> fun.([x: 2])
3
iex> fun.([x: 2.4])
3.4
ExDiceRoller uses [infix notation](https://en.wikipedia.org/wiki/Infix_notation)
when working with mathematical operators. Below is the list of operators
currently supported by ExDiceRoller:
* `+`: adds the values on both sides of the expression
* `-`: subtracts the value on the right from the value on the left
* `*`: multiplies the values on both sides of the expression
* `/`: divides, with the left value as the dividend, the right the divisor
* `%`: [modulo](https://en.wikipedia.org/wiki/Modulo_operation), with the
left the dividend, the right the divisor
* `^`: exponentiation, with the left the base, the right the exponent
"""
@behaviour ExDiceRoller.Compiler
alias ExDiceRoller.{Compiler, ListComprehension}
@err_name "math operators"
@operators [
{'+', &Kernel.+/2, "add"},
{'-', &Kernel.-/2, "sub"},
{'*', &Kernel.*/2, "mul"},
{'/', &__MODULE__.divide/2, "div"},
{'%', &__MODULE__.modulo/2, "mod"},
{'^', &:math.pow/2, "exp"}
]
@doc "Function used for modulo calculations. Only accepts integer values."
@spec modulo(integer, integer) :: integer
def modulo(_, 0), do: raise(ArgumentError, "the divisor cannot be 0")
def modulo(_, 0.0), do: raise(ArgumentError, "the divisor cannot be 0")
def modulo(l, r) when is_integer(l) and is_integer(r) do
rem(Compiler.round_val(l), Compiler.round_val(r))
end
def modulo(_, _), do: raise(ArgumentError, "modulo operator only accepts integer values")
@doc "Function used for division calculations."
@spec divide(Compiler.calculated_val(), Compiler.calculated_val()) :: float
def divide(_, 0), do: raise(ArgumentError, "the divisor cannot be 0")
def divide(_, 0.0), do: raise(ArgumentError, "the divisor cannot be 0")
def divide(l, r) when is_number(l) and is_number(r), do: l / r
@impl true
def compile({{:operator, op}, left_expr, right_expr}) do
compile_op(op, Compiler.delegate(left_expr), Compiler.delegate(right_expr))
end
@spec compile_op(charlist, Compiler.compiled_val(), Compiler.compiled_val()) ::
Compiler.compiled_val()
for {char, _, name} <- @operators do
defp compile_op(unquote(char), l, r), do: unquote(:"compile_#{name}")(l, r)
end
for {_, fun, name} <- @operators do
@spec unquote(:"compile_#{name}")(Compiler.compiled_val(), Compiler.compiled_val()) ::
Compiler.compiled_val()
defp unquote(:"compile_#{name}")(l, r) when is_function(l) and is_function(r) do
fn args ->
ListComprehension.apply(l.(args), r.(args), unquote(fun), @err_name, &op/3)
end
end
defp unquote(:"compile_#{name}")(l, r) when is_function(l) do
fn args ->
ListComprehension.apply(l.(args), r, unquote(fun), @err_name, &op/3)
end
end
defp unquote(:"compile_#{name}")(l, r) when is_function(r) do
fn args ->
ListComprehension.apply(l, r.(args), unquote(fun), @err_name, &op/3)
end
end
defp unquote(:"compile_#{name}")(l, r) do
fn _ ->
ListComprehension.apply(l, r, unquote(fun), @err_name, &op/3)
end
end
end
@spec op(Compiler.calculated_val(), Compiler.calculated_val(), function) ::
Compiler.calculated_val()
defp op(l, r, fun) do
ListComprehension.apply(l, r, [], @err_name, fn l, r, _ -> fun.(l, r) end)
end
end
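# Hedged doctest-style sketch, mirroring the moduledoc above: division of a
# variable by a constant.
#
#     iex> {:ok, tokens} = ExDiceRoller.Tokenizer.tokenize("x/2")
#     iex> {:ok, parse_tree} = ExDiceRoller.Parser.parse(tokens)
#     iex> fun = ExDiceRoller.Compilers.Math.compile(parse_tree)
#     iex> fun.([x: 5])
#     2.5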
|
lib/compilers/math.ex
| 0.7696 | 0.697042 |
math.ex
|
starcoder
|
defmodule AOC.Day3.CrossedWires do
@moduledoc false
@type point :: {integer, integer}
@type segment :: [point, ...]
@type wire :: list(point)
def part1() do
[w1, w2] =
stream_input("./resources/day3_part1_input.txt")
|> process_input()
compute_part1(w1, w2)
end
def part2() do
[w1, w2] =
stream_input("./resources/day3_part1_input.txt")
|> process_input()
compute_part2(w1, w2)
end
def stream_input(path) do
File.stream!(path)
|> Enum.map(fn line ->
line
|> String.trim()
|> String.split(",")
end)
end
def process_input(lines) do
Enum.map(lines, fn line ->
line
|> process_wire_input()
|> points()
end)
end
@spec process_wire_input(list(String.t())) :: list({atom, integer})
def process_wire_input(wire) do
wire
|> Enum.map(fn vector ->
{direction, magnitude} = String.split_at(vector, 1)
direction = String.to_atom(direction)
magnitude = String.to_integer(magnitude)
{direction, magnitude}
end)
end
@spec points(list({atom, integer})) :: list(point)
def points(path) do
path
|> Enum.reduce([{0, 0}], fn {direction, quantity}, acc ->
{x, y} = hd(acc)
p =
cond do
:R == direction -> {x + quantity, y}
:L == direction -> {x - quantity, y}
:U == direction -> {x, y + quantity}
:D == direction -> {x, y - quantity}
end
[p | acc]
end)
|> Enum.reverse()
end
@spec compute_part1(wire, wire) :: non_neg_integer
def compute_part1(w1, w2) do
w1_segments = Enum.chunk_every(w1, 2, 1, :discard)
w2_segments = Enum.chunk_every(w2, 2, 1, :discard)
find_intersections(w1_segments, w2_segments)
|> Enum.min_by(&manhattan_distance(&1, {0, 0}))
|> manhattan_distance({0, 0})
end
@spec compute_part2(wire, wire) :: float
def compute_part2(w1, w2) do
w1_segments = Enum.chunk_every(w1, 2, 1, :discard)
w2_segments = Enum.chunk_every(w2, 2, 1, :discard)
intersections = find_intersections(w1_segments, w2_segments)
w1_sums =
Enum.map(intersections, fn intersection ->
sum_segments_until(w1_segments, intersection)
end)
w2_sums =
Enum.map(intersections, fn intersection ->
sum_segments_until(w2_segments, intersection)
end)
Enum.zip(w1_sums, w2_sums)
|> Enum.map(fn {a, b} -> a + b end)
|> Enum.min()
end
@doc """
Adapted from
https://stackoverflow.com/a/24392281/3141194
http://paulbourke.net/geometry/pointlineplane/javascript.txt
"""
@spec segments_intersect(segment, segment) :: point | :none
def segments_intersect([start1, end1], [start2, end2]) do
{a, b} = start1
{c, d} = end1
{p, q} = start2
{r, s} = end2
determinate = (c - a) * (s - q) - (r - p) * (d - b)
with true <- determinate != 0,
lambda <- ((s - q) * (r - a) + (p - r) * (s - b)) / determinate,
gamma <- ((b - d) * (r - a) + (c - a) * (s - b)) / determinate,
         true <- 0 < lambda and lambda < 1 and (0 < gamma and gamma < 1) do
x = a + lambda * (c - a)
y = b + lambda * (d - b)
{x, y}
else
_ -> :none
end
end
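  # Worked sketch: a horizontal segment and a vertical segment crossing at (5, 0).
  #
  #     iex> AOC.Day3.CrossedWires.segments_intersect([{0, 0}, {10, 0}], [{5, -5}, {5, 5}])
  #     {5.0, 0.0}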
@spec find_intersections(list(segment), list(segment)) :: list(point)
def find_intersections(w1_segments, w2_segments) do
Enum.reduce(w1_segments, [], fn segment1, acc ->
intersections =
Enum.reduce(w2_segments, [], fn segment2, acc ->
intersections = segments_intersect(segment1, segment2)
if :none != intersections do
[intersections | acc]
else
acc
end
end)
intersections ++ acc
end)
end
@spec point_on_segment?(segment, point) :: boolean
def point_on_segment?(segment, point) do
[start, stop] = segment
manhattan_distance(start, stop) ==
manhattan_distance(start, point) + manhattan_distance(stop, point)
end
@spec sum_segments_until(list(segment), point) :: non_neg_integer
def sum_segments_until(segments, point) do
segments
|> Enum.reduce_while(0, fn [start, stop], acc ->
if point_on_segment?([start, stop], point) do
{:halt, acc + manhattan_distance(start, point)}
else
{:cont, acc + manhattan_distance(start, stop)}
end
end)
end
@spec manhattan_distance({integer, integer}, {integer, integer}) :: non_neg_integer
def manhattan_distance({x1, y1}, {x2, y2}) do
abs(y1 - y2) + abs(x1 - x2)
end
end
|
aoc-2019/lib/aoc/day3/crossed_wires.ex
| 0.863435 | 0.521532 |
crossed_wires.ex
|
starcoder
|
defmodule RDF.Graph do
@moduledoc """
A set of RDF triples with an optional name.
`RDF.Graph` implements:
- Elixir's `Access` behaviour
- Elixir's `Enumerable` protocol
- Elixir's `Inspect` protocol
- the `RDF.Data` protocol
"""
defstruct name: nil, descriptions: %{}, prefixes: nil, base_iri: nil
@behaviour Access
alias RDF.{Description, IRI, PrefixMap, PropertyMap}
alias RDF.Graph.Builder
alias RDF.Star.Statement
@type graph_description :: %{Statement.subject() => Description.t()}
@type t :: %__MODULE__{
name: IRI.t() | nil,
descriptions: graph_description,
prefixes: PrefixMap.t() | nil,
base_iri: IRI.t() | nil
}
@type input ::
Statement.coercible()
| {
Statement.coercible_subject(),
Description.input()
}
| Description.t()
| t
| %{
Statement.coercible_subject() => %{
Statement.coercible_predicate() =>
Statement.coercible_object() | [Statement.coercible_object()]
}
}
| list(input)
@type update_description_fun :: (Description.t() -> Description.t())
@type get_and_update_description_fun :: (Description.t() -> {Description.t(), input} | :pop)
@doc """
Creates an empty unnamed `RDF.Graph`.
"""
@spec new :: t
def new, do: %__MODULE__{}
@doc """
Creates an `RDF.Graph`.
If a keyword list with options is given an empty graph is created.
Otherwise an unnamed graph initialized with the given data is created.
See `new/2` for available arguments and the different ways to provide data.
## Examples
RDF.Graph.new(name: EX.GraphName)
RDF.Graph.new(init: {EX.S, EX.p, EX.O})
RDF.Graph.new({EX.S, EX.p, EX.O})
"""
@spec new(input | keyword) :: t
def new(data_or_opts)
def new(data_or_opts) when is_list(data_or_opts) and length(data_or_opts) != 0 do
if Keyword.keyword?(data_or_opts) do
{data, options} = Keyword.pop(data_or_opts, :init)
new(data, options)
else
new(data_or_opts, [])
end
end
def new(data), do: new(data, [])
@doc """
Creates an `RDF.Graph` initialized with data.
The initial RDF triples can be provided
- as a single statement tuple
- a nested subject-predicate-object map
- a `RDF.Description`
- a `RDF.Graph`
- a `RDF.Dataset`
- or a list with any combination of the former
Available options:
- `name`: the name of the graph to be created
- `prefixes`: some prefix mappings which should be stored alongside the graph
and will be used for example when serializing in a format with prefix support
- `base_iri`: a base IRI which should be stored alongside the graph
and will be used for example when serializing in a format with base IRI support
- `init`: some data with which the graph should be initialized; the data can be
provided in any form accepted by `add/3` and above that also with a function returning
the initialization data in any of these forms
## Examples
RDF.Graph.new({EX.S, EX.p, EX.O})
RDF.Graph.new({EX.S, EX.p, EX.O}, name: EX.GraphName)
RDF.Graph.new({EX.S, EX.p, [EX.O1, EX.O2]})
RDF.Graph.new([{EX.S1, EX.p1, EX.O1}, {EX.S2, EX.p2, EX.O2}])
RDF.Graph.new(RDF.Description.new(EX.S, EX.P, EX.O))
RDF.Graph.new([graph, description, triple])
RDF.Graph.new({EX.S, EX.p, EX.O}, name: EX.GraphName, base_iri: EX.base)
"""
@spec new(input, keyword) :: t
def new(data, opts)
def new(%__MODULE__{} = graph, opts) do
%__MODULE__{graph | name: opts |> Keyword.get(:name) |> RDF.coerce_graph_name()}
|> add_prefixes(Keyword.get(opts, :prefixes))
|> set_base_iri(Keyword.get(opts, :base_iri))
end
def new(data, opts) do
new()
|> new(opts)
|> init(data, opts)
end
defp init(graph, nil, _), do: graph
defp init(graph, fun, opts) when is_function(fun), do: add(graph, fun.(), opts)
defp init(graph, data, opts), do: add(graph, data, opts)
@doc """
Builds an `RDF.Graph` from a description of its content in a graph DSL.
All available opts of `new/2` are also supported here.
For a description of the DSL see [this guide](https://rdf-elixir.dev/rdf-ex/description-and-graph-dsl.html).
"""
defmacro build(opts \\ [], do: block) do
Builder.build(block, opts)
end
@doc """
Removes all triples from `graph`.
This function is useful for getting an empty graph based on the settings of
another graph, as this function keeps graph name, base IRI and default prefixes
as they are and just removes the triples.
"""
@spec clear(t) :: t
def clear(%__MODULE__{} = graph) do
%__MODULE__{graph | descriptions: %{}}
end
@doc """
Returns the graph name IRI of `graph`.
"""
@spec name(t) :: Statement.graph_name()
def name(%__MODULE__{} = graph), do: graph.name
@doc """
Changes the graph name of `graph`.
"""
@spec change_name(t, Statement.coercible_graph_name()) :: t
def change_name(%__MODULE__{} = graph, new_name) do
%__MODULE__{graph | name: RDF.coerce_graph_name(new_name)}
end
@doc """
Adds triples to a `RDF.Graph`.
The `input` can be provided
- as a single statement tuple
- a nested subject-predicate-object map
- a `RDF.Description`
- a `RDF.Graph`
- or a list with any combination of the former
When the statements to be added are given as another `RDF.Graph`,
the graph name does not have to match the graph name of the graph to which the
statements are added. In contrast, `RDF.Data.merge/2` will produce a `RDF.Dataset`
containing both graphs.
Also when the statements to be added are given as another `RDF.Graph`, the
prefixes of this graph will be added. In case of conflicting prefix mappings
the original prefix from `graph` will be kept.
RDF-star annotations to be added to all of the given statements can be specified with
the `:add_annotations`, `:put_annotations` or `:put_annotation_properties` keyword
options. They have different addition semantics similar to the `add_annotations/3`,
`put_annotations/3` and `put_annotation_properties/3` counterparts.
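## Examples
A basic sketch; a list of objects expands to one triple per object:
iex> RDF.Graph.new()
...> |> RDF.Graph.add({EX.S, EX.p, [EX.O1, EX.O2]})
RDF.Graph.new([{EX.S, EX.p, EX.O1}, {EX.S, EX.p, EX.O2}])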
"""
@spec add(t, input, keyword) :: t
def add(graph, input, opts \\ [])
def add(%__MODULE__{descriptions: descriptions} = graph, %Description{} = description, opts) do
if Description.empty?(description) do
graph
else
%__MODULE__{
graph
| descriptions:
Map.update(
descriptions,
description.subject,
description,
&Description.add(&1, description, opts)
)
}
|> RDF.Star.Graph.handle_addition_annotations(description, opts)
end
end
def add(graph, %__MODULE__{descriptions: descriptions, prefixes: prefixes}, opts) do
# normalize the annotations here, so we don't have to do this repeatedly in do_add/4
opts = RDF.Star.Graph.normalize_annotation_opts(opts)
graph =
Enum.reduce(descriptions, graph, fn {_, description}, graph ->
add(graph, description, opts)
end)
if prefixes do
add_prefixes(graph, prefixes, :ignore)
else
graph
end
end
def add(graph, %RDF.Dataset{} = dataset, opts) do
# normalize the annotations here, so we don't have to do this repeatedly
opts = RDF.Star.Graph.normalize_annotation_opts(opts)
dataset
|> RDF.Dataset.graphs()
|> Enum.reduce(graph, &add(&2, &1, opts))
end
def add(%__MODULE__{} = graph, {subject, predications}, opts),
do: add(graph, Description.new(subject, Keyword.put(opts, :init, predications)), opts)
def add(%__MODULE__{} = graph, {subject, _, _} = triple, opts),
do: add(graph, Description.new(subject, Keyword.put(opts, :init, triple)), opts)
def add(graph, {subject, predicate, object, _}, opts),
do: add(graph, {subject, predicate, object}, opts)
def add(graph, input, opts) when is_list(input) or (is_map(input) and not is_struct(input)) do
Enum.reduce(input, graph, &add(&2, &1, opts))
end
@doc """
Adds statements to a `RDF.Graph` overwriting existing statements with the subjects given in the `input` data.
When the statements to be added are given as another `RDF.Graph`, the prefixes
of this graph will be added. In case of conflicting prefix mappings the
original prefix from `graph` will be kept.
RDF-star annotations to be added to all of the given statements can be specified with
the `:add_annotations`, `:put_annotations` or `:put_annotation_properties` keyword
options. They have different addition semantics similar to the `add_annotations/3`,
`put_annotations/3` and `put_annotation_properties/3` counterparts.
What should happen with the annotations of statements that were deleted during
the overwrite can be controlled with these keyword options:
- `:delete_annotations_on_deleted`: deletes all or some annotations of the deleted
statements (see `delete_annotations/3` on possible values)
- `:add_annotations_on_deleted`, `:put_annotations_on_deleted`,
`:put_annotation_properties_on_deleted`: add annotations about the deleted
statements with the respective addition semantics similar to the keyword
options with the `_on_deleted` suffix mentioned above
## Examples
iex> RDF.Graph.new([{EX.S1, EX.P1, EX.O1}, {EX.S2, EX.P2, EX.O2}])
...> |> RDF.Graph.put([{EX.S1, EX.P3, EX.O3}])
RDF.Graph.new([{EX.S1, EX.P3, EX.O3}, {EX.S2, EX.P2, EX.O2}])
"""
@spec put(t, input, keyword) :: t
def put(graph, input, opts \\ [])
def put(%__MODULE__{} = graph, %__MODULE__{} = input, opts) do
new_graph = %__MODULE__{
graph
| descriptions:
Enum.reduce(
input.descriptions,
graph.descriptions,
fn {subject, description}, descriptions ->
Map.put(descriptions, subject, description)
end
)
}
if input.prefixes do
add_prefixes(new_graph, input.prefixes, :ignore)
else
new_graph
end
|> RDF.Star.Graph.handle_overwrite_annotations(graph, input, opts)
|> RDF.Star.Graph.handle_addition_annotations(input, opts)
end
def put(%__MODULE__{}, %RDF.Dataset{}, _opts) do
raise ArgumentError, "RDF.Graph.put/3 does not support RDF.Datasets"
end
def put(%__MODULE__{} = graph, input, opts) do
put(graph, new() |> add(input, RDF.Star.Graph.clear_annotation_opts(opts)), opts)
end
@doc """
Adds statements to a `RDF.Graph` and overwrites all existing statements with the same subject-predicate combinations given in the `input` data.
When the statements to be added are given as another `RDF.Graph`, the prefixes
of this graph will be added. In case of conflicting prefix mappings the
original prefix from `graph` will be kept.
RDF-star annotations to be added to all of the given statements can be specified with
the `:add_annotations`, `:put_annotations` or `:put_annotation_properties` keyword
options. They have different addition semantics similar to the `add_annotations/3`,
`put_annotations/3` and `put_annotation_properties/3` counterparts.
What should happen with the annotations of statements that were deleted during
the overwrite can be controlled with these keyword options:
- `:delete_annotations_on_deleted`: deletes all or some annotations of the deleted
statements (see `delete_annotations/3` on possible values)
- `:add_annotations_on_deleted`, `:put_annotations_on_deleted`,
`:put_annotation_properties_on_deleted`: add annotations about the deleted
statements with the respective addition semantics similar to the keyword
options with the `_on_deleted` suffix mentioned above
## Examples
iex> RDF.Graph.new([{EX.S1, EX.P1, EX.O1}, {EX.S2, EX.P2, EX.O2}])
...> |> RDF.Graph.put_properties([{EX.S1, EX.P2, EX.O3}, {EX.S2, EX.P2, EX.O3}])
RDF.Graph.new([{EX.S1, EX.P1, EX.O1}, {EX.S1, EX.P2, EX.O3}, {EX.S2, EX.P2, EX.O3}])
"""
@spec put_properties(t, input, keyword) :: t
def put_properties(graph, input, opts \\ [])
def put_properties(%__MODULE__{} = graph, %__MODULE__{} = input, opts) do
new_graph = %__MODULE__{
graph
| descriptions:
Enum.reduce(
input.descriptions,
graph.descriptions,
fn {subject, description}, descriptions ->
Map.update(
descriptions,
subject,
description,
fn current -> Description.put(current, description, opts) end
)
end
)
}
if input.prefixes do
add_prefixes(new_graph, input.prefixes, :ignore)
else
new_graph
end
|> RDF.Star.Graph.handle_overwrite_annotations(graph, input, opts)
|> RDF.Star.Graph.handle_addition_annotations(input, opts)
end
def put_properties(%__MODULE__{}, %RDF.Dataset{}, _opts) do
raise ArgumentError, "RDF.Graph.put_properties/3 does not support RDF.Datasets"
end
def put_properties(%__MODULE__{} = graph, input, opts) do
put_properties(graph, new() |> add(input, RDF.Star.Graph.clear_annotation_opts(opts)), opts)
end
@doc """
Deletes statements from a `RDF.Graph`.
When the statements to be deleted are given as another `RDF.Graph`,
the graph name does not have to match the graph name of the graph from which the statements
are deleted. If you want to delete only statements with matching graph names, you can
use `RDF.Data.delete/2`.
The optional `:delete_annotations` keyword option allows you to specify which of
the RDF-star annotations of the deleted statements should be deleted.
Any of the possible values of `delete_annotations/3` can be provided here.
By default no annotations of the deleted statements will be removed.
Alternatively, the `:add_annotations`, `:put_annotations` or `:put_annotation_properties`
keyword options can be used to add annotations about the deleted statements
with the addition semantics similar to the respective `add_annotations/3`,
`put_annotations/3` and `put_annotation_properties/3` counterparts.
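## Examples
A minimal sketch; deleting a subject's last statement removes its description:
iex> RDF.Graph.new([{EX.S1, EX.P1, EX.O1}, {EX.S2, EX.P2, EX.O2}])
...> |> RDF.Graph.delete({EX.S1, EX.P1, EX.O1})
RDF.Graph.new({EX.S2, EX.P2, EX.O2})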
"""
@spec delete(t, input, keyword) :: t
def delete(graph, input, opts \\ [])
def delete(%__MODULE__{} = graph, {subject, _, _} = triple, opts),
do: do_delete(graph, RDF.coerce_subject(subject), triple, opts)
def delete(%__MODULE__{} = graph, {subject, predications}, opts),
do: do_delete(graph, RDF.coerce_subject(subject), predications, opts)
def delete(graph, {subject, predicate, object, _}, opts),
do: delete(graph, {subject, predicate, object}, opts)
def delete(%__MODULE__{} = graph, %Description{} = description, opts),
do: do_delete(graph, description.subject, description, opts)
def delete(%__MODULE__{} = graph, %__MODULE__{} = input, opts) do
Enum.reduce(input.descriptions, graph, fn {_, description}, graph ->
delete(graph, description, opts)
end)
end
def delete(%__MODULE__{} = graph, input, opts)
when is_list(input) or (is_map(input) and not is_struct(input)) do
Enum.reduce(input, graph, &delete(&2, &1, opts))
end
defp do_delete(%__MODULE__{descriptions: descriptions} = graph, subject, input, opts) do
if description = descriptions[subject] do
new_description = Description.delete(description, input, opts)
%__MODULE__{
graph
| descriptions:
if Description.empty?(new_description) do
Map.delete(descriptions, subject)
else
Map.put(descriptions, subject, new_description)
end
}
else
graph
end
|> RDF.Star.Graph.handle_deletion_annotations({subject, input}, opts)
end
@doc """
Deletes all statements with the given `subjects`.
If `subjects` contains subjects that are not in `graph`, they're simply ignored.
The optional `:delete_annotations` keyword option allows you to specify which of
the RDF-star annotations of the deleted statements should be deleted.
Any of the possible values of `delete_annotations/3` can be provided here.
By default no annotations of the deleted statements will be removed.
Alternatively, the `:add_annotations`, `:put_annotations` or `:put_annotation_properties`
keyword options can be used to add annotations about the deleted statements
with the addition semantics similar to the respective `add_annotations/3`,
`put_annotations/3` and `put_annotation_properties/3` counterparts.
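## Examples
A minimal sketch:
iex> RDF.Graph.new([{EX.S1, EX.P1, EX.O1}, {EX.S2, EX.P2, EX.O2}])
...> |> RDF.Graph.delete_descriptions(EX.S1)
RDF.Graph.new({EX.S2, EX.P2, EX.O2})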
"""
@spec delete_descriptions(
t,
Statement.coercible_subject() | [Statement.coercible_subject()],
keyword
) :: t
def delete_descriptions(graph, subjects, opts \\ [])
def delete_descriptions(%__MODULE__{} = graph, subjects, opts) when is_list(subjects) do
Enum.reduce(subjects, graph, &delete_descriptions(&2, &1, opts))
end
def delete_descriptions(%__MODULE__{} = graph, subject, opts) do
case Map.pop(graph.descriptions, RDF.coerce_subject(subject)) do
{nil, _} ->
graph
{deleted_description, descriptions} ->
%__MODULE__{graph | descriptions: descriptions}
|> RDF.Star.Graph.handle_deletion_annotations(deleted_description, opts)
end
end
defdelegate delete_subjects(graph, subjects), to: __MODULE__, as: :delete_descriptions
defdelegate delete_subjects(graph, subjects, opts), to: __MODULE__, as: :delete_descriptions
@doc """
Adds RDF-star annotations to the given set of statements.
The set of `statements` can be given in any input form (see `add/3`).
The predicate-objects pairs to be added as annotations can be given as a tuple,
a list of tuples or a map.
"""
@spec add_annotations(t, input, Description.input() | nil) :: t
defdelegate add_annotations(graph, statements, annotations), to: RDF.Star.Graph
@doc """
Adds RDF-star annotations to the given set of statements overwriting all existing annotations.
The set of `statements` can be given in any input form (see `add/3`).
The predicate-objects pairs to be added as annotations can be given as a tuple,
a list of tuples or a map.
"""
@spec put_annotations(t, input, Description.input() | nil) :: t
defdelegate put_annotations(graph, statements, annotations), to: RDF.Star.Graph
@doc """
Adds RDF-star annotations to the given set of statements overwriting all existing annotations with the given properties.
The set of `statements` can be given in any input form (see `add/3`).
The predicate-objects pairs to be added as annotations can be given as a tuple,
a list of tuples or a map.
"""
@spec put_annotation_properties(t, input, Description.input() | nil) :: t
defdelegate put_annotation_properties(graph, statements, annotations), to: RDF.Star.Graph
@doc """
Deletes RDF-star annotations of a given set of statements.
The `statements` can be given in any input form (see `add/3`).
If `true` is given as the third argument or `delete_annotations/2` is used,
all annotations of the given `statements` are deleted.
If a single predicate or list of predicates is given only statements with
these predicates from the annotations of the given `statements` are deleted.
"""
@spec delete_annotations(
t,
input,
boolean | Statement.coercible_predicate() | [Statement.coercible_predicate()]
) :: t
defdelegate delete_annotations(graph, statements), to: RDF.Star.Graph
defdelegate delete_annotations(graph, statements, delete), to: RDF.Star.Graph
@doc """
Updates the description of the `subject` in `graph` with the given function.
If `subject` is present in `graph` with `description` as description,
`fun` is invoked with argument `description` and its result is used as the new
description of `subject`. If `subject` is not present in `graph`,
`initial` is inserted as the description of `subject`. If no `initial` value is
given, the `graph` remains unchanged. If `nil` is returned by `fun`, the
respective description will be removed from `graph`.
The initial value and the values returned by the update function will be coerced
to proper RDF descriptions before being added. If the initial or returned
description is a `RDF.Description` with another subject, the respective
statements are added with `subject` as subject.
## Examples
iex> RDF.Graph.new({EX.S, EX.p, EX.O})
...> |> RDF.Graph.update(EX.S,
...> fn description -> Description.add(description, {EX.p, EX.O2})
...> end)
RDF.Graph.new([{EX.S, EX.p, EX.O}, {EX.S, EX.p, EX.O2}])
iex> RDF.Graph.new({EX.S, EX.p, EX.O})
...> |> RDF.Graph.update(EX.S,
...> fn _ -> Description.new(EX.S2, init: {EX.p2, EX.O2})
...> end)
RDF.Graph.new([{EX.S, EX.p2, EX.O2}])
iex> RDF.Graph.new()
...> |> RDF.Graph.update(EX.S, Description.new(EX.S, init: {EX.p, EX.O}),
...> fn description -> Description.add(description, {EX.p, EX.O2})
...> end)
RDF.Graph.new([{EX.S, EX.p, EX.O}])
"""
@spec update(
t,
Statement.coercible_subject(),
Description.input() | nil,
update_description_fun
) :: t
def update(%__MODULE__{} = graph, subject, initial \\ nil, fun) do
subject = RDF.coerce_subject(subject)
case get(graph, subject) do
nil ->
if initial do
add(graph, Description.new(subject, init: initial))
else
graph
end
description ->
description
|> fun.()
|> case do
nil ->
delete_descriptions(graph, subject)
new_description ->
graph
|> delete_descriptions(subject)
|> add(Description.new(subject, init: new_description))
end
end
end
@doc """
Fetches the description of the given subject.
When the subject cannot be found, `:error` is returned.
## Examples
iex> RDF.Graph.new([{EX.S1, EX.P1, EX.O1}, {EX.S2, EX.P2, EX.O2}])
...> |> RDF.Graph.fetch(EX.S1)
{:ok, RDF.Description.new(EX.S1, init: {EX.P1, EX.O1})}
iex> RDF.Graph.new() |> RDF.Graph.fetch(EX.foo)
:error
"""
@impl Access
@spec fetch(t, Statement.coercible_subject()) :: {:ok, Description.t()} | :error
def fetch(%__MODULE__{} = graph, subject) do
Access.fetch(graph.descriptions, RDF.coerce_subject(subject))
end
@doc """
Gets the description of the given `subject` in the given `graph`.
When the subject cannot be found, the optionally given default value or
`nil` is returned.
## Examples
iex> RDF.Graph.new([{EX.S1, EX.P1, EX.O1}, {EX.S2, EX.P2, EX.O2}])
...> |> RDF.Graph.get(EX.S1)
RDF.Description.new(EX.S1, init: {EX.P1, EX.O1})
iex> RDF.Graph.get(RDF.Graph.new(), EX.Foo)
nil
iex> RDF.Graph.get(RDF.Graph.new(), EX.Foo, :bar)
:bar
"""
@spec get(t, Statement.coercible_subject(), any) :: Description.t() | any
def get(%__MODULE__{} = graph, subject, default \\ nil) do
case fetch(graph, subject) do
{:ok, value} -> value
:error -> default
end
end
@doc """
Returns the description of the given `subject` in the given `graph`.
As opposed to `get/3` this function returns an empty `RDF.Description` when
the subject does not exist in the given `graph`.
## Examples
iex> RDF.Graph.new([{EX.S1, EX.P1, EX.O1}, {EX.S2, EX.P2, EX.O2}])
...> |> RDF.Graph.description(EX.S1)
RDF.Description.new(EX.S1, init: {EX.P1, EX.O1})
iex> RDF.Graph.description(RDF.Graph.new(), EX.Foo)
RDF.Description.new(EX.Foo)
"""
@spec description(t, Statement.coercible_subject()) :: Description.t()
def description(%__MODULE__{} = graph, subject) do
case fetch(graph, subject) do
{:ok, value} -> value
:error -> Description.new(subject)
end
end
@doc """
All `RDF.Description`s within a `RDF.Graph`.
"""
@spec descriptions(t) :: [Description.t()]
def descriptions(%__MODULE__{} = graph) do
Map.values(graph.descriptions)
end
@doc """
Returns the `RDF.Graph` of all annotations.
Note: The graph includes only triples where the subject is a quoted triple.
Triples where only the object is a quoted triple are NOT included.
"""
@spec annotations(t) :: t
defdelegate annotations(graph), to: RDF.Star.Graph
@doc """
Returns the `RDF.Graph` without all annotations.
Note: This function excludes only triples where the subject is a quoted triple.
If you want to exclude also triples where the object is a quoted triple,
you'll have to use `RDF.Graph.without_star_statements/1`.
"""
@spec without_annotations(t) :: t
defdelegate without_annotations(graph), to: RDF.Star.Graph
@doc """
Returns the `RDF.Graph` without all statements including quoted triples on subject or object position.
This function is relatively costly, since it requires a full walk-through of all triples.
In many cases quoted triples are only used on subject position, where you can use
the significantly faster `RDF.Graph.without_annotations/1`.
"""
@spec without_star_statements(t) :: t
defdelegate without_star_statements(graph), to: RDF.Star.Graph
@doc """
Gets and updates the description of the given subject, in a single pass.
Invokes the passed function on the `RDF.Description` of the given subject;
this function should return either `{description_to_return, new_description}` or `:pop`.
If the passed function returns `{description_to_return, new_description}`, the
return value of `get_and_update` is `{description_to_return, new_graph}` where
`new_graph` is the input `Graph` updated with `new_description` for
the given subject.
If the passed function returns `:pop` the description for the given subject is
removed and a `{removed_description, new_graph}` tuple gets returned.
## Examples
iex> RDF.Graph.new({EX.S, EX.P, EX.O})
...> |> RDF.Graph.get_and_update(EX.S, fn current_description ->
...> {current_description, {EX.P, EX.NEW}}
...> end)
{RDF.Description.new(EX.S, init: {EX.P, EX.O}), RDF.Graph.new({EX.S, EX.P, EX.NEW})}
"""
@impl Access
@spec get_and_update(t, Statement.coercible_subject(), get_and_update_description_fun) ::
{Description.t(), t}
def get_and_update(%__MODULE__{} = graph, subject, fun) do
subject = RDF.coerce_subject(subject)
case fun.(get(graph, subject)) do
{old_description, new_description} ->
{old_description, put(graph, {subject, new_description})}
:pop ->
pop(graph, subject)
other ->
raise "the given function must return a two-element tuple or :pop, got: #{inspect(other)}"
end
end
@doc """
Pops an arbitrary triple from a `RDF.Graph`.
"""
@spec pop(t) :: {Statement.t() | nil, t}
def pop(graph)
def pop(%__MODULE__{descriptions: descriptions} = graph)
when descriptions == %{},
do: {nil, graph}
def pop(%__MODULE__{descriptions: descriptions} = graph) do
# TODO: Find a faster way ...
[{subject, description}] = Enum.take(descriptions, 1)
{triple, popped_description} = Description.pop(description)
popped =
if Description.empty?(popped_description),
do: descriptions |> Map.delete(subject),
else: descriptions |> Map.put(subject, popped_description)
{triple, %__MODULE__{graph | descriptions: popped}}
end
@doc """
Pops the description of the given subject.
When the subject cannot be found, the optionally given default value or `nil` is returned.
## Examples
iex> RDF.Graph.new([{EX.S1, EX.P1, EX.O1}, {EX.S2, EX.P2, EX.O2}])
...> |> RDF.Graph.pop(EX.S1)
{RDF.Description.new(EX.S1, init: {EX.P1, EX.O1}), RDF.Graph.new({EX.S2, EX.P2, EX.O2})}
iex> RDF.Graph.new({EX.S, EX.P, EX.O}) |> RDF.Graph.pop(EX.Missing)
{nil, RDF.Graph.new({EX.S, EX.P, EX.O})}
"""
@impl Access
@spec pop(t, Statement.coercible_subject()) :: {Description.t() | nil, t}
def pop(%__MODULE__{} = graph, subject) do
case Access.pop(graph.descriptions, RDF.coerce_subject(subject)) do
{nil, _} ->
{nil, graph}
{description, new_descriptions} ->
{description, %__MODULE__{graph | descriptions: new_descriptions}}
end
end
@doc """
The number of subjects within a `RDF.Graph`.
## Examples
iex> RDF.Graph.new([
...> {EX.S1, EX.p1, EX.O1},
...> {EX.S2, EX.p2, EX.O2},
...> {EX.S1, EX.p2, EX.O3}])
...> |> RDF.Graph.subject_count()
2
"""
@spec subject_count(t) :: non_neg_integer
def subject_count(%__MODULE__{} = graph) do
Enum.count(graph.descriptions)
end
@doc """
The number of statements within a `RDF.Graph`.
## Examples
iex> RDF.Graph.new([
...> {EX.S1, EX.p1, EX.O1},
...> {EX.S2, EX.p2, EX.O2},
...> {EX.S1, EX.p2, EX.O3}])
...> |> RDF.Graph.statement_count()
3
"""
@spec statement_count(t) :: non_neg_integer
def statement_count(%__MODULE__{} = graph) do
Enum.reduce(graph.descriptions, 0, fn {_subject, description}, count ->
count + Description.count(description)
end)
end
defdelegate triple_count(graph), to: __MODULE__, as: :statement_count
@doc """
The set of all subjects used in the statements within a `RDF.Graph`.
## Examples
iex> RDF.Graph.new([
...> {EX.S1, EX.p1, EX.O1},
...> {EX.S2, EX.p2, EX.O2},
...> {EX.S1, EX.p2, EX.O3}])
...> |> RDF.Graph.subjects()
MapSet.new([RDF.iri(EX.S1), RDF.iri(EX.S2)])
"""
def subjects(%__MODULE__{} = graph) do
graph.descriptions |> Map.keys() |> MapSet.new()
end
@doc """
The set of all properties used in the predicates of the statements within a `RDF.Graph`.
## Examples
iex> RDF.Graph.new([
...> {EX.S1, EX.p1, EX.O1},
...> {EX.S2, EX.p2, EX.O2},
...> {EX.S1, EX.p2, EX.O3}])
...> |> RDF.Graph.predicates()
MapSet.new([EX.p1, EX.p2])
"""
def predicates(%__MODULE__{} = graph) do
Enum.reduce(graph.descriptions, MapSet.new(), fn {_, description}, acc ->
description
|> Description.predicates()
|> MapSet.union(acc)
end)
end
@doc """
The set of all resources used in the objects within a `RDF.Graph`.
Note: This function only collects IRIs and BlankNodes, not Literals.
## Examples
iex> RDF.Graph.new([
...> {EX.S1, EX.p1, EX.O1},
...> {EX.S2, EX.p2, EX.O2},
...> {EX.S3, EX.p1, EX.O2},
...> {EX.S4, EX.p2, RDF.bnode(:bnode)},
...> {EX.S5, EX.p3, "foo"}])
...> |> RDF.Graph.objects()
MapSet.new([RDF.iri(EX.O1), RDF.iri(EX.O2), RDF.bnode(:bnode)])
"""
def objects(%__MODULE__{} = graph) do
Enum.reduce(graph.descriptions, MapSet.new(), fn {_, description}, acc ->
description
|> Description.objects()
|> MapSet.union(acc)
end)
end
@doc """
The set of all resources used within a `RDF.Graph`.
## Examples
iex> RDF.Graph.new([
...> {EX.S1, EX.p1, EX.O1},
...> {EX.S2, EX.p1, EX.O2},
...> {EX.S2, EX.p2, RDF.bnode(:bnode)},
...> {EX.S3, EX.p1, "foo"}])
...> |> RDF.Graph.resources()
MapSet.new([RDF.iri(EX.S1), RDF.iri(EX.S2), RDF.iri(EX.S3),
RDF.iri(EX.O1), RDF.iri(EX.O2), RDF.bnode(:bnode), EX.p1, EX.p2])
"""
def resources(%__MODULE__{} = graph) do
Enum.reduce(graph.descriptions, MapSet.new(), fn {_, description}, acc ->
description
|> Description.resources()
|> MapSet.union(acc)
end)
|> MapSet.union(subjects(graph))
end
@doc """
The list of all statements within a `RDF.Graph`.
When the optional `:filter_star` flag is set to `true` RDF-star triples with
a triple as subject or object will be filtered. The default value is `false`.
## Examples
iex> RDF.Graph.new([
...> {EX.S1, EX.p1, EX.O1},
...> {EX.S2, EX.p2, EX.O2},
...> {EX.S1, EX.p2, EX.O3}])
...> |> RDF.Graph.triples()
[{RDF.iri(EX.S1), RDF.iri(EX.p1), RDF.iri(EX.O1)},
{RDF.iri(EX.S1), RDF.iri(EX.p2), RDF.iri(EX.O3)},
{RDF.iri(EX.S2), RDF.iri(EX.p2), RDF.iri(EX.O2)}]
"""
@spec triples(t, keyword) :: [Statement.t()]
def triples(%__MODULE__{} = graph, opts \\ []) do
if Keyword.get(opts, :filter_star, false) do
Enum.flat_map(graph.descriptions, fn
{subject, _} when is_tuple(subject) -> []
{_, description} -> Description.triples(description, opts)
end)
else
Enum.flat_map(graph.descriptions, fn {_, description} ->
Description.triples(description, opts)
end)
end
end
defdelegate statements(graph, opts \\ []), to: __MODULE__, as: :triples
@doc """
Returns if the given `graph` is empty.
Note: You should always prefer this over the use of `Enum.empty?/1` as it is significantly faster.
"""
@spec empty?(t) :: boolean
def empty?(%__MODULE__{} = graph) do
Enum.empty?(graph.descriptions)
end
@doc """
Checks if the given `input` statements exist within `graph`.
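## Examples
A minimal sketch:
iex> RDF.Graph.new({EX.S, EX.p, EX.O}) |> RDF.Graph.include?({EX.S, EX.p, EX.O})
true
iex> RDF.Graph.new({EX.S, EX.p, EX.O}) |> RDF.Graph.include?({EX.S, EX.p, EX.O2})
false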
"""
@spec include?(t, input, keyword) :: boolean
def include?(graph, input, opts \\ [])
def include?(%__MODULE__{} = graph, {subject, _, _} = triple, opts),
do: do_include?(graph, RDF.coerce_subject(subject), triple, opts)
def include?(graph, {subject, predicate, object, _}, opts),
do: include?(graph, {subject, predicate, object}, opts)
def include?(%__MODULE__{} = graph, {subject, predications}, opts),
do: do_include?(graph, RDF.coerce_subject(subject), predications, opts)
def include?(%__MODULE__{} = graph, %Description{subject: subject} = description, opts),
do: do_include?(graph, subject, description, opts)
def include?(graph, %__MODULE__{} = other_graph, opts) do
other_graph
|> descriptions()
|> Enum.all?(&include?(graph, &1, opts))
end
def include?(graph, input, opts)
when is_list(input) or (is_map(input) and not is_struct(input)) do
Enum.all?(input, &include?(graph, &1, opts))
end
defp do_include?(%__MODULE__{descriptions: descriptions}, subject, input, opts) do
if description = descriptions[subject] do
Description.include?(description, input, opts)
else
false
end
end
@doc """
Checks if a `RDF.Graph` contains statements about the given resource.
## Examples
iex> RDF.Graph.new([{EX.S1, EX.p1, EX.O1}]) |> RDF.Graph.describes?(EX.S1)
true
iex> RDF.Graph.new([{EX.S1, EX.p1, EX.O1}]) |> RDF.Graph.describes?(EX.S2)
false
"""
@spec describes?(t, Statement.coercible_subject()) :: boolean
def describes?(%__MODULE__{} = graph, subject) do
Map.has_key?(graph.descriptions, RDF.coerce_subject(subject))
end
@doc """
Creates a graph from another one by limiting its statements to those using one of the given `subjects`.
If `subjects` contains IRIs that are not used in the `graph`, they're simply ignored.
The optional `properties` argument also allows limiting the properties of the subject descriptions.
If `nil` is passed as the `subjects`, the subjects will not be limited.
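## Examples
A minimal sketch:
iex> RDF.Graph.new([{EX.S1, EX.p1, EX.O1}, {EX.S2, EX.p2, EX.O2}])
...> |> RDF.Graph.take([EX.S1])
RDF.Graph.new({EX.S1, EX.p1, EX.O1})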
"""
@spec take(
t,
[Statement.coercible_subject()] | Enum.t() | nil,
[Statement.coercible_predicate()] | Enum.t() | nil
) :: t
def take(graph, subjects, properties \\ nil)
def take(%__MODULE__{} = graph, nil, nil), do: graph
def take(%__MODULE__{descriptions: descriptions} = graph, subjects, nil) do
%__MODULE__{
graph
| descriptions: Map.take(descriptions, Enum.map(subjects, &RDF.coerce_subject/1))
}
end
def take(%__MODULE__{} = graph, subjects, properties) do
graph = take(graph, subjects, nil)
%__MODULE__{
graph
| descriptions:
Map.new(graph.descriptions, fn {subject, description} ->
{subject, Description.take(description, properties)}
end)
}
end
@doc """
Execute the given `query` against the given `graph`.
This is just a convenience delegator function to `RDF.Query.execute!/3` with
the first two arguments swapped so it can be used in a pipeline on a `RDF.Graph`.
See `RDF.Query.execute/3` and `RDF.Query.execute!/3` for more information and examples.
"""
def query(graph, query, opts \\ []) do
RDF.Query.execute!(query, graph, opts)
end
@doc """
Returns a `Stream` for the execution of the given `query` against the given `graph`.
This is just a convenience delegator function to `RDF.Query.stream!/3` with
the first two arguments swapped so it can be used in a pipeline on a `RDF.Graph`.
See `RDF.Query.stream/3` and `RDF.Query.stream!/3` for more information and examples.
"""
def query_stream(graph, query, opts \\ []) do
RDF.Query.stream!(query, graph, opts)
end
@doc """
Returns a nested map of the native Elixir values of a `RDF.Graph`.
When a `:context` option is given with a `RDF.PropertyMap`, predicates will
be mapped to the terms defined in the `RDF.PropertyMap`, if present.
## Examples
iex> RDF.Graph.new([
...> {~I<http://example.com/S1>, ~I<http://example.com/p>, ~L"Foo"},
...> {~I<http://example.com/S2>, ~I<http://example.com/p>, RDF.XSD.integer(42)}
...> ])
...> |> RDF.Graph.values()
%{
"http://example.com/S1" => %{"http://example.com/p" => ["Foo"]},
"http://example.com/S2" => %{"http://example.com/p" => [42]}
}
iex> RDF.Graph.new([
...> {~I<http://example.com/S1>, ~I<http://example.com/p>, ~L"Foo"},
...> {~I<http://example.com/S2>, ~I<http://example.com/p>, RDF.XSD.integer(42)}
...> ])
...> |> RDF.Graph.values(context: [p: ~I<http://example.com/p>])
%{
"http://example.com/S1" => %{p: ["Foo"]},
"http://example.com/S2" => %{p: [42]}
}
"""
@spec values(t, keyword) :: map
def values(%__MODULE__{} = graph, opts \\ []) do
if property_map = PropertyMap.from_opts(opts) do
map(graph, RDF.Statement.default_property_mapping(property_map))
else
map(graph, &RDF.Statement.default_term_mapping/1)
end
end
@doc """
Returns a nested map of a `RDF.Graph` where each element from its triples is mapped with the given function.
The function `fun` will receive a tuple `{statement_position, rdf_term}` where
`statement_position` is one of the atoms `:subject`, `:predicate` or `:object`,
while `rdf_term` is the RDF term to be mapped. When the given function returns
`nil` this will be interpreted as an error and will become the overall result
of the `map/2` call.
Note: RDF-star statements where the subject or object is a triple will be ignored.
## Examples
iex> RDF.Graph.new([
...> {~I<http://example.com/S1>, ~I<http://example.com/p>, ~L"Foo"},
...> {~I<http://example.com/S2>, ~I<http://example.com/p>, RDF.XSD.integer(42)}
...> ])
...> |> RDF.Graph.map(fn
...> {:predicate, predicate} ->
...> predicate
...> |> to_string()
...> |> String.split("/")
...> |> List.last()
...> |> String.to_atom()
...> {_, term} ->
...> RDF.Term.value(term)
...> end)
%{
"http://example.com/S1" => %{p: ["Foo"]},
"http://example.com/S2" => %{p: [42]}
}
"""
@spec map(t, Statement.term_mapping()) :: map
def map(graph, fun)
def map(%__MODULE__{} = graph, fun) do
Enum.reduce(graph.descriptions, %{}, fn
{subject, _}, map when is_tuple(subject) ->
map
{subject, description}, map ->
case Description.map(description, fun) do
mapped_objects when map_size(mapped_objects) == 0 ->
map
mapped_objects ->
Map.put(
map,
fun.({:subject, subject}),
mapped_objects
)
end
end)
end
@doc """
Checks if two `RDF.Graph`s are equal.
Two `RDF.Graph`s are considered to be equal if they contain the same triples
and have the same name. The prefixes of the graph are irrelevant for equality.
"""
@spec equal?(t | any, t | any) :: boolean
def equal?(graph1, graph2)
def equal?(%__MODULE__{} = graph1, %__MODULE__{} = graph2) do
clear_metadata(graph1) == clear_metadata(graph2)
end
def equal?(_, _), do: false
@doc """
Returns the prefixes of the given `graph` as a `RDF.PrefixMap`.
"""
@spec prefixes(t) :: PrefixMap.t() | nil
def prefixes(%__MODULE__{} = graph), do: graph.prefixes
@doc """
Adds `prefixes` to the given `graph`.
The `prefixes` mappings can be given as any structure convertible to a
`RDF.PrefixMap`.
When a prefix with another mapping already exists it will be overwritten with
the new one. This behaviour can be customized by providing a `conflict_resolver`
function. See `RDF.PrefixMap.merge/3` for more on that.
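## Examples
A minimal sketch (the prefix and namespace are illustrative):
RDF.Graph.new()
|> RDF.Graph.add_prefixes(ex: "http://example.com/ns/")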
"""
@spec add_prefixes(
t,
PrefixMap.t() | map | keyword | nil,
PrefixMap.conflict_resolver() | nil
) :: t
def add_prefixes(graph, prefixes, conflict_resolver \\ nil)
def add_prefixes(%__MODULE__{} = graph, nil, _), do: graph
def add_prefixes(%__MODULE__{prefixes: nil} = graph, prefixes, _) do
%__MODULE__{graph | prefixes: PrefixMap.new(prefixes)}
end
def add_prefixes(%__MODULE__{} = graph, additions, nil) do
add_prefixes(graph, additions, :overwrite)
end
def add_prefixes(%__MODULE__{prefixes: prefixes} = graph, additions, conflict_resolver) do
%__MODULE__{graph | prefixes: PrefixMap.merge!(prefixes, additions, conflict_resolver)}
end
@doc """
Deletes `prefixes` from the given `graph`.
The `prefixes` can be a single prefix or a list of prefixes.
Prefixes that are not among the graph's prefixes are simply ignored.
"""
@spec delete_prefixes(t, atom | [atom]) :: t
def delete_prefixes(graph, prefixes)
def delete_prefixes(%__MODULE__{prefixes: nil} = graph, _), do: graph
def delete_prefixes(%__MODULE__{} = graph, deletions) do
%__MODULE__{graph | prefixes: PrefixMap.drop(graph.prefixes, List.wrap(deletions))}
end
@doc """
Clears all prefixes of the given `graph`.
"""
@spec clear_prefixes(t) :: t
def clear_prefixes(%__MODULE__{} = graph) do
%__MODULE__{graph | prefixes: nil}
end
@doc """
Returns the base IRI of the given `graph`.
"""
@spec base_iri(t) :: IRI.t() | nil
def base_iri(%__MODULE__{} = graph), do: graph.base_iri
@doc """
Sets the base IRI of the given `graph`.
The `base_iri` can be given as anything accepted by `RDF.IRI.coerce_base/1`.
"""
@spec set_base_iri(t, IRI.t() | nil) :: t
def set_base_iri(graph, base_iri)
def set_base_iri(%__MODULE__{} = graph, nil) do
%__MODULE__{graph | base_iri: nil}
end
def set_base_iri(%__MODULE__{} = graph, base_iri) do
%__MODULE__{graph | base_iri: IRI.coerce_base(base_iri)}
end
@doc """
Clears the base IRI of the given `graph`.
"""
@spec clear_base_iri(t) :: t
def clear_base_iri(%__MODULE__{} = graph) do
%__MODULE__{graph | base_iri: nil}
end
@doc """
Clears the base IRI and all prefixes of the given `graph`.
"""
@spec clear_metadata(t) :: t
def clear_metadata(%__MODULE__{} = graph) do
graph
|> clear_base_iri()
|> clear_prefixes()
end
defimpl Enumerable do
alias RDF.Graph
def member?(graph, triple), do: {:ok, Graph.include?(graph, triple)}
def count(graph), do: {:ok, Graph.statement_count(graph)}
def slice(graph) do
size = Graph.statement_count(graph)
{:ok, size, &Enumerable.List.slice(Graph.triples(graph), &1, &2, size)}
end
def reduce(graph, acc, fun) do
graph
|> Graph.triples()
|> Enumerable.List.reduce(acc, fun)
end
end
defimpl Collectable do
alias RDF.Graph
def into(original) do
collector_fun = fn
graph, {:cont, list} when is_list(list) ->
Graph.add(graph, List.to_tuple(list))
graph, {:cont, elem} ->
Graph.add(graph, elem)
graph, :done ->
graph
_graph, :halt ->
:ok
end
{original, collector_fun}
end
end
end
|
lib/rdf/graph.ex
| 0.92632 | 0.680003 |
graph.ex
|
starcoder
|
defmodule Kalevala.Verb.Conditions do
@moduledoc """
A verb is not allowed unless all conditions are met
- `location` is an array of all allowed locations, one must match
"""
@derive Jason.Encoder
defstruct [:location]
end
defmodule Kalevala.Verb.Context do
@moduledoc """
Context for running a verb
- `location` where the verb is taking place
"""
@derive Jason.Encoder
defstruct [:location]
end
defmodule Kalevala.Verb do
@moduledoc """
A verb is a discrete action that the player may perform
Things like picking up or dropping items, stealing, etc.
"""
@derive Jason.Encoder
defstruct [:conditions, :icon, :key, :send, :text]
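@doc """
Replaces `${key}` placeholders in each verb's `send` string with the
given variable values.
A sketch with hypothetical values:
replace_variables([%Verb{send: "get ${item}"}], %{"item" => "sword"})
# => [%Verb{send: "get sword"}]
"""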
def replace_variables(verbs, variables) do
Enum.map(verbs, fn verb ->
Enum.reduce(variables, verb, fn {key, value}, verb ->
Map.put(verb, :send, String.replace(verb.send, "${#{key}}", value))
end)
end)
end
@doc """
Check if a list of verbs contains a verb that matches the context
"""
def has_matching_verb?(verbs, verb_key, context) do
verb =
Enum.find(verbs, fn verb ->
verb.key == verb_key
end)
if verb do
matches?(verb, context)
else
false
end
end
@doc """
Check if a verb matches the context
"""
def matches?(verb, context) do
matches_location?(verb.conditions, context)
end
@doc """
Check if the location condition matches the context
No location condition == all locations are good
iex> Verb.matches_location?(%{location: ["room"]}, %{location: "room"})
true
iex> Verb.matches_location?(%{location: ["inventory/self"]}, %{location: "inventory/self"})
true
iex> Verb.matches_location?(%{location: ["inventory"]}, %{location: "inventory/self"})
true
"""
def matches_location?(%{location: locations}, context) do
Enum.any?(locations, fn location ->
String.starts_with?(context.location, location)
end)
end
def matches_location?(_conditions, _context), do: true
end
|
lib/kalevala/verb.ex
| 0.793106 | 0.429549 |
verb.ex
|
starcoder
|
defmodule Chunkr.Cursor do
@moduledoc """
Behaviour for encoding and decoding of cursors.
Allows the default Base64 cursor to be replaced via a custom cursor type specific to your
application—for example, to allow signed cursors, etc. See `Chunkr.Cursor.Base64`
Cursors are created from a list of values. Each individual value is encoded by the
`Chunkr.CursorValue.Encode` protocol. Then the values are encoded together into
cursor form via the `c:to_cursor/1` callback.
Some types can be more efficiently encoded than simply relying on their default representation.
For example, DateTime structs can be converted to Unix timestamps, which require far fewer bits.
To achieve more efficient encoding of timestamps, you can provide the following protocol
implementations for encoding and decoding:
defimpl Chunkr.CursorValue.Encode, for: DateTime do
def convert(%DateTime{} = datetime), do: {:dt, DateTime.to_unix(datetime, :microsecond)}
end
defimpl Chunkr.CursorValue.Decode, for: Tuple do
def convert({:dt, unix_timestamp}), do: DateTime.from_unix!(unix_timestamp, :microsecond)
end
Any types that do not have a custom encoding will be passed through as is to the `c:to_cursor/1`
callback.
"""
@type cursor() :: binary()
@type cursor_values() :: [any()]
@doc """
Invoked to translate a list of values into a cursor.
Must return `{:ok, cursor}` if encoding was successful. On error, it must return
`{:error, message}`.
"""
@callback to_cursor(cursor_values :: cursor_values()) :: {:ok, cursor()} | {:error, binary()}
@doc """
Invoked to translate a cursor back to its initial values.
Must return `{:ok, cursor_values}` if decoding was successful. On error, it must return
`{:error, message}`.
"""
@callback to_values(cursor :: cursor()) :: {:ok, cursor_values()} | {:error, binary()}
@doc """
Creates a cursor via the `c:to_cursor/1` callback.
## Example
iex> Chunkr.Cursor.encode(["some", "value", 123], Chunkr.Cursor.Base64)
{:ok, "g2wAAAADbQAAAARzb21lbQAAAAV2YWx1ZWF7ag=="}
"""
@spec encode(cursor_values(), module()) :: {:ok, cursor()} | {:error, binary()}
def encode(cursor_values, cursor_mod) when is_list(cursor_values) do
cursor_values
|> Enum.map(&Chunkr.CursorValue.Encode.convert/1)
|> cursor_mod.to_cursor()
end
@doc """
Same as `encode/2` but raises an error if creation of cursor fails.
## Example
iex> Chunkr.Cursor.encode!(["some", "value", 123], Chunkr.Cursor.Base64)
"g2wAAAADbQAAAARzb21lbQAAAAV2YWx1ZWF7ag=="
"""
@spec encode!(cursor_values(), module()) :: cursor() | none()
def encode!(cursor_values, cursor_mod) when is_list(cursor_values) do
case encode(cursor_values, cursor_mod) do
{:ok, cursor} -> cursor
{:error, message} -> raise(ArgumentError, message)
end
end
@doc """
Decodes a cursor via the `c:to_values/1` callback.
## Example
iex> Chunkr.Cursor.decode("g2wAAAADbQAAAARzb21lbQAAAAV2YWx1ZWF7ag==", Chunkr.Cursor.Base64)
{:ok, ["some", "value", 123]}
"""
@spec decode(cursor(), module()) :: {:ok, cursor_values()} | {:error, any()}
def decode(cursor, cursor_mod) when is_binary(cursor) do
case cursor_mod.to_values(cursor) do
{:ok, cursor_values} -> {:ok, Enum.map(cursor_values, &Chunkr.CursorValue.Decode.convert/1)}
{:error, message} -> {:error, message}
end
end
@doc """
Same as `decode/2` but raises an error for invalid cursors.
## Example
iex> Chunkr.Cursor.decode!("g2wAAAADbQAAAARzb21lbQAAAAV2YWx1ZWF7ag==", Chunkr.Cursor.Base64)
["some", "value", 123]
"""
@spec decode!(cursor(), module()) :: cursor_values() | none()
def decode!(cursor, cursor_mod) do
case decode(cursor, cursor_mod) do
{:ok, cursor_values} -> cursor_values
{:error, message} -> raise(ArgumentError, message)
end
end
end
|
lib/chunkr/cursor.ex
| 0.931392 | 0.671953 |
cursor.ex
|
starcoder
|
defmodule Bitwise do
@moduledoc """
This module provides macro-based operators that perform calculations
on (sets of) bits. These macros can be used in guards. In general,
you should `use` the Bitwise module as a whole:
iex> use Bitwise
iex> bnot 1
-2
iex> 1 &&& 1
1
Alternatively, you can include or skip selected operators:
iex> use Bitwise, only_operators: true
iex> 1 &&& 1
1
"""
@doc """
Allows a developer to `use` this module in their programs with
the following options:
* `:only_operators` - include only operators
* `:skip_operators` - skip operators
"""
defmacro __using__(options) do
except = cond do
Keyword.get(options, :only_operators) ->
[bnot: 1, band: 2, bor: 2, bxor: 2, bsl: 2, bsr: 2]
Keyword.get(options, :skip_operators) ->
[~~~: 1, &&&: 2, |||: 2, ^^^: 2, <<<: 2, >>>: 2]
true -> []
end
quote do
import Bitwise, except: unquote(except)
end
end
@doc """
Calculates the bitwise NOT of its argument.
iex> bnot(2)
-3
iex> bnot(2) &&& 3
1
"""
defmacro bnot(expr) do
quote do: :erlang.bnot(unquote(expr))
end
@doc """
Prefix (unary) operator; calculates the bitwise NOT of its argument.
iex> ~~~2
-3
iex> ~~~2 &&& 3
1
"""
defmacro ~~~expr do
quote do: :erlang.bnot(unquote(expr))
end
@doc """
Calculates the bitwise AND of its arguments.
iex> band(9, 3)
1
"""
defmacro band(left, right) do
quote do: :erlang.band(unquote(left), unquote(right))
end
@doc """
Infix operator; calculates the bitwise AND of its arguments.
iex> 9 &&& 3
1
"""
defmacro left &&& right do
quote do: :erlang.band(unquote(left), unquote(right))
end
@doc """
Calculates the bitwise OR of its arguments.
iex> bor(9, 3)
11
"""
defmacro bor(left, right) do
quote do: :erlang.bor(unquote(left), unquote(right))
end
@doc """
Infix operator; calculates the bitwise OR of its arguments.
iex> 9 ||| 3
11
"""
defmacro left ||| right do
quote do: :erlang.bor(unquote(left), unquote(right))
end
@doc """
Calculates the bitwise XOR of its arguments.
iex> bxor(9, 3)
10
"""
defmacro bxor(left, right) do
quote do: :erlang.bxor(unquote(left), unquote(right))
end
@doc """
Infix operator; calculates the bitwise XOR of its arguments.
iex> 9 ^^^ 3
10
"""
defmacro left ^^^ right do
quote do: :erlang.bxor(unquote(left), unquote(right))
end
@doc """
Calculates the result of an arithmetic left bitshift.
iex> bsl(1, 2)
4
iex> bsl(1, -2)
0
iex> bsl(-1, 2)
-4
iex> bsl(-1, -2)
-1
"""
defmacro bsl(left, right) do
quote do: :erlang.bsl(unquote(left), unquote(right))
end
@doc """
Infix operator; calculates the result of an arithmetic left bitshift.
iex> 1 <<< 2
4
iex> 1 <<< -2
0
iex> -1 <<< 2
-4
iex> -1 <<< -2
-1
"""
defmacro left <<< right do
quote do: :erlang.bsl(unquote(left), unquote(right))
end
@doc """
Calculates the result of an arithmetic right bitshift.
iex> bsr(1, 2)
0
iex> bsr(1, -2)
4
iex> bsr(-1, 2)
-1
iex> bsr(-1, -2)
-4
"""
defmacro bsr(left, right) do
quote do: :erlang.bsr(unquote(left), unquote(right))
end
@doc """
Infix operator; calculates the result of an arithmetic right bitshift.
iex> 1 >>> 2
0
iex> 1 >>> -2
4
iex> -1 >>> 2
-1
iex> -1 >>> -2
-4
"""
defmacro left >>> right do
quote do: :erlang.bsr(unquote(left), unquote(right))
end
end
|
lib/elixir/lib/bitwise.ex
| 0.843089 | 0.60903 |
bitwise.ex
|
starcoder
|
defmodule ElixirALE.I2C do
use GenServer
@moduledoc """
This module allows Elixir code to communicate with devices on an I2C bus.
"""
defmodule State do
@moduledoc false
defstruct port: nil, address: 0, devname: nil
end
@type i2c_address :: 0..127
# Public API
@doc """
Start and link the I2c GenServer.
`devname` is the I2C bus name (e.g., "i2c-1")
`address` is the device's 7-bit address on the I2C bus
Note that `address` can be confusing when reading a datasheet
since sometimes the datasheet mentions the 8-bit address. For an 8-bit
address the least significant bit indicates whether the access is for a
read or a write. Microcontrollers like those on Arduinos often use the 8-bit
address. To convert an 8-bit address to a 7-bit one, divide the address by
two.
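For example, a datasheet 8-bit address of `0x40` corresponds to the 7-bit
address `0x20` here, since `div(0x40, 2) == 0x20`.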
All calls to `read/2`, `write/2`, and `write_read/3` access the device
specified by `address`. Some I2C devices can be switched into different
modes where they respond to an alternate address. Rather than having to
create a second `I2c` process, see `read_device/3` and related routines.
If your application is interacting with many devices on the bus and
you're only going to call `read_device/3`, etc., then pass in any number
for the `i2c_address` here.
"""
@spec start_link(binary, i2c_address, [term]) :: {:ok, pid}
def start_link(devname, address, opts \\ []) do
GenServer.start_link(__MODULE__, [devname, address], opts)
end
@doc """
Stop the GenServer and release all resources.
"""
@spec release(pid) :: :ok
def release(pid) do
GenServer.cast pid, :release
end
@doc """
Initiate a read transaction on the I2C bus of `count` bytes.
"""
@spec read(pid, integer) :: binary | {:error, term}
def read(pid, count) do
GenServer.call pid, {:read, count}
end
@doc """
Write the specified `data` to the device.
"""
@spec write(pid, binary) :: :ok | {:error, term}
def write(pid, data) do
GenServer.call pid, {:write, data}
end
@doc """
Write the specified `data` to the device and then read
the specified number of bytes.
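A common pattern is writing a register address and then reading its contents
back. Illustrative sketch (register `0x03` and the byte count are made up):
```
ElixirALE.I2C.write_read(pid, <<0x03>>, 2)
```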
"""
@spec write_read(pid, binary, integer) :: binary | {:error, term}
def write_read(pid, write_data, read_count) do
GenServer.call pid, {:wrrd, write_data, read_count}
end
@doc """
Initiate a read transaction to the device at the specified `address`. This
is the same as `read/2` except that an arbitrary device address may be given.
"""
@spec read_device(pid, i2c_address, integer) :: binary | {:error, term}
def read_device(pid, address, count) do
GenServer.call pid, {:read_device, address, count}
end
@doc """
Write the specified `data` to the device at `address`.
"""
@spec write_device(pid, i2c_address, binary) :: :ok | {:error, term}
def write_device(pid, address, data) do
GenServer.call pid, {:write_device, address, data}
end
@doc """
Write the specified `data` to the device and then read
the specified number of bytes. This is similar to `write_read/3` except
with an I2C device address.
"""
@spec write_read_device(pid, i2c_address, binary, integer) ::
binary | {:error, term}
def write_read_device(pid, address, write_data, read_count) do
GenServer.call pid, {:wrrd_device, address, write_data, read_count}
end
@doc """
Return a list of available I2C bus device names. If nothing is returned,
it's possible that the kernel driver for that I2C bus is not enabled or the
kernel's device tree is not configured. On Raspbian, run `raspi-config` and
look in the advanced options.
```
iex> ElixirALE.I2C.device_names
["i2c-1"]
```
"""
@spec device_names() :: [binary]
def device_names() do
Path.wildcard("/dev/i2c-*")
|> Enum.map(fn(p) -> String.replace_prefix(p, "/dev/", "") end)
end
@doc """
Scan the I2C bus for devices by performing a read at each device address
and returning a list of device addresses that respond.
WARNING: This is intended to be a debugging aid. Reading bytes from devices
can advance internal state machines and might cause them to get out of sync
with other code.
```
iex> ElixirALE.I2C.detect_devices("i2c-1")
[4]
```
The return value is a list of device addresses that were detected on the
specified I2C bus. If you get back `'Hh'` or other letters, then IEx
converted the list to an Erlang string. Run `i v()` to get information about
the return value and look at the raw string representation for addresses.
If you already have an `ElixirALE.I2C` `GenServer` running, then you may
pass its `pid` to `detect_devices/1` instead.
"""
@spec detect_devices(pid | binary) :: [integer] | {:error, term}
def detect_devices(pid_or_devname) when is_pid(pid_or_devname) do
Enum.reject(0..127,
&(read_device(pid_or_devname, &1, 1) == {:error, :i2c_read_failed}))
end
def detect_devices(pid_or_devname) when is_binary(pid_or_devname) do
with {:ok, pid} <- start_link(pid_or_devname, 0),
devices = detect_devices(pid),
:ok <- GenServer.stop(pid),
do: devices
end
# gen_server callbacks
def init([devname, address]) do
executable = :code.priv_dir(:elixir_ale) ++ '/ale'
port = Port.open({:spawn_executable, executable},
[{:args, ["i2c", "/dev/#{devname}"]},
{:packet, 2},
:use_stdio,
:binary,
:exit_status])
state = %State{port: port, address: address, devname: devname}
{:ok, state}
end
def handle_call({:read, count}, _from, state) do
{:ok, response} = call_port(state, :read, state.address, count)
{:reply, response, state}
end
def handle_call({:write, data}, _from, state) do
{:ok, response} = call_port(state, :write, state.address, data)
{:reply, response, state}
end
def handle_call({:wrrd, write_data, read_count}, _from, state) do
{:ok, response} =
call_port(state, :wrrd, state.address, {write_data, read_count})
{:reply, response, state}
end
def handle_call({:read_device, address, count}, _from, state) do
{:ok, response} = call_port(state, :read, address, count)
{:reply, response, state}
end
def handle_call({:write_device, address, data}, _from, state) do
{:ok, response} = call_port(state, :write, address, data)
{:reply, response, state}
end
def handle_call({:wrrd_device, address, write_data, read_count}, _from, state) do
{:ok, response} = call_port(state, :wrrd, address, {write_data, read_count})
{:reply, response, state}
end
def handle_cast(:release, state) do
{:stop, :normal, state}
end
# Private helper functions
defp call_port(state, command, address, arguments) do
msg = {command, address, arguments}
send state.port, {self(), {:command, :erlang.term_to_binary(msg)}}
receive do
{_, {:data, response}} ->
{:ok, :erlang.binary_to_term(response)}
_ -> :error
end
end
end
|
lib/elixir_ale/i2c.ex
| 0.840062 | 0.735831 |
i2c.ex
|
starcoder
|
defmodule Membrane.AAC do
@moduledoc """
Capabilities for [Advanced Audio Codec](https://wiki.multimedia.cx/index.php/Understanding_AAC).
"""
@type profile_t :: :main | :LC | :SSR | :LTP | :HE | :HEv2
@type mpeg_version_t :: 2 | 4
@type samples_per_frame_t :: 1024 | 960
@typedoc """
Indicates whether stream contains AAC frames only or are they encapsulated
in [ADTS](https://wiki.multimedia.cx/index.php/ADTS)
"""
@type encapsulation_t :: :none | :ADTS
@typedoc """
Identifiers of [MPEG Audio Object Types](https://wiki.multimedia.cx/index.php/MPEG-4_Audio#Audio_Object_Types)
"""
@type audio_object_type_id_t :: 1..5 | 29
@typedoc """
Identifiers of [MPEG Audio sampling frequencies](https://wiki.multimedia.cx/index.php/MPEG-4_Audio#Sampling_Frequencies)
"""
@type sampling_frequency_id_t :: 0..12 | 15
@typedoc """
Identifiers of [MPEG Audio channel configurations](https://wiki.multimedia.cx/index.php/MPEG-4_Audio#Channel_Configurations)
"""
@type channel_config_id_t :: 0..7
@typedoc """
AAC frame length identifiers.
`0` indicates 1024 samples/frame and `1` indicates 960 samples/frame.
"""
@type frame_length_id_t :: 0 | 1
@type t :: %__MODULE__{
profile: profile_t,
mpeg_version: mpeg_version_t,
sample_rate: pos_integer,
channels: pos_integer,
samples_per_frame: samples_per_frame_t,
frames_per_buffer: pos_integer,
encapsulation: encapsulation_t
}
@enforce_keys [
:profile,
:sample_rate,
:channels
]
defstruct @enforce_keys ++
[
mpeg_version: 2,
samples_per_frame: 1024,
frames_per_buffer: 1,
encapsulation: :none
]
defp audio_object_type(),
do:
BiMap.new(%{
1 => :main,
2 => :LC,
3 => :SSR,
4 => :LTP,
5 => :HE,
29 => :HEv2
})
defp sampling_frequency(),
do:
BiMap.new(%{
0 => 96_000,
1 => 88_200,
2 => 64_000,
3 => 48_000,
4 => 44_100,
5 => 32_000,
6 => 24_000,
7 => 22_050,
8 => 16_000,
9 => 12_000,
10 => 11_025,
11 => 8000,
12 => 7350,
15 => :explicit
})
defp channel_config(),
do:
BiMap.new(%{
0 => :AOT_specific,
1 => 1,
2 => 2,
3 => 3,
4 => 4,
5 => 5,
6 => 6,
7 => 8
})
defp frame_length(),
do:
BiMap.new(%{
0 => 1024,
1 => 960
})
@spec aot_id_to_profile(audio_object_type_id_t) :: profile_t
def aot_id_to_profile(audio_object_type_id),
do: BiMap.fetch!(audio_object_type(), audio_object_type_id)
@spec profile_to_aot_id(profile_t) :: audio_object_type_id_t
def profile_to_aot_id(profile), do: BiMap.fetch_key!(audio_object_type(), profile)
@spec sampling_frequency_id_to_sample_rate(sampling_frequency_id_t) :: pos_integer
def sampling_frequency_id_to_sample_rate(sampling_frequency_id),
do: BiMap.fetch!(sampling_frequency(), sampling_frequency_id)
@spec sample_rate_to_sampling_frequency_id(sample_rate :: pos_integer | :explicit) ::
sampling_frequency_id_t
def sample_rate_to_sampling_frequency_id(sample_rate),
do: BiMap.fetch_key!(sampling_frequency(), sample_rate)
@spec channel_config_id_to_channels(channel_config_id_t) :: pos_integer | :AOT_specific
def channel_config_id_to_channels(channel_config_id),
do: BiMap.fetch!(channel_config(), channel_config_id)
@spec channels_to_channel_config_id(channels :: pos_integer | :AOT_specific) ::
channel_config_id_t
def channels_to_channel_config_id(channels), do: BiMap.fetch_key!(channel_config(), channels)
@spec frame_length_id_to_samples_per_frame(frame_length_id_t) :: samples_per_frame_t
def frame_length_id_to_samples_per_frame(frame_length_id),
do: BiMap.fetch!(frame_length(), frame_length_id)
@spec samples_per_frame_to_frame_length_id(samples_per_frame_t) :: frame_length_id_t
def samples_per_frame_to_frame_length_id(samples_per_frame),
do: BiMap.fetch_key!(frame_length(), samples_per_frame)
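# Round-trip sketch of the BiMap lookups above (values come straight from
# the maps defined in this module):
#
#   Membrane.AAC.aot_id_to_profile(2)                        #=> :LC
#   Membrane.AAC.profile_to_aot_id(:LC)                      #=> 2
#   Membrane.AAC.sampling_frequency_id_to_sample_rate(4)     #=> 44_100
#   Membrane.AAC.samples_per_frame_to_frame_length_id(960)   #=> 1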
end
|
lib/membrane_aac_format/aac.ex
| 0.874607 | 0.58056 |
aac.ex
|
starcoder
|
defmodule Plaid.Link do
@moduledoc """
Functions for Plaid `link` endpoint.
"""
import Plaid, only: [make_request_with_cred: 4, validate_cred: 1]
alias Plaid.Utils
@derive Jason.Encoder
defstruct link_token: nil,
expiration: nil,
created_at: nil,
metadata: nil
@type t :: %__MODULE__{
link_token: String.t(),
expiration: String.t(),
created_at: String.t(),
metadata: Plaid.Link.Metadata.t()
}
@type params :: %{required(atom) => String.t()}
@type config :: %{required(atom) => String.t()}
@endpoint :link
defmodule Metadata do
@moduledoc """
Plaid Link Metadata data structure.
"""
@derive Jason.Encoder
defstruct initial_products: nil,
webhook: nil,
country_codes: nil,
language: nil,
account_filters: nil,
redirect_uri: nil,
client_name: nil
@type t :: %__MODULE__{
initial_products: [String.t()],
webhook: String.t(),
country_codes: [String.t()],
language: String.t(),
account_filters: map(),
redirect_uri: String.t(),
client_name: String.t()
}
end
@doc """
Creates a Link Token.
Parameters
```
%{
client_name: "",
language: "",
country_codes: "",
user: %{client_user_id: ""}
}
```
"""
@spec create_link_token(params, config | nil) ::
{:ok, Plaid.Link.t()} | {:error, Plaid.Error.t()}
def create_link_token(params, config \\ %{}) do
config = validate_cred(config)
endpoint = "#{@endpoint}/token/create"
make_request_with_cred(:post, endpoint, config, params)
|> Utils.handle_resp(@endpoint)
end
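# Illustrative call: field values below are placeholders, and `config` is
# omitted so credentials are resolved by `validate_cred/1`:
#
#   Plaid.Link.create_link_token(%{
#     client_name: "My App",
#     language: "en",
#     country_codes: ["US"],
#     user: %{client_user_id: "user-123"}
#   })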
@doc """
Gets a Link Token's information.
Parameters
```
%{
link_token: ""
}
```
"""
@spec get_link_token(params, config | nil) ::
{:ok, Plaid.Link.t()} | {:error, Plaid.Error.t()}
def get_link_token(params, config \\ %{}) do
config = validate_cred(config)
endpoint = "#{@endpoint}/token/get"
make_request_with_cred(:post, endpoint, config, params)
|> Utils.handle_resp(@endpoint)
end
end
|
lib/plaid/link.ex
| 0.767516 | 0.563828 |
link.ex
|
starcoder
|
defmodule Gmex do
@moduledoc """
A simple wrapper for GraphicsMagick in Elixir.
"""
@default_open_options [
gm_path: "gm"
]
@default_resize_options [
width: :auto,
height: :auto,
resize: :fill
]
@type image :: { :ok, %Gmex.Image{} }
@type gmex_error :: { :error, any }
@type image_info :: [ width: Integer.t, height: Integer.t, size: String.t, format: String.t, quality: Integer.t ]
@type resize_options :: [ width: Integer.t, height: Integer.t, type: :fill | :fit ]
@type open_options :: [ gm_path: String.t ]
@type option_param :: String.t | Integer.t | Float.t
@type option :: [ adjoin: boolean ] |
[ background: String.t ] |
[ blur: { option_param, option_param } ] |
[ blur: option_param ] |
[ crop: { option_param, option_param, option_param, option_param } ] |
[ crop: { option_param, option_param } ] |
[ edge: option_param ] |
[ extent: { option_param, option_param, option_param, option_param } ] |
[ extent: { option_param, option_param } ] |
[ flatten: boolean ] |
[ fill: String.t ] |
[ strip: boolean ] |
[ flip: boolean ] |
[ format: String.t ] |
[ gravity: String.t ] |
[ magnify: boolean ] |
[ matte: boolean ] |
[ negate: true ] |
[ opaque: String.t ] |
[ quality: Integer.t ] |
[ resize: { option_param, option_param } ] |
[ resize: Integer.t ] |
[ rotate: Integer.t ] |
[ size: { option_param, option_param } ] |
[ size: { option_param, option_param, option_param } ] |
[ thumbnail: { option_param, option_param } ] |
[ thumbnail: option_param ] |
[ transparent: String.t ] |
[ type: String.t ] |
[ custom: list( option_param ) ]
@doc false
def test_gm( options \\ [] ) do
final_options = Keyword.merge( @default_open_options, options )
executable = Keyword.get( final_options, :gm_path )
if System.find_executable( executable ) == nil do
{ :error, "graphicsmagick executable not found at:#{executable}" }
else
{ :ok, executable }
end
end
@doc """
Opens image source.
## Options
* `:gm_path` - path to GraphicsMagick executable, defaults to `gm`, if the executable is missing an error will be returned.
## Example
iex> Gmex.open( "test/images/blossom.jpg" )
{ :ok, %Gmex.Image{ image: "test/images/blossom.jpg", options: [ "gm" ] } }
iex> Gmex.open( "test/images/blossom.jpg", gm_path: "/404/gm" )
{ :error, "graphicsmagick executable not found at:/404/gm" }
iex> Gmex.open( "non-existing.png" )
{ :error, :enoent }
"""
@spec open( String.t(), [ open_options ] ) :: image | gmex_error
def open( src_path, options \\ [] ) do
with { :ok, executable } <- test_gm( options )
do
if File.exists?( src_path ) do
{ :ok, %Gmex.Image{
image: src_path,
options: [ executable ]
} }
else
{ :error, :enoent }
end
end
end
@doc """
Saves the modified image.
## Example
iex> Gmex.open( "test/images/blossom.jpg" )
iex> |> Gmex.save( "newimage.jpg" )
{ :ok, nil }
"""
@spec save( image, String.t() ) :: image | gmex_error
def save( image, dest_path ) do
with { :ok, image_struct } <- image
do
[ executable | final_options ] = image_struct.options
final_options = [ "convert" | [ image_struct.image | final_options ] ] ++ [ dest_path ]
{ result, status_code } = System.cmd executable, final_options , stderr_to_stdout: true
result = result
|> String.replace( "\r", "" )
|> String.replace( "\n", "" )
if status_code == 0 do
{ :ok, nil }
else
{ :error, result }
end
end
end
@doc """
Returns a keyword list with information about the image: width, height, size, format and quality.
"""
@spec get_info( image ) :: { :ok, image_info } | gmex_error
def get_info( image ) do
with { :ok, image_struct } <- image
do
[ executable | _ ] = image_struct.options
{ image_data, status_code } = System.cmd executable, [ "identify", "-format", "width=%w,height=%h,size=%b,format=%m,quality=%Q", image_struct.image ], stderr_to_stdout: true
image_data = image_data
|> String.replace( "\r", "" )
|> String.replace( "\n", "" )
if status_code == 0 do
{ :ok,
String.split( image_data, "," )
|> Enum.reduce( [], fn ( row, acc ) ->
[ field , value ] = String.split( row, "=" )
case field do
"width" ->
width = value
|> String.to_integer()
acc ++ [ width: width ]
"height" ->
width = value
|> String.to_integer()
acc ++ [ height: width ]
"format" ->
format = value
|> String.downcase()
|> String.to_atom()
acc ++ [ format: format ]
"quality" ->
quality = value
|> String.to_integer()
acc ++ [ quality: quality ]
"size" ->
acc ++ [ size: value ]
_ -> acc
end
end ) }
else
{ :error, image_data }
end
end
end
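# Sketch of a typical result shape (the concrete values depend on the
# image; key order follows the format string parsed above):
#
#   Gmex.open( "test/images/blossom.jpg" ) |> Gmex.get_info()
#   #=> { :ok, [ width: 640, height: 480, size: "64.1Ki", format: :jpeg, quality: 92 ] }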
@doc """
Resizes image
## Options
* `:width` - (Optional) width of the resized image, if not specified will be calculated based on proportions.
* `:height` - (Optional) height of the resized image, if not specified will be calculated based on proportions.
* `:type` - (Optional) resize type, can be either :fill or :fit, defaults to :fill.
* `:fill` - Generates images of the specified size with cropping.
* `:fit` - Generates an image that will fit in the specified size, no cropping.
## Example
iex> Gmex.open( "test/images/blossom.jpg" )
iex> |> Gmex.resize( width: 300, height: 200, type: :fill )
iex> |> Gmex.save( "newimage.jpg" )
{ :ok, nil }
"""
@spec resize( image, resize_options ) :: image | gmex_error
def resize( image, options \\ [] ) do
with { :ok, _ } <- image
do
options = Keyword.merge( @default_resize_options, options )
{ _, image_data } = image |> get_info()
src_width = image_data |> Keyword.get( :width )
src_height = image_data |> Keyword.get( :height )
tar_width = options |> Keyword.get( :width, :auto )
tar_height = options |> Keyword.get( :height, :auto )
src_ratio = src_width / src_height
resize_type = options |> Keyword.get( :type, :fill )
tar_width = cond do
tar_width == :auto and tar_height == :auto -> src_width
tar_width == :auto and tar_height != :auto -> src_width * tar_height / src_height
true -> tar_width
end
tar_height = cond do
tar_height == :auto and tar_width == :auto -> src_height
tar_height == :auto and tar_width != :auto -> src_height * tar_width / src_width
true -> tar_height
end
tar_ratio = tar_width / tar_height
case resize_type do
:fill ->
{ resize_width, resize_height } = if src_ratio >= tar_ratio do
{ src_width / ( src_height / tar_height ), tar_height }
else
{ tar_width, src_height / ( src_width / tar_width ) }
end
image
|> options(
resize: { resize_width, resize_height },
gravity: "center",
crop: { tar_width, tar_height, 0, 0 }
)
:fit ->
image
|> option( resize: { tar_width, tar_height } )
_ -> { :error, "unknown resize type" }
end
end
end
@doc """
Apply a GraphicsMagick option to the given image.
## Example
iex> Gmex.open( "test/images/blossom.jpg" )
iex> |> Gmex.option( negate: true )
iex> |> Gmex.option( resize: { 50, 50 } )
iex> |> Gmex.option( strip: true )
iex> |> Gmex.option( format: "jpg" )
{ :ok, %Gmex.Image{ image: "test/images/blossom.jpg", options: [ "gm", "-negate", "-resize", "50x50", "-strip", "-format", "jpg" ] } }
List of available options:
| Option | GraphicsMagick |
| ---- | ---- |
| adjoin: true | +adjoin |
| adjoin: false | -adjoin |
| background: color | -background color |
| blur: { radius, sigma } | -blur radiusxsigma |
| blur: radius | -blur radius |
| crop: { width, height, x_offset, y_offset } | -crop widthxheight+x_offset+y_offset |
| crop: { width, height } | -crop widthxheight |
| edge: edge | -edge edge |
| extent: { width, height, x_offset, y_offset } | -extent widthxheight+x_offset+y_offset |
| extent: { width, height } | -extent widthxheight |
| flatten: true | -flatten |
| fill: color | -fill color |
| strip: true | -strip |
| flip: true | -flip |
| format: format | -format format |
| gravity: gravity | -gravity gravity |
| magnify: true | -magnify |
| matte: true | +matte |
| matte: false | -matte |
| negate: true | -negate |
| opaque: color | -opaque color |
| quality: quality | -quality quality |
| resize: { width, height } | -resize widthxheight |
| resize: percents | -resize percents% |
| rotate: degrees | -rotate degrees |
| size: { width, height } | -size widthxheight |
| size: { width, height, offset } | -size widthxheight+offset |
| thumbnail: { width, height } | -thumbnail widthxheight |
| thumbnail: percents | -thumbnail percents% |
| transparent: color | -transparent color |
| type: type | -type type |
| custom: [ arg1, arg2, arg3... ] | arg1 arg2 arg3 ... |
"""
@spec option( image, option ) :: image
def option( { :ok, image = %Gmex.Image{} }, [ adjoin: true ] ) do
{ :ok, Gmex.Image.append_option( image, [ "+adjoin" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ adjoin: false ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-adjoin" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ background: color ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-background", "#{color}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ blur: radius ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-blur", "#{radius}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ blur: { radius, sigma } ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-blur", "#{radius}x#{sigma}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ crop: { width, height } ] ) do
width = Kernel.round( width )
height = Kernel.round( height )
{ :ok, Gmex.Image.append_option( image, [ "-crop", "#{width}x#{height}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ crop: { width, height, x_offset, y_offset } ] ) do
width = Kernel.round( width )
height = Kernel.round( height )
x_offset = Kernel.round( x_offset )
y_offset = Kernel.round( y_offset )
x_offset = if x_offset >= 0, do: "+#{x_offset}", else: x_offset
y_offset = if y_offset >= 0, do: "+#{y_offset}", else: y_offset
{ :ok, Gmex.Image.append_option( image, [ "-crop", "#{width}x#{height}#{x_offset}#{y_offset}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ edge: radius ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-edge", "#{radius}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ extent: { width, height } ] ) do
width = Kernel.round( width )
height = Kernel.round( height )
{ :ok, Gmex.Image.append_option( image, [ "-extent", "#{width}x#{height}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ extent: { width, height, x_offset, y_offset } ] ) do
width = Kernel.round( width )
height = Kernel.round( height )
x_offset = Kernel.round( x_offset )
y_offset = Kernel.round( y_offset )
x_offset = if x_offset >= 0, do: "+#{x_offset}", else: x_offset
y_offset = if y_offset >= 0, do: "+#{y_offset}", else: y_offset
{ :ok, Gmex.Image.append_option( image, [ "-extent", "#{width}x#{height}#{x_offset}#{y_offset}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ flatten: true ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-flatten" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ fill: color ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-flatten", "#{color}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ strip: true ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-strip" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ flip: true ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-flip" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ format: format ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-format", "#{format}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ gravity: gravity ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-gravity", "#{gravity}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ magnify: true ] ) do
{ :ok, Gmex.Image.append_option( image, [ "magnify" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ matte: true ] ) do
{ :ok, Gmex.Image.append_option( image, [ "+matte" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ matte: false ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-matte" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ negate: true ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-negate" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ opaque: color ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-opaque", "#{color}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ quality: quality ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-quality", "#{quality}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ resize: { width, height } ] ) do
width = Kernel.round( width )
height = Kernel.round( height )
{ :ok, Gmex.Image.append_option( image, [ "-resize", "#{width}x#{height}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ resize: percents ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-resize", "#{percents}%" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ rotate: degrees ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-rotate", "#{degrees}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ size: { width, height } ] ) do
width = Kernel.round( width )
height = Kernel.round( height )
{ :ok, Gmex.Image.append_option( image, [ "-size", "#{width}x#{height}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ size: { width, height, offset } ] ) do
width = Kernel.round( width )
height = Kernel.round( height )
offset = Kernel.round( offset )
{ :ok, Gmex.Image.append_option( image, [ "-size", "#{width}x#{height}+#{offset}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ thumbnail: { width, height } ] ) do
width = Kernel.round( width )
height = Kernel.round( height )
{ :ok, Gmex.Image.append_option( image, [ "-thumbnail", "#{width}x#{height}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ thumbnail: percents ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-thumbnail", "#{percents}%" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ transparent: color ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-transparent", "#{color}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ type: type ] ) do
{ :ok, Gmex.Image.append_option( image, [ "-type", "#{type}" ] ) }
end
def option( { :ok, image = %Gmex.Image{} }, [ custom: other_options ] ) when is_list( other_options ) do
new_options = other_options
|> Enum.map( fn( option ) -> "#{option}" end )
{ :ok, Gmex.Image.append_option( image, new_options ) }
end
def option( { :ok, _image = %Gmex.Image{} }, _option ) do
{ :error, :unknown_option }
end
@doc """
Apply a list of GraphicsMagick options to the given image.
## Example
iex> Gmex.open( "test/images/blossom.jpg" )
iex> |> Gmex.options( negate: true, resize: { 50, 50 }, strip: true, format: "jpg" )
{ :ok, %Gmex.Image{ image: "test/images/blossom.jpg", options: [ "gm", "-negate", "-resize", "50x50", "-strip", "-format", "jpg" ] } }
"""
@spec options( image, option ) :: image | gmex_error
def options( { :ok, image = %Gmex.Image{} }, [ option | other_options ] ) do
with { :ok, image } <- option( { :ok, image }, [ option ] ) do
if length( other_options ) === 0 do
{ :ok, image }
else
options( { :ok, image }, other_options )
end
end
end
end
|
lib/gmex.ex
| 0.880103 | 0.455622 |
gmex.ex
|
starcoder
|
defmodule Stripe.Converter do
@doc """
Takes a result map or list of maps from a Stripe response and returns a
struct (e.g. `%Stripe.Card{}`) or list of structs.
If the result is not a supported Stripe object, it just returns a plain map
with atomized keys.
"""
@spec convert_result(%{String.t() => any}) :: struct
def convert_result(result), do: convert_value(result)
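# Illustrative conversion of a decoded Stripe payload (fields trimmed;
# "card" is one of the supported object names listed below):
#
#   Stripe.Converter.convert_result(%{"object" => "card", "id" => "card_123"})
#   #=> %Stripe.Card{id: "card_123", ...}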
@supported_objects ~w(
account
account_link
application_fee
fee_refund
balance
balance_transaction
bank_account
billing_portal.session
card
charge
checkout.session
country_spec
coupon
credit_note
credit_note_line_item
customer
customer_balance_transaction
discount
dispute
event
external_account
file
file_link
invoice
invoiceitem
issuing.authorization
issuing.card
issuing.cardholder
issuing.transaction
line_item
list
login_link
mandate
oauth
order
order_item
order_return
payment_intent
payment_method
payout
person
plan
price
product
recipient
refund
review
setup_intent
sku
source
subscription
subscription_item
subscription_schedule
tax_rate
tax_id
topup
terminal.connection_token
terminal.location
terminal.reader
transfer
transfer_reversal
token
usage_record
usage_record_summary
webhook_endpoint
)
@no_convert_maps ~w(metadata supported_bank_account_currencies)
@doc """
Returns a list of structs to be used for providing JSON-encoders.
## Examples
Say you are using Jason to encode your JSON, you can provide the following protocol,
to directly encode all structs of this library into JSON.
```
for struct <- Stripe.Converter.structs() do
defimpl Jason.Encoder, for: struct do
def encode(value, opts) do
Jason.Encode.map(Map.delete(value, :__struct__), opts)
end
end
end
```
"""
def structs() do
(@supported_objects -- @no_convert_maps)
|> Enum.map(&Stripe.Util.object_name_to_module/1)
end
@spec convert_value(any) :: any
defp convert_value(%{"object" => object_name} = value) when is_binary(object_name) do
case Enum.member?(@supported_objects, object_name) do
true ->
convert_stripe_object(value)
false ->
warn_unknown_object(value)
convert_map(value)
end
end
defp convert_value(value) when is_map(value), do: convert_map(value)
defp convert_value(value) when is_list(value), do: convert_list(value)
defp convert_value(value), do: value
@spec convert_map(map) :: map
defp convert_map(value) do
Enum.reduce(value, %{}, fn {key, value}, acc ->
Map.put(acc, String.to_atom(key), convert_value(value))
end)
end
@spec convert_stripe_object(%{String.t() => any}) :: struct
defp convert_stripe_object(%{"object" => object_name} = value) do
module = Stripe.Util.object_name_to_module(object_name)
struct_keys = Map.keys(module.__struct__) |> List.delete(:__struct__)
check_for_extra_keys(struct_keys, value)
processed_map =
struct_keys
|> Enum.reduce(%{}, fn key, acc ->
string_key = to_string(key)
converted_value =
case string_key do
string_key when string_key in @no_convert_maps -> Map.get(value, string_key)
_ -> Map.get(value, string_key) |> convert_value()
end
Map.put(acc, key, converted_value)
end)
|> module.__from_json__()
struct(module, processed_map)
end
@spec convert_list(list) :: list
defp convert_list(list), do: list |> Enum.map(&convert_value/1)
if Mix.env() == :prod do
defp warn_unknown_object(_), do: :ok
else
defp warn_unknown_object(%{"object" => object_name}) do
require Logger
Logger.warn("Unknown object received: #{object_name}")
end
end
if Mix.env() == :prod do
defp check_for_extra_keys(_, _), do: :ok
else
defp check_for_extra_keys(struct_keys, map) do
require Logger
map_keys =
map
|> Map.keys()
|> Enum.map(&String.to_atom/1)
|> MapSet.new()
struct_keys =
struct_keys
|> MapSet.new()
extra_keys =
map_keys
|> MapSet.difference(struct_keys)
|> Enum.to_list()
unless Enum.empty?(extra_keys) do
object = Map.get(map, "object")
module_name =
object
|> Stripe.Util.object_name_to_module()
|> Stripe.Util.module_to_string()
details = "#{module_name}: #{inspect(extra_keys)}"
message = "Extra keys were received but ignored when converting #{details}"
Logger.warn(message)
end
:ok
end
end
end
|
lib/stripe/converter.ex
| 0.84296 | 0.636226 |
converter.ex
|
starcoder
|
defmodule PasswordlessAuth do
@moduledoc """
PasswordlessAuth is a library that gives you the ability to verify a user's
phone number by sending them a verification code, and verifying that
the code they provide matches the code that was sent to their phone number.
Verification codes are stored in an Agent along with the phone number they
were sent to. They are stored with an expiration date/time.
A garbage collector removes expired verification codes from the store.
See PasswordlessAuth.GarbageCollector
"""
use Application
alias PasswordlessAuth.{GarbageCollector, VerificationCode, Store}
@default_verification_code_ttl 300
@default_num_attempts_before_timeout 5
@default_rate_limit_timeout_length 60
# Removed default adapter ExTwilio
@sms_adapter Application.get_env(:passwordless_auth, :sms_adapter)
@type verification_failed_reason() ::
:attempt_blocked | :code_expired | :does_not_exist | :incorrect_code
@doc false
def start(_type, _args) do
children = [
GarbageCollector,
Store
]
opts = [strategy: :one_for_one, name: PasswordlessAuth.Supervisor]
Supervisor.start_link(children, opts)
end
@doc """
Send an SMS with a verification code to the given `phone_number`
The verification code is valid for the number of seconds given to the
`verification_code_ttl` config option (defaults to 300)
Options for the SMS adapter request can be passed to `opts[:sms_request_options]`.
You'll need to pass at least a `from` or `messaging_service_sid` option
to `opts[:sms_request_options]` for messages to be sent with a Twilio-style adapter
(see the [Twilio API documentation](https://www.twilio.com/docs/api/messaging/send-messages#conditional-parameters)).
Arguments:
- `phone_number`: The phone number that will receive the text message
- `opts`: Options (see below)
Options:
- `message`: A custom text message template. The verification code
can be injected with this formatting: _"Yarrr, {{code}} be the secret"_.
Defaults to _"Your verification code is: {{code}}"_
- `code_length`: Length of the verification code (defaults to 6)
- `sms_request_options`: A keyword list of options that are passed to the SMS adapter request
(see the [Twilio API documentation](https://www.twilio.com/docs/api/messaging/send-messages#conditional-parameters) when using a Twilio-style adapter)
Returns `{:ok, twilio_response}` or `{:error, error}`.
"""
@spec create_and_send_verification_code(String.t(), list()) ::
{:ok, map()} | {:error, String.t()}
def create_and_send_verification_code(phone_number, opts \\ []) do
message = opts[:message] || "Your verification code is: {{code}}"
code_length = opts[:code_length] || 6
code = VerificationCode.generate_code(code_length)
ttl =
Application.get_env(:passwordless_auth, :verification_code_ttl) ||
@default_verification_code_ttl
expires = NaiveDateTime.utc_now() |> NaiveDateTime.add(ttl)
sms_request_options = opts[:sms_request_options] || []
request =
Enum.into(sms_request_options, %{
to: phone_number,
body: String.replace(message, "{{code}}", code),
code: code
})
case @sms_adapter.Message.create(request) do
{:ok, response} ->
Agent.update(
Store,
&Map.put(&1, phone_number, %VerificationCode{
code: code,
expires: expires
})
)
{:ok, response}
{:error, message, _code} ->
{:error, message}
end
end
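# Illustrative call (phone numbers are placeholders; delivery is performed
# by the configured :sms_adapter):
#
#   PasswordlessAuth.create_and_send_verification_code(
#     "+15005550006",
#     sms_request_options: [from: "+15005550001"]
#   )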
@doc """
Verifies that the given `phone_number` has the
given `verification_code` stored in state and that
the verification code hasn't expired.
Returns `:ok` or `{:error, :reason}`.
## Examples
iex> PasswordlessAuth.verify_code("+447123456789", "123456")
{:error, :does_not_exist}
"""
@spec verify_code(String.t(), String.t()) :: :ok | {:error, verification_failed_reason()}
def verify_code(phone_number, attempt_code) do
state = Agent.get(Store, fn state -> state end)
with :ok <- check_code_exists(state, phone_number),
verification_code <- Map.get(state, phone_number),
:ok <- check_verification_code_not_expired(verification_code),
:ok <- check_attempt_is_allowed(verification_code),
:ok <- check_attempt_code(verification_code, attempt_code) do
reset_attempts(phone_number)
:ok
else
{:error, :incorrect_code} = error ->
increment_or_block_attempts(phone_number)
error
{:error, _reason} = error ->
error
end
end
@doc """
Removes a code from state based on the given `phone_number`
Returns `{:ok, %VerificationCode{...}}` or `{:error, :reason}`.
"""
@spec remove_code(String.t()) :: {:ok, VerificationCode.t()} | {:error, :does_not_exist}
def remove_code(phone_number) do
state = Agent.get(Store, fn state -> state end)
with :ok <- check_code_exists(state, phone_number) do
code = Agent.get(Store, &Map.get(&1, phone_number))
Agent.update(Store, &Map.delete(&1, phone_number))
{:ok, code}
end
end
@spec check_code_exists(map(), String.t()) :: :ok | {:error, :does_not_exist}
defp check_code_exists(state, phone_number) do
if Map.has_key?(state, phone_number) do
:ok
else
{:error, :does_not_exist}
end
end
@spec check_verification_code_not_expired(VerificationCode.t()) :: :ok | {:error, :code_expired}
defp check_verification_code_not_expired(%VerificationCode{expires: expires}) do
case NaiveDateTime.compare(expires, NaiveDateTime.utc_now()) do
:gt -> :ok
_ -> {:error, :code_expired}
end
end
@spec check_attempt_is_allowed(VerificationCode.t()) :: :ok | {:error, :attempt_blocked}
defp check_attempt_is_allowed(%VerificationCode{attempts_blocked_until: nil}), do: :ok
defp check_attempt_is_allowed(%VerificationCode{attempts_blocked_until: attempts_blocked_until}) do
case NaiveDateTime.compare(attempts_blocked_until, NaiveDateTime.utc_now()) do
:lt -> :ok
_ -> {:error, :attempt_blocked}
end
end
@spec check_attempt_code(VerificationCode.t(), String.t()) :: :ok | {:error, :incorrect_code}
defp check_attempt_code(%VerificationCode{code: code}, attempt_code) do
if attempt_code == code do
:ok
else
{:error, :incorrect_code}
end
end
@spec reset_attempts(String.t()) :: :ok
defp reset_attempts(phone_number) do
Agent.update(Store, &put_in(&1, [phone_number, Access.key(:attempts)], 0))
end
@spec increment_or_block_attempts(String.t()) :: :ok
defp increment_or_block_attempts(phone_number) do
num_attempts_before_timeout =
Application.get_env(:passwordless_auth, :num_attempts_before_timeout) ||
@default_num_attempts_before_timeout
attempts = Agent.get(Store, &get_in(&1, [phone_number, Access.key(:attempts)]))
if attempts < num_attempts_before_timeout - 1 do
Agent.update(Store, &put_in(&1, [phone_number, Access.key(:attempts)], attempts + 1))
else
timeout_length =
Application.get_env(:passwordless_auth, :rate_limit_timeout_length) ||
@default_rate_limit_timeout_length
attempts_blocked_until =
NaiveDateTime.utc_now() |> NaiveDateTime.add(timeout_length)
Agent.update(Store, fn state ->
state
|> put_in([phone_number, Access.key(:attempts)], 0)
|> put_in([phone_number, Access.key(:attempts_blocked_until)], attempts_blocked_until)
end)
end
end
end
|
lib/passwordless_auth.ex
| 0.888777 | 0.556339 |
passwordless_auth.ex
|
starcoder
|
defmodule AWS.CloudWatchLogs do
@moduledoc """
You can use Amazon CloudWatch Logs to monitor, store, and access your log files
from EC2 instances, AWS CloudTrail, or other sources.
You can then retrieve the associated log data from CloudWatch Logs using the
CloudWatch console, CloudWatch Logs commands in the AWS CLI, CloudWatch Logs
API, or CloudWatch Logs SDK.
You can use CloudWatch Logs to:
* **Monitor logs from EC2 instances in real-time**: You can use
CloudWatch Logs to monitor applications and systems using log data. For example,
CloudWatch Logs can track the number of errors that occur in your application
logs and send you a notification whenever the rate of errors exceeds a threshold
that you specify. CloudWatch Logs uses your log data for monitoring so no code
changes are required. For example, you can monitor application logs for specific
literal terms (such as "NullReferenceException") or count the number of
occurrences of a literal term at a particular position in log data (such as
"404" status codes in an Apache access log). When the term you are searching for
is found, CloudWatch Logs reports the data to a CloudWatch metric that you
specify.
* **Monitor AWS CloudTrail logged events**: You can create alarms in
CloudWatch and receive notifications of particular API activity as captured by
CloudTrail. You can use the notification to perform troubleshooting.
* **Archive log data**: You can use CloudWatch Logs to store your
log data in highly durable storage. You can change the log retention setting so
that any log events older than this setting are automatically deleted. The
CloudWatch Logs agent makes it easy to quickly send both rotated and non-rotated
log data off of a host and into the log service. You can then access the raw log
data when you need it.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2014-03-28",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "logs",
global?: false,
protocol: "json",
service_id: "CloudWatch Logs",
signature_version: "v4",
signing_name: "logs",
target_prefix: "Logs_20140328"
}
end
@doc """
Associates the specified AWS Key Management Service (AWS KMS) customer master
key (CMK) with the specified log group.
Associating an AWS KMS CMK with a log group overrides any existing associations
between the log group and a CMK. After a CMK is associated with a log group, all
newly ingested data for the log group is encrypted using the CMK. This
association is stored as long as the data encrypted with the CMK is still within
Amazon CloudWatch Logs. This enables Amazon CloudWatch Logs to decrypt this data
whenever it is requested.
CloudWatch Logs supports only symmetric CMKs. Do not associate an
asymmetric CMK with your log group. For more information, see [Using Symmetric and Asymmetric
Keys](https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html).
It can take up to 5 minutes for this operation to take effect.
If you attempt to associate a CMK with a log group but the CMK does not exist or
the CMK is disabled, you receive an `InvalidParameterException` error.
"""
def associate_kms_key(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AssociateKmsKey", input, options)
end
@doc """
Cancels the specified export task.
The task must be in the `PENDING` or `RUNNING` state.
"""
def cancel_export_task(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CancelExportTask", input, options)
end
@doc """
Creates an export task, which allows you to efficiently export data from a log
group to an Amazon S3 bucket.
When you perform a `CreateExportTask` operation, you must use credentials that
have permission to write to the S3 bucket that you specify as the destination.
This is an asynchronous call. If all the required information is provided, this
operation initiates an export task and responds with the ID of the task. After
the task has started, you can use
[DescribeExportTasks](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeExportTasks.html) to get the status of the export task. Each account can only have one active
(`RUNNING` or `PENDING`) export task at a time. To cancel an export task, use
[CancelExportTask](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CancelExportTask.html).
You can export logs from multiple log groups or multiple time ranges to the same
S3 bucket. To separate out log data for each export task, you can specify a
prefix to be used as the Amazon S3 key prefix for all exported objects.
Exporting to S3 buckets that are encrypted with AES-256 is supported. Exporting
to S3 buckets encrypted with SSE-KMS is not supported.
"""
def create_export_task(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateExportTask", input, options)
end
@doc """
Creates a log group with the specified name.
You can create up to 20,000 log groups per account.
You must use the following guidelines when naming a log group:
* Log group names must be unique within a region for an AWS account.
* Log group names can be between 1 and 512 characters long.
* Log group names consist of the following characters: a-z, A-Z,
0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and '#'
(number sign)
When you create a log group, by default the log events in the log group never
expire. To set a retention policy so that events expire and are deleted after a
specified time, use
[PutRetentionPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutRetentionPolicy.html). If you associate a AWS Key Management Service (AWS KMS) customer master key
(CMK) with the log group, ingested data is encrypted using the CMK. This
association is stored as long as the data encrypted with the CMK is still within
Amazon CloudWatch Logs. This enables Amazon CloudWatch Logs to decrypt this data
whenever it is requested.
If you attempt to associate a CMK with the log group but the CMK does not exist
or the CMK is disabled, you receive an `InvalidParameterException` error.
CloudWatch Logs supports only symmetric CMKs. Do not associate an asymmetric CMK
with your log group. For more information, see [Using Symmetric and Asymmetric
Keys](https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html).
"""
def create_log_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateLogGroup", input, options)
end
@doc """
Creates a log stream for the specified log group.
A log stream is a sequence of log events that originate from a single source,
such as an application instance or a resource that is being monitored.
There is no limit on the number of log streams that you can create for a log
group. There is a limit of 50 TPS on `CreateLogStream` operations, after which
transactions are throttled.
You must use the following guidelines when naming a log stream:
* Log stream names must be unique within the log group.
* Log stream names can be between 1 and 512 characters long.
* The ':' (colon) and '*' (asterisk) characters are not allowed.
"""
def create_log_stream(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateLogStream", input, options)
end
@doc """
Deletes the specified destination, and eventually disables all the subscription
filters that publish to it.
This operation does not delete the physical resource encapsulated by the
destination.
"""
def delete_destination(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDestination", input, options)
end
@doc """
Deletes the specified log group and permanently deletes all the archived log
events associated with the log group.
"""
def delete_log_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteLogGroup", input, options)
end
@doc """
Deletes the specified log stream and permanently deletes all the archived log
events associated with the log stream.
"""
def delete_log_stream(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteLogStream", input, options)
end
@doc """
Deletes the specified metric filter.
"""
def delete_metric_filter(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteMetricFilter", input, options)
end
@doc """
Deletes a saved CloudWatch Logs Insights query definition.
A query definition contains details about a saved CloudWatch Logs Insights
query.
Each `DeleteQueryDefinition` operation can delete one query definition.
You must have the `logs:DeleteQueryDefinition` permission to be able to perform
this operation.
"""
def delete_query_definition(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteQueryDefinition", input, options)
end
@doc """
Deletes a resource policy from this account.
This revokes the access of the identities in that policy to put log events to
this account.
"""
def delete_resource_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteResourcePolicy", input, options)
end
@doc """
Deletes the specified retention policy.
Log events do not expire if they belong to log groups without a retention
policy.
"""
def delete_retention_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteRetentionPolicy", input, options)
end
@doc """
Deletes the specified subscription filter.
"""
def delete_subscription_filter(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteSubscriptionFilter", input, options)
end
@doc """
Lists all your destinations.
The results are ASCII-sorted by destination name.
"""
def describe_destinations(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDestinations", input, options)
end
@doc """
Lists the specified export tasks.
You can list all your export tasks or filter the results based on task ID or
task status.
"""
def describe_export_tasks(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeExportTasks", input, options)
end
@doc """
Lists the specified log groups.
You can list all your log groups or filter the results by prefix. The results
are ASCII-sorted by log group name.
"""
def describe_log_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLogGroups", input, options)
end
@doc """
Lists the log streams for the specified log group.
You can list all the log streams or filter the results by prefix. You can also
control how the results are ordered.
This operation has a limit of five transactions per second, after which
transactions are throttled.
"""
def describe_log_streams(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLogStreams", input, options)
end
@doc """
Lists the specified metric filters.
You can list all of the metric filters or filter the results by log name,
prefix, metric name, or metric namespace. The results are ASCII-sorted by filter
name.
"""
def describe_metric_filters(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeMetricFilters", input, options)
end
@doc """
Returns a list of CloudWatch Logs Insights queries that are scheduled,
executing, or have been executed recently in this account.
You can request all queries or limit it to queries of a specific log group or
queries with a certain status.
"""
def describe_queries(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeQueries", input, options)
end
@doc """
This operation returns a paginated list of your saved CloudWatch Logs Insights
query definitions.
You can use the `queryDefinitionNamePrefix` parameter to limit the results to
only the query definitions that have names that start with a certain string.
"""
def describe_query_definitions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeQueryDefinitions", input, options)
end
@doc """
Lists the resource policies in this account.
"""
def describe_resource_policies(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeResourcePolicies", input, options)
end
@doc """
Lists the subscription filters for the specified log group.
You can list all the subscription filters or filter the results by prefix. The
results are ASCII-sorted by filter name.
"""
def describe_subscription_filters(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeSubscriptionFilters", input, options)
end
@doc """
Disassociates the associated AWS Key Management Service (AWS KMS) customer
master key (CMK) from the specified log group.
After the AWS KMS CMK is disassociated from the log group, AWS CloudWatch Logs
stops encrypting newly ingested data for the log group. All previously ingested
data remains encrypted, and AWS CloudWatch Logs requires permissions for the CMK
whenever the encrypted data is requested.
Note that it can take up to 5 minutes for this operation to take effect.
"""
def disassociate_kms_key(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisassociateKmsKey", input, options)
end
@doc """
Lists log events from the specified log group.
You can list all the log events or filter the results using a filter pattern, a
time range, and the name of the log stream.
By default, this operation returns as many log events as can fit in 1 MB (up to
10,000 log events) or all the events found within the time range that you
specify. If the results include a token, then there are more log events
available, and you can get additional results by specifying the token in a
subsequent call. This operation can return empty results while there are more
log events available through the token.
The returned log events are sorted by event timestamp, the timestamp when the
event was ingested by CloudWatch Logs, and the ID of the `PutLogEvents` request.
"""
def filter_log_events(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "FilterLogEvents", input, options)
end
@doc """
Lists log events from the specified log stream.
You can list all of the log events or filter using a time range.
By default, this operation returns as many log events as can fit in a response
size of 1MB (up to 10,000 log events). You can get additional log events by
specifying one of the tokens in a subsequent call. This operation can return
empty results while there are more log events available through the token.
"""
def get_log_events(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetLogEvents", input, options)
end
@doc """
Returns a list of the fields that are included in log events in the specified
log group, along with the percentage of log events that contain each field.
The search is limited to a time period that you specify.
In the results, fields that start with @ are fields generated by CloudWatch
Logs. For example, `@timestamp` is the timestamp of each log event. For more
information about the fields that are generated by CloudWatch logs, see
[Supported Logs and Discovered Fields](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_AnalyzeLogData-discoverable-fields.html).
The response results are sorted by the frequency percentage, starting with the
highest percentage.
"""
def get_log_group_fields(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetLogGroupFields", input, options)
end
@doc """
Retrieves all of the fields and values of a single log event.
All fields are retrieved, even if the original query that produced the
`logRecordPointer` retrieved only a subset of fields. Fields are returned as
field name/field value pairs.
The full unparsed log event is returned within `@message`.
"""
def get_log_record(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetLogRecord", input, options)
end
@doc """
Returns the results from the specified query.
Only the fields requested in the query are returned, along with a `@ptr` field,
which is the identifier for the log record. You can use the value of `@ptr` in a
[GetLogRecord](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogRecord.html) operation to get the full log record.
`GetQueryResults` does not start a query execution. To run a query, use
[StartQuery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html).
If the value of the `Status` field in the output is `Running`, this operation
returns only partial results. If you see a value of `Scheduled` or `Running` for
the status, you can retry the operation later to see the final results.
"""
def get_query_results(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetQueryResults", input, options)
end
@doc """
Lists the tags for the specified log group.
"""
def list_tags_log_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsLogGroup", input, options)
end
@doc """
Creates or updates a destination.
This operation is used only to create destinations for cross-account
subscriptions.
A destination encapsulates a physical resource (such as an Amazon Kinesis
stream) and enables you to subscribe to a real-time stream of log events for a
different account, ingested using
[PutLogEvents](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html). Through an access policy, a destination controls what is written to it. By
default, `PutDestination` does not set any access policy with the destination,
which means a cross-account user cannot call
[PutSubscriptionFilter](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutSubscriptionFilter.html)
against this destination. To enable this, the destination owner must call
[PutDestinationPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestinationPolicy.html)
after `PutDestination`.
To perform a `PutDestination` operation, you must also have the `iam:PassRole`
permission.
"""
def put_destination(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutDestination", input, options)
end
@doc """
Creates or updates an access policy associated with an existing destination.
An access policy is an [IAM policy document](https://docs.aws.amazon.com/IAM/latest/UserGuide/policies_overview.html)
that is used to authorize claims to register a subscription filter against a
given destination.
"""
def put_destination_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutDestinationPolicy", input, options)
end
@doc """
Uploads a batch of log events to the specified log stream.
You must include the sequence token obtained from the response of the previous
call. An upload in a newly created log stream does not require a sequence token.
You can also get the sequence token in the `expectedSequenceToken` field from
`InvalidSequenceTokenException`. If you call `PutLogEvents` twice within a
narrow time period using the same value for `sequenceToken`, both calls might be
successful or one might be rejected.
The batch of events must satisfy the following constraints:
* The maximum batch size is 1,048,576 bytes. This size is calculated
as the sum of all event messages in UTF-8, plus 26 bytes for each log event.
* None of the log events in the batch can be more than 2 hours in
the future.
* None of the log events in the batch can be older than 14 days or
older than the retention period of the log group.
* The log events in the batch must be in chronological order by
their timestamp. The timestamp is the time the event occurred, expressed as the
number of milliseconds after Jan 1, 1970 00:00:00 UTC. (In AWS Tools for
PowerShell and the AWS SDK for .NET, the timestamp is specified in .NET format:
yyyy-mm-ddThh:mm:ss. For example, 2017-09-15T13:45:30.)
* A batch of log events in a single request cannot span more than 24
hours. Otherwise, the operation fails.
* The maximum number of log events in a batch is 10,000.
* There is a quota of 5 requests per second per log stream.
Additional requests are throttled. This quota can't be changed.
If a call to `PutLogEvents` returns "UnrecognizedClientException" the most
likely cause is an invalid AWS access key ID or secret key.
"""
def put_log_events(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutLogEvents", input, options)
end
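# Minimal request sketch (field names follow the CloudWatch Logs API;
# `client` is a configured %AWS.Client{} and all values are illustrative):
#
#   AWS.CloudWatchLogs.put_log_events(client, %{
#     "logGroupName" => "my-group",
#     "logStreamName" => "my-stream",
#     "logEvents" => [%{"timestamp" => 1_600_000_000_000, "message" => "hello"}]
#   })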
@doc """
Creates or updates a metric filter and associates it with the specified log
group.
Metric filters allow you to configure rules to extract metric data from log
events ingested through
[PutLogEvents](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html).
The maximum number of metric filters that can be associated with a log group is
100.
"""
def put_metric_filter(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutMetricFilter", input, options)
end
@doc """
Creates or updates a query definition for CloudWatch Logs Insights.
For more information, see [Analyzing Log Data with CloudWatch Logs Insights](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AnalyzingLogData.html).
To update a query definition, specify its `queryDefinitionId` in your request.
The values of `name`, `queryString`, and `logGroupNames` are changed to the
values that you specify in your update operation. No current values are retained
from the current query definition. For example, if you update a current query
definition that includes log groups, and you don't specify the `logGroupNames`
parameter in your update operation, the query definition changes to contain no
log groups.
You must have the `logs:PutQueryDefinition` permission to be able to perform
this operation.
"""
def put_query_definition(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutQueryDefinition", input, options)
end
@doc """
Creates or updates a resource policy allowing other AWS services to put log
events to this account, such as Amazon Route 53.
An account can have up to 10 resource policies per AWS Region.
"""
def put_resource_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutResourcePolicy", input, options)
end
@doc """
Sets the retention of the specified log group.
A retention policy allows you to configure the number of days for which to
retain log events in the specified log group.
"""
def put_retention_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutRetentionPolicy", input, options)
end
@doc """
Creates or updates a subscription filter and associates it with the specified
log group.
Subscription filters allow you to subscribe to a real-time stream of log events
ingested through
[PutLogEvents](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html)
and have them delivered to a specific destination. When log events are sent to
the receiving service, they are Base64 encoded and compressed with the gzip
format.
The following destinations are supported for subscription filters:
* An Amazon Kinesis stream belonging to the same account as the
subscription filter, for same-account delivery.
* A logical destination that belongs to a different account, for
cross-account delivery.
* An Amazon Kinesis Firehose delivery stream that belongs to the
same account as the subscription filter, for same-account delivery.
* An AWS Lambda function that belongs to the same account as the
subscription filter, for same-account delivery.
There can only be one subscription filter associated with a log group. If you
are updating an existing filter, you must specify the correct name in
`filterName`. Otherwise, the call fails because you cannot associate a second
filter with a log group.
To perform a `PutSubscriptionFilter` operation, you must also have the
`iam:PassRole` permission.
"""
def put_subscription_filter(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutSubscriptionFilter", input, options)
end
@doc """
Schedules a query of a log group using CloudWatch Logs Insights.
You specify the log group and time range to query and the query string to use.
For more information, see [CloudWatch Logs Insights Query Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html).
Queries time out after 15 minutes of execution. If your queries are timing out,
reduce the time range being searched or partition your query into a number of
queries.
"""
def start_query(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartQuery", input, options)
end
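# Query sketch (epoch-second timestamps and the query string are
# illustrative; poll get_query_results/3 with the returned queryId):
#
#   AWS.CloudWatchLogs.start_query(client, %{
#     "logGroupName" => "my-group",
#     "startTime" => 1_600_000_000,
#     "endTime" => 1_600_003_600,
#     "queryString" => "fields @timestamp, @message | limit 20"
#   })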
@doc """
Stops a CloudWatch Logs Insights query that is in progress.
If the query has already ended, the operation returns an error indicating that
the specified query is not running.
"""
def stop_query(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopQuery", input, options)
end
@doc """
Adds or updates the specified tags for the specified log group.
To list the tags for a log group, use
[ListTagsLogGroup](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsLogGroup.html). To remove tags, use
[UntagLogGroup](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_UntagLogGroup.html).
For more information about tags, see [Tag Log Groups in Amazon CloudWatch Logs](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html#log-group-tagging)
in the *Amazon CloudWatch Logs User Guide*.
"""
def tag_log_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TagLogGroup", input, options)
end
@doc """
Tests the filter pattern of a metric filter against a sample of log event
messages.
You can use this operation to validate the correctness of a metric filter
pattern.
"""
def test_metric_filter(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TestMetricFilter", input, options)
end
@doc """
Removes the specified tags from the specified log group.
To list the tags for a log group, use
[ListTagsLogGroup](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsLogGroup.html). To add tags, use
[TagLogGroup](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_TagLogGroup.html).
"""
def untag_log_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UntagLogGroup", input, options)
end
end
|
lib/aws/generated/cloud_watch_logs.ex
| 0.889235 | 0.575946 |
cloud_watch_logs.ex
|
starcoder
|
defmodule Credo.CLI.Output.UI do
@moduledoc """
This module provides functions used to create the UI.
"""
@edge "┃"
@ellipsis "…"
@shell_service Credo.CLI.Output.Shell
if Mix.env() == :test do
def puts, do: nil
def puts(_), do: nil
def puts(_, color) when is_atom(color), do: nil
def warn(_), do: nil
else
defdelegate puts, to: @shell_service
defdelegate puts(v), to: @shell_service
def puts(v, color) when is_atom(color) do
@shell_service.puts([color, v])
end
defdelegate warn(v), to: @shell_service
end
def edge(color, indent \\ 2) when is_integer(indent) do
[:reset, color, @edge |> String.pad_trailing(indent)]
end
def edge, do: @edge
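# edge/2 builds an IO list for tinted gutter output; with the default
# indent of 2 the edge glyph is padded to two columns:
#
#   Credo.CLI.Output.UI.edge(:faint)
#   #=> [:reset, :faint, "┃ "]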
def use_colors(exec) do
@shell_service.use_colors(exec.color)
exec
end
def puts_edge(color, indent \\ 2) when is_integer(indent) do
color
|> edge(indent)
|> puts
end
def wrap_at(text, number) do
"(?:((?>.{1,#{number}}(?:(?<=[^\\S\\r\\n])[^\\S\\r\\n]?|(?=\\r?\\n)|$|[^\\S\\r\\n]))|.{1,#{
number
}})(?:\\r?\\n)?|(?:\\r?\\n|$))"
|> Regex.compile!("u")
|> Regex.scan(text)
|> Enum.map(&List.first/1)
|> List.delete_at(-1)
end
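# Rough usage sketch: wrap_at/2 splits `text` into chunks of at most
# `number` characters, preferring to break on whitespace (exact splits
# follow the regex above):
#
#   issue_message
#   |> Credo.CLI.Output.UI.wrap_at(80)
#   |> Enum.each(&Credo.CLI.Output.UI.puts/1)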
@doc """
Truncate a line to fit within a specified maximum length.
Truncation is indicated by a trailing ellipsis (…), and you can override this
using an optional third argument.
iex> Credo.CLI.Output.UI.truncate(" 7 chars\\n", 7)
" 7 ch…"
iex> Credo.CLI.Output.UI.truncate(" more than 7\\n", 7)
" more…"
iex> Credo.CLI.Output.UI.truncate(" more than 7\\n", 7, " ...")
" m ..."
"""
def truncate(_line, max_length) when max_length <= 0, do: ""
def truncate(line, max_length) when max_length > 0 do
truncate(line, max_length, @ellipsis)
end
def truncate(_line, max_length, _ellipsis) when max_length <= 0, do: ""
def truncate(line, max_length, ellipsis) when max_length > 0 do
cond do
String.length(line) <= max_length ->
line
String.length(ellipsis) >= max_length ->
ellipsis
true ->
chars_to_display = max_length - String.length(ellipsis)
String.slice(line, 0, chars_to_display) <> ellipsis
end
end
end
|
lib/credo/cli/output/ui.ex
| 0.721154 | 0.400456 |
ui.ex
|
starcoder
|