defmodule Oban.Crontab.Parser do
@moduledoc false
@doc """
Parses the given `binary` as cron.
Returns `{:ok, [token], rest, context, position, byte_offset}` or
`{:error, reason, rest, context, line, byte_offset}` where `position`
describes the location of the cron (start position) as `{line, offset_to_start_of_line}`.
## Options
* `:line` - the initial line, defaults to 1
* `:byte_offset` - the initial byte offset, defaults to 0
* `:context` - the initial context value. It will be converted
to a map
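## Examples
A minimal usage sketch. The token shape is inferred from the combinators
below, so treat the exact values as assumptions rather than guarantees:
    iex> {:ok, tokens, "", _context, _line, _offset} =
    ...>   Oban.Crontab.Parser.cron("*/5 0-6 * * *")
    iex> Keyword.has_key?(tokens, :minutes)
    true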
"""
@spec cron(binary, keyword) ::
{:ok, [term], rest, context, line, byte_offset}
| {:error, reason, rest, context, line, byte_offset}
when line: {pos_integer, byte_offset},
byte_offset: pos_integer,
rest: binary,
reason: String.t(),
context: map()
def cron(binary, opts \\ []) when is_binary(binary) do
line = Keyword.get(opts, :line, 1)
offset = Keyword.get(opts, :byte_offset, 0)
context = Map.new(Keyword.get(opts, :context, []))
case cron__0(binary, [], [], context, {line, offset}, offset) do
{:ok, acc, rest, context, line, offset} ->
{:ok, :lists.reverse(acc), rest, context, line, offset}
{:error, _, _, _, _, _} = error ->
error
end
end
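# The private clauses below are NimbleParsec-generated: each cron__N is one
# state of the compiled parser. Every clause threads the unconsumed `rest`
# binary, a token accumulator `acc`, a `stack` of saved accumulators and
# backtrack frames, the user `context`, and the current `line`/`offset`.
# Clauses that push `{rest, context, line, offset}` onto the stack record a
# backtrack point; the clauses that pattern match that tuple back off the
# stack restore it when a branch fails.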
defp cron__0(rest, acc, stack, context, line, offset) do
cron__1(rest, [], [acc | stack], context, line, offset)
end
defp cron__1(rest, acc, stack, context, line, offset) do
cron__39(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
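# Alternatives for a single field part, tried in order with backtracking:
# a step expression ("*" or a range followed by "/" and a divisor), a plain
# range, a literal number, a bare "*", and finally a "," separator. The
# error message in cron__3 spells out this whole choice when every branch
# has failed.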
defp cron__3(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__4(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__3(rest, _acc, _stack, context, line, offset) do
{:error,
"expected string \"*\" or ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9', followed by string \"-\", followed by ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9', followed by string \"/\", followed by ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9' or ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9', followed by string \"-\", followed by ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9' or ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9' or string \"*\" or string \",\"",
rest, context, line, offset}
end
defp cron__4(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__2(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__5(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__3(rest, [], stack, context, line, offset)
end
defp cron__6(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__7(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__6(rest, acc, stack, context, line, offset) do
cron__5(rest, acc, stack, context, line, offset)
end
defp cron__7(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__2(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__8(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__6(rest, [], stack, context, line, offset)
end
defp cron__9(rest, acc, stack, context, line, offset) do
cron__10(rest, [], [acc | stack], context, line, offset)
end
defp cron__10(rest, acc, stack, context, line, offset) do
cron__11(rest, [], [acc | stack], context, line, offset)
end
defp cron__11(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__12(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__11(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__8(rest, acc, stack, context, line, offset)
end
defp cron__12(rest, acc, stack, context, line, offset) do
cron__14(rest, acc, [1 | stack], context, line, offset)
end
defp cron__14(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__15(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__14(rest, acc, stack, context, line, offset) do
cron__13(rest, acc, stack, context, line, offset)
end
defp cron__13(rest, acc, [_ | stack], context, line, offset) do
cron__16(rest, acc, stack, context, line, offset)
end
defp cron__15(rest, acc, [1 | stack], context, line, offset) do
cron__16(rest, acc, stack, context, line, offset)
end
defp cron__15(rest, acc, [count | stack], context, line, offset) do
cron__14(rest, acc, [count - 1 | stack], context, line, offset)
end
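# Folds the digits gathered above into one integer. The first byte was
# already converted (x0 - 48); the fold converts each remaining raw
# codepoint and shifts the accumulator by a decimal place, so "15"
# becomes 15.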
defp cron__16(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__17(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
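# Wraps the folded integer as a `literal: n` token. More than one value
# here would indicate a combinator bug, hence the raise from
# unwrap_and_tag/3.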
defp cron__17(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__18(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__18(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__2(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__19(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__9(rest, [], stack, context, line, offset)
end
defp cron__20(rest, acc, stack, context, line, offset) do
cron__21(rest, [], [acc | stack], context, line, offset)
end
defp cron__21(rest, acc, stack, context, line, offset) do
cron__22(rest, [], [acc | stack], context, line, offset)
end
defp cron__22(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__23(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__22(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__19(rest, acc, stack, context, line, offset)
end
defp cron__23(rest, acc, stack, context, line, offset) do
cron__25(rest, acc, [1 | stack], context, line, offset)
end
defp cron__25(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__26(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__25(rest, acc, stack, context, line, offset) do
cron__24(rest, acc, stack, context, line, offset)
end
defp cron__24(rest, acc, [_ | stack], context, line, offset) do
cron__27(rest, acc, stack, context, line, offset)
end
defp cron__26(rest, acc, [1 | stack], context, line, offset) do
cron__27(rest, acc, stack, context, line, offset)
end
defp cron__26(rest, acc, [count | stack], context, line, offset) do
cron__25(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__27(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__28(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__28(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__29(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__28(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__19(rest, acc, stack, context, line, offset)
end
defp cron__29(rest, acc, stack, context, line, offset) do
cron__30(rest, [], [acc | stack], context, line, offset)
end
defp cron__30(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__31(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__30(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__19(rest, acc, stack, context, line, offset)
end
defp cron__31(rest, acc, stack, context, line, offset) do
cron__33(rest, acc, [1 | stack], context, line, offset)
end
defp cron__33(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__34(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__33(rest, acc, stack, context, line, offset) do
cron__32(rest, acc, stack, context, line, offset)
end
defp cron__32(rest, acc, [_ | stack], context, line, offset) do
cron__35(rest, acc, stack, context, line, offset)
end
defp cron__34(rest, acc, [1 | stack], context, line, offset) do
cron__35(rest, acc, stack, context, line, offset)
end
defp cron__34(rest, acc, [count | stack], context, line, offset) do
cron__33(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__35(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__36(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
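# The two integers folded on either side of "-" become a
# `range: [min, max]` token.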
defp cron__36(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__37(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__37(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__2(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__38(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__20(rest, [], stack, context, line, offset)
end
defp cron__39(rest, acc, stack, context, line, offset) do
cron__40(rest, [], [acc | stack], context, line, offset)
end
defp cron__40(rest, acc, stack, context, line, offset) do
cron__61(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__42(rest, acc, stack, context, line, offset) do
cron__43(rest, [], [acc | stack], context, line, offset)
end
defp cron__43(rest, acc, stack, context, line, offset) do
cron__44(rest, [], [acc | stack], context, line, offset)
end
defp cron__44(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__45(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__44(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__38(rest, acc, stack, context, line, offset)
end
defp cron__45(rest, acc, stack, context, line, offset) do
cron__47(rest, acc, [1 | stack], context, line, offset)
end
defp cron__47(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__48(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__47(rest, acc, stack, context, line, offset) do
cron__46(rest, acc, stack, context, line, offset)
end
defp cron__46(rest, acc, [_ | stack], context, line, offset) do
cron__49(rest, acc, stack, context, line, offset)
end
defp cron__48(rest, acc, [1 | stack], context, line, offset) do
cron__49(rest, acc, stack, context, line, offset)
end
defp cron__48(rest, acc, [count | stack], context, line, offset) do
cron__47(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__49(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__50(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__50(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__51(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__50(rest, _acc, stack, context, line, offset) do
[_, _, _, acc | stack] = stack
cron__38(rest, acc, stack, context, line, offset)
end
defp cron__51(rest, acc, stack, context, line, offset) do
cron__52(rest, [], [acc | stack], context, line, offset)
end
defp cron__52(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__53(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__52(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__38(rest, acc, stack, context, line, offset)
end
defp cron__53(rest, acc, stack, context, line, offset) do
cron__55(rest, acc, [1 | stack], context, line, offset)
end
defp cron__55(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__56(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__55(rest, acc, stack, context, line, offset) do
cron__54(rest, acc, stack, context, line, offset)
end
defp cron__54(rest, acc, [_ | stack], context, line, offset) do
cron__57(rest, acc, stack, context, line, offset)
end
defp cron__56(rest, acc, [1 | stack], context, line, offset) do
cron__57(rest, acc, stack, context, line, offset)
end
defp cron__56(rest, acc, [count | stack], context, line, offset) do
cron__55(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__57(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__58(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__58(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__59(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__59(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__41(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__60(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__42(rest, [], stack, context, line, offset)
end
defp cron__61(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__62(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__61(rest, acc, stack, context, line, offset) do
cron__60(rest, acc, stack, context, line, offset)
end
defp cron__62(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__41(rest, acc ++ previous_acc, stack, context, line, offset)
end
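# A "/" after the wildcard or range above introduces a step divisor; any
# other byte restores the saved accumulator and backtracks.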
defp cron__41(<<"/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__63(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__41(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__38(rest, acc, stack, context, line, offset)
end
defp cron__63(rest, acc, stack, context, line, offset) do
cron__64(rest, [], [acc | stack], context, line, offset)
end
defp cron__64(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__65(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__64(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__38(rest, acc, stack, context, line, offset)
end
defp cron__65(rest, acc, stack, context, line, offset) do
cron__67(rest, acc, [1 | stack], context, line, offset)
end
defp cron__67(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__68(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__67(rest, acc, stack, context, line, offset) do
cron__66(rest, acc, stack, context, line, offset)
end
defp cron__66(rest, acc, [_ | stack], context, line, offset) do
cron__69(rest, acc, stack, context, line, offset)
end
defp cron__68(rest, acc, [1 | stack], context, line, offset) do
cron__69(rest, acc, stack, context, line, offset)
end
defp cron__68(rest, acc, [count | stack], context, line, offset) do
cron__67(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__69(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__70(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
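# Tags the divisor parsed after "/" as a `step: [n]` token.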
defp cron__70(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__71(rest, [step: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__71(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__2(rest, acc ++ previous_acc, stack, context, line, offset)
end
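# Entry to the repeat combinator for the first field: cron__2 saves a
# frame, cron__73 parses one ","-separated part, and the loop continues
# until no further part matches.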
defp cron__2(rest, acc, stack, context, line, offset) do
cron__73(rest, [], [{rest, acc, context, line, offset} | stack], context, line, offset)
end
defp cron__73(rest, acc, stack, context, line, offset) do
cron__111(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__75(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__76(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__75(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__72(rest, acc, stack, context, line, offset)
end
defp cron__76(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__74(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__77(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__75(rest, [], stack, context, line, offset)
end
defp cron__78(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__79(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__78(rest, acc, stack, context, line, offset) do
cron__77(rest, acc, stack, context, line, offset)
end
defp cron__79(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__74(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__80(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__78(rest, [], stack, context, line, offset)
end
defp cron__81(rest, acc, stack, context, line, offset) do
cron__82(rest, [], [acc | stack], context, line, offset)
end
defp cron__82(rest, acc, stack, context, line, offset) do
cron__83(rest, [], [acc | stack], context, line, offset)
end
defp cron__83(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__84(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__83(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__80(rest, acc, stack, context, line, offset)
end
defp cron__84(rest, acc, stack, context, line, offset) do
cron__86(rest, acc, [1 | stack], context, line, offset)
end
defp cron__86(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__87(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__86(rest, acc, stack, context, line, offset) do
cron__85(rest, acc, stack, context, line, offset)
end
defp cron__85(rest, acc, [_ | stack], context, line, offset) do
cron__88(rest, acc, stack, context, line, offset)
end
defp cron__87(rest, acc, [1 | stack], context, line, offset) do
cron__88(rest, acc, stack, context, line, offset)
end
defp cron__87(rest, acc, [count | stack], context, line, offset) do
cron__86(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__88(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__89(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__89(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__90(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__90(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__74(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__91(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__81(rest, [], stack, context, line, offset)
end
defp cron__92(rest, acc, stack, context, line, offset) do
cron__93(rest, [], [acc | stack], context, line, offset)
end
defp cron__93(rest, acc, stack, context, line, offset) do
cron__94(rest, [], [acc | stack], context, line, offset)
end
defp cron__94(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__95(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__94(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__91(rest, acc, stack, context, line, offset)
end
defp cron__95(rest, acc, stack, context, line, offset) do
cron__97(rest, acc, [1 | stack], context, line, offset)
end
defp cron__97(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__98(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__97(rest, acc, stack, context, line, offset) do
cron__96(rest, acc, stack, context, line, offset)
end
defp cron__96(rest, acc, [_ | stack], context, line, offset) do
cron__99(rest, acc, stack, context, line, offset)
end
defp cron__98(rest, acc, [1 | stack], context, line, offset) do
cron__99(rest, acc, stack, context, line, offset)
end
defp cron__98(rest, acc, [count | stack], context, line, offset) do
cron__97(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__99(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__100(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__100(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__101(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__100(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__91(rest, acc, stack, context, line, offset)
end
defp cron__101(rest, acc, stack, context, line, offset) do
cron__102(rest, [], [acc | stack], context, line, offset)
end
defp cron__102(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__103(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__102(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__91(rest, acc, stack, context, line, offset)
end
defp cron__103(rest, acc, stack, context, line, offset) do
cron__105(rest, acc, [1 | stack], context, line, offset)
end
defp cron__105(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__106(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__105(rest, acc, stack, context, line, offset) do
cron__104(rest, acc, stack, context, line, offset)
end
defp cron__104(rest, acc, [_ | stack], context, line, offset) do
cron__107(rest, acc, stack, context, line, offset)
end
defp cron__106(rest, acc, [1 | stack], context, line, offset) do
cron__107(rest, acc, stack, context, line, offset)
end
defp cron__106(rest, acc, [count | stack], context, line, offset) do
cron__105(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__107(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__108(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__108(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__109(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__109(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__74(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__110(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__92(rest, [], stack, context, line, offset)
end
defp cron__111(rest, acc, stack, context, line, offset) do
cron__112(rest, [], [acc | stack], context, line, offset)
end
defp cron__112(rest, acc, stack, context, line, offset) do
cron__133(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__114(rest, acc, stack, context, line, offset) do
cron__115(rest, [], [acc | stack], context, line, offset)
end
defp cron__115(rest, acc, stack, context, line, offset) do
cron__116(rest, [], [acc | stack], context, line, offset)
end
defp cron__116(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__117(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__116(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__110(rest, acc, stack, context, line, offset)
end
defp cron__117(rest, acc, stack, context, line, offset) do
cron__119(rest, acc, [1 | stack], context, line, offset)
end
defp cron__119(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__120(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__119(rest, acc, stack, context, line, offset) do
cron__118(rest, acc, stack, context, line, offset)
end
defp cron__118(rest, acc, [_ | stack], context, line, offset) do
cron__121(rest, acc, stack, context, line, offset)
end
defp cron__120(rest, acc, [1 | stack], context, line, offset) do
cron__121(rest, acc, stack, context, line, offset)
end
defp cron__120(rest, acc, [count | stack], context, line, offset) do
cron__119(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__121(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__122(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__122(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__123(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__122(rest, _acc, stack, context, line, offset) do
[_, _, _, acc | stack] = stack
cron__110(rest, acc, stack, context, line, offset)
end
defp cron__123(rest, acc, stack, context, line, offset) do
cron__124(rest, [], [acc | stack], context, line, offset)
end
defp cron__124(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__125(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__124(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__110(rest, acc, stack, context, line, offset)
end
defp cron__125(rest, acc, stack, context, line, offset) do
cron__127(rest, acc, [1 | stack], context, line, offset)
end
defp cron__127(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__128(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__127(rest, acc, stack, context, line, offset) do
cron__126(rest, acc, stack, context, line, offset)
end
defp cron__126(rest, acc, [_ | stack], context, line, offset) do
cron__129(rest, acc, stack, context, line, offset)
end
defp cron__128(rest, acc, [1 | stack], context, line, offset) do
cron__129(rest, acc, stack, context, line, offset)
end
defp cron__128(rest, acc, [count | stack], context, line, offset) do
cron__127(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__129(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__130(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__130(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__131(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__131(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__113(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__132(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__114(rest, [], stack, context, line, offset)
end
defp cron__133(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__134(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__133(rest, acc, stack, context, line, offset) do
cron__132(rest, acc, stack, context, line, offset)
end
defp cron__134(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__113(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__113(<<"/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__135(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__113(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__110(rest, acc, stack, context, line, offset)
end
defp cron__135(rest, acc, stack, context, line, offset) do
cron__136(rest, [], [acc | stack], context, line, offset)
end
defp cron__136(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__137(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__136(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__110(rest, acc, stack, context, line, offset)
end
defp cron__137(rest, acc, stack, context, line, offset) do
cron__139(rest, acc, [1 | stack], context, line, offset)
end
defp cron__139(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__140(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__139(rest, acc, stack, context, line, offset) do
cron__138(rest, acc, stack, context, line, offset)
end
defp cron__138(rest, acc, [_ | stack], context, line, offset) do
cron__141(rest, acc, stack, context, line, offset)
end
defp cron__140(rest, acc, [1 | stack], context, line, offset) do
cron__141(rest, acc, stack, context, line, offset)
end
defp cron__140(rest, acc, [count | stack], context, line, offset) do
cron__139(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__141(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__142(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__142(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__143(rest, [step: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__143(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__74(rest, acc ++ previous_acc, stack, context, line, offset)
end
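# cron__72 pops the repeat frame once no further part matches; cron__74
# merges a successful part's tokens into the frame and loops back through
# cron__73 for the next ","-separated part.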
defp cron__72(_, _, [{rest, acc, context, line, offset} | stack], _, _, _) do
cron__144(rest, acc, stack, context, line, offset)
end
defp cron__74(
inner_rest,
inner_acc,
[{rest, acc, context, line, offset} | stack],
inner_context,
inner_line,
inner_offset
) do
_ = {rest, acc, context, line, offset}
cron__73(
inner_rest,
[],
[{inner_rest, inner_acc ++ acc, inner_context, inner_line, inner_offset} | stack],
inner_context,
inner_line,
inner_offset
)
end
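# All parts collected for the first field are wrapped as a
# `minutes: [...]` token.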
defp cron__144(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__145(rest, [minutes: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__145(rest, acc, stack, context, line, offset) do
cron__146(rest, [], [acc | stack], context, line, offset)
end
defp cron__146(rest, acc, stack, context, line, offset) do
cron__147(rest, [], [acc | stack], context, line, offset)
end
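# Requires at least one space or tab between fields (cron__147), then
# consumes any additional run of spaces or tabs (cron__148/cron__150).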
defp cron__147(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 === 32 or x0 === 9 do
cron__148(rest, acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__147(rest, _acc, _stack, context, line, offset) do
{:error, "expected ASCII character equal to ' ' or equal to '\\t'", rest, context, line,
offset}
end
defp cron__148(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 === 32 or x0 === 9 do
cron__150(rest, acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__148(rest, acc, stack, context, line, offset) do
cron__149(rest, acc, stack, context, line, offset)
end
defp cron__150(rest, acc, stack, context, line, offset) do
cron__148(rest, acc, stack, context, line, offset)
end
defp cron__149(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__151(rest, acc, stack, context, line, offset)
end
defp cron__151(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__152(rest, [] ++ acc, stack, context, line, offset)
end
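# The next field (hours, by position) reuses the same
# wild/literal/range/step grammar as minutes.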
defp cron__152(rest, acc, stack, context, line, offset) do
cron__153(rest, [], [acc | stack], context, line, offset)
end
defp cron__153(rest, acc, stack, context, line, offset) do
cron__191(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__155(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__156(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__155(rest, _acc, _stack, context, line, offset) do
{:error,
"expected string \"*\" or ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9', followed by string \"-\", followed by ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9', followed by string \"/\", followed by ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9' or ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9', followed by string \"-\", followed by ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9' or ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9' or string \"*\" or string \",\"",
rest, context, line, offset}
end
defp cron__156(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__154(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__157(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__155(rest, [], stack, context, line, offset)
end
defp cron__158(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__159(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__158(rest, acc, stack, context, line, offset) do
cron__157(rest, acc, stack, context, line, offset)
end
defp cron__159(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__154(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__160(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__158(rest, [], stack, context, line, offset)
end
defp cron__161(rest, acc, stack, context, line, offset) do
cron__162(rest, [], [acc | stack], context, line, offset)
end
defp cron__162(rest, acc, stack, context, line, offset) do
cron__163(rest, [], [acc | stack], context, line, offset)
end
defp cron__163(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__164(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__163(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__160(rest, acc, stack, context, line, offset)
end
defp cron__164(rest, acc, stack, context, line, offset) do
cron__166(rest, acc, [1 | stack], context, line, offset)
end
defp cron__166(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__167(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__166(rest, acc, stack, context, line, offset) do
cron__165(rest, acc, stack, context, line, offset)
end
defp cron__165(rest, acc, [_ | stack], context, line, offset) do
cron__168(rest, acc, stack, context, line, offset)
end
defp cron__167(rest, acc, [1 | stack], context, line, offset) do
cron__168(rest, acc, stack, context, line, offset)
end
defp cron__167(rest, acc, [count | stack], context, line, offset) do
cron__166(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__168(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__169(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__169(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__170(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__170(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__154(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__171(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__161(rest, [], stack, context, line, offset)
end
defp cron__172(rest, acc, stack, context, line, offset) do
cron__173(rest, [], [acc | stack], context, line, offset)
end
defp cron__173(rest, acc, stack, context, line, offset) do
cron__174(rest, [], [acc | stack], context, line, offset)
end
defp cron__174(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__175(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__174(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__171(rest, acc, stack, context, line, offset)
end
defp cron__175(rest, acc, stack, context, line, offset) do
cron__177(rest, acc, [1 | stack], context, line, offset)
end
defp cron__177(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__178(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__177(rest, acc, stack, context, line, offset) do
cron__176(rest, acc, stack, context, line, offset)
end
defp cron__176(rest, acc, [_ | stack], context, line, offset) do
cron__179(rest, acc, stack, context, line, offset)
end
defp cron__178(rest, acc, [1 | stack], context, line, offset) do
cron__179(rest, acc, stack, context, line, offset)
end
defp cron__178(rest, acc, [count | stack], context, line, offset) do
cron__177(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__179(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__180(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__180(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__181(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__180(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__171(rest, acc, stack, context, line, offset)
end
defp cron__181(rest, acc, stack, context, line, offset) do
cron__182(rest, [], [acc | stack], context, line, offset)
end
defp cron__182(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__183(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__182(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__171(rest, acc, stack, context, line, offset)
end
defp cron__183(rest, acc, stack, context, line, offset) do
cron__185(rest, acc, [1 | stack], context, line, offset)
end
defp cron__185(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__186(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__185(rest, acc, stack, context, line, offset) do
cron__184(rest, acc, stack, context, line, offset)
end
defp cron__184(rest, acc, [_ | stack], context, line, offset) do
cron__187(rest, acc, stack, context, line, offset)
end
defp cron__186(rest, acc, [1 | stack], context, line, offset) do
cron__187(rest, acc, stack, context, line, offset)
end
defp cron__186(rest, acc, [count | stack], context, line, offset) do
cron__185(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__187(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__188(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__188(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__189(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__189(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__154(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__190(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__172(rest, [], stack, context, line, offset)
end
defp cron__191(rest, acc, stack, context, line, offset) do
cron__192(rest, [], [acc | stack], context, line, offset)
end
defp cron__192(rest, acc, stack, context, line, offset) do
cron__213(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__194(rest, acc, stack, context, line, offset) do
cron__195(rest, [], [acc | stack], context, line, offset)
end
defp cron__195(rest, acc, stack, context, line, offset) do
cron__196(rest, [], [acc | stack], context, line, offset)
end
defp cron__196(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__197(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__196(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__190(rest, acc, stack, context, line, offset)
end
defp cron__197(rest, acc, stack, context, line, offset) do
cron__199(rest, acc, [1 | stack], context, line, offset)
end
defp cron__199(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__200(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__199(rest, acc, stack, context, line, offset) do
cron__198(rest, acc, stack, context, line, offset)
end
defp cron__198(rest, acc, [_ | stack], context, line, offset) do
cron__201(rest, acc, stack, context, line, offset)
end
defp cron__200(rest, acc, [1 | stack], context, line, offset) do
cron__201(rest, acc, stack, context, line, offset)
end
defp cron__200(rest, acc, [count | stack], context, line, offset) do
cron__199(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__201(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__202(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__202(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__203(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__202(rest, _acc, stack, context, line, offset) do
[_, _, _, acc | stack] = stack
cron__190(rest, acc, stack, context, line, offset)
end
defp cron__203(rest, acc, stack, context, line, offset) do
cron__204(rest, [], [acc | stack], context, line, offset)
end
defp cron__204(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__205(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__204(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__190(rest, acc, stack, context, line, offset)
end
defp cron__205(rest, acc, stack, context, line, offset) do
cron__207(rest, acc, [1 | stack], context, line, offset)
end
defp cron__207(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__208(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__207(rest, acc, stack, context, line, offset) do
cron__206(rest, acc, stack, context, line, offset)
end
defp cron__206(rest, acc, [_ | stack], context, line, offset) do
cron__209(rest, acc, stack, context, line, offset)
end
defp cron__208(rest, acc, [1 | stack], context, line, offset) do
cron__209(rest, acc, stack, context, line, offset)
end
defp cron__208(rest, acc, [count | stack], context, line, offset) do
cron__207(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__209(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__210(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__210(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__211(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__211(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__193(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__212(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__194(rest, [], stack, context, line, offset)
end
defp cron__213(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__214(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__213(rest, acc, stack, context, line, offset) do
cron__212(rest, acc, stack, context, line, offset)
end
defp cron__214(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__193(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__193(<<"/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__215(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__193(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__190(rest, acc, stack, context, line, offset)
end
defp cron__215(rest, acc, stack, context, line, offset) do
cron__216(rest, [], [acc | stack], context, line, offset)
end
defp cron__216(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__217(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__216(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__190(rest, acc, stack, context, line, offset)
end
defp cron__217(rest, acc, stack, context, line, offset) do
cron__219(rest, acc, [1 | stack], context, line, offset)
end
defp cron__219(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__220(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__219(rest, acc, stack, context, line, offset) do
cron__218(rest, acc, stack, context, line, offset)
end
defp cron__218(rest, acc, [_ | stack], context, line, offset) do
cron__221(rest, acc, stack, context, line, offset)
end
defp cron__220(rest, acc, [1 | stack], context, line, offset) do
cron__221(rest, acc, stack, context, line, offset)
end
defp cron__220(rest, acc, [count | stack], context, line, offset) do
cron__219(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__221(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__222(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__222(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__223(rest, [step: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__223(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__154(rest, acc ++ previous_acc, stack, context, line, offset)
end
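# Same repeat machinery as cron__2/cron__72, instantiated for this
# field's ","-separated parts.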
defp cron__154(rest, acc, stack, context, line, offset) do
cron__225(rest, [], [{rest, acc, context, line, offset} | stack], context, line, offset)
end
defp cron__225(rest, acc, stack, context, line, offset) do
cron__263(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__227(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__228(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__227(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__224(rest, acc, stack, context, line, offset)
end
defp cron__228(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__226(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__229(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__227(rest, [], stack, context, line, offset)
end
defp cron__230(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__231(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__230(rest, acc, stack, context, line, offset) do
cron__229(rest, acc, stack, context, line, offset)
end
defp cron__231(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__226(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__232(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__230(rest, [], stack, context, line, offset)
end
defp cron__233(rest, acc, stack, context, line, offset) do
cron__234(rest, [], [acc | stack], context, line, offset)
end
defp cron__234(rest, acc, stack, context, line, offset) do
cron__235(rest, [], [acc | stack], context, line, offset)
end
defp cron__235(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__236(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__235(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__232(rest, acc, stack, context, line, offset)
end
defp cron__236(rest, acc, stack, context, line, offset) do
cron__238(rest, acc, [1 | stack], context, line, offset)
end
defp cron__238(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__239(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__238(rest, acc, stack, context, line, offset) do
cron__237(rest, acc, stack, context, line, offset)
end
defp cron__237(rest, acc, [_ | stack], context, line, offset) do
cron__240(rest, acc, stack, context, line, offset)
end
defp cron__239(rest, acc, [1 | stack], context, line, offset) do
cron__240(rest, acc, stack, context, line, offset)
end
defp cron__239(rest, acc, [count | stack], context, line, offset) do
cron__238(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__240(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__241(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__241(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__242(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__242(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__226(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__243(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__233(rest, [], stack, context, line, offset)
end
defp cron__244(rest, acc, stack, context, line, offset) do
cron__245(rest, [], [acc | stack], context, line, offset)
end
defp cron__245(rest, acc, stack, context, line, offset) do
cron__246(rest, [], [acc | stack], context, line, offset)
end
defp cron__246(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__247(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__246(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__243(rest, acc, stack, context, line, offset)
end
defp cron__247(rest, acc, stack, context, line, offset) do
cron__249(rest, acc, [1 | stack], context, line, offset)
end
defp cron__249(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__250(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__249(rest, acc, stack, context, line, offset) do
cron__248(rest, acc, stack, context, line, offset)
end
defp cron__248(rest, acc, [_ | stack], context, line, offset) do
cron__251(rest, acc, stack, context, line, offset)
end
defp cron__250(rest, acc, [1 | stack], context, line, offset) do
cron__251(rest, acc, stack, context, line, offset)
end
defp cron__250(rest, acc, [count | stack], context, line, offset) do
cron__249(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__251(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__252(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__252(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__253(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__252(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__243(rest, acc, stack, context, line, offset)
end
defp cron__253(rest, acc, stack, context, line, offset) do
cron__254(rest, [], [acc | stack], context, line, offset)
end
defp cron__254(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__255(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__254(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__243(rest, acc, stack, context, line, offset)
end
defp cron__255(rest, acc, stack, context, line, offset) do
cron__257(rest, acc, [1 | stack], context, line, offset)
end
defp cron__257(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__258(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__257(rest, acc, stack, context, line, offset) do
cron__256(rest, acc, stack, context, line, offset)
end
defp cron__256(rest, acc, [_ | stack], context, line, offset) do
cron__259(rest, acc, stack, context, line, offset)
end
defp cron__258(rest, acc, [1 | stack], context, line, offset) do
cron__259(rest, acc, stack, context, line, offset)
end
defp cron__258(rest, acc, [count | stack], context, line, offset) do
cron__257(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__259(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__260(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__260(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__261(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__261(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__226(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__262(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__244(rest, [], stack, context, line, offset)
end
defp cron__263(rest, acc, stack, context, line, offset) do
cron__264(rest, [], [acc | stack], context, line, offset)
end
defp cron__264(rest, acc, stack, context, line, offset) do
cron__285(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__266(rest, acc, stack, context, line, offset) do
cron__267(rest, [], [acc | stack], context, line, offset)
end
defp cron__267(rest, acc, stack, context, line, offset) do
cron__268(rest, [], [acc | stack], context, line, offset)
end
defp cron__268(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__269(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__268(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__262(rest, acc, stack, context, line, offset)
end
defp cron__269(rest, acc, stack, context, line, offset) do
cron__271(rest, acc, [1 | stack], context, line, offset)
end
defp cron__271(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__272(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__271(rest, acc, stack, context, line, offset) do
cron__270(rest, acc, stack, context, line, offset)
end
defp cron__270(rest, acc, [_ | stack], context, line, offset) do
cron__273(rest, acc, stack, context, line, offset)
end
defp cron__272(rest, acc, [1 | stack], context, line, offset) do
cron__273(rest, acc, stack, context, line, offset)
end
defp cron__272(rest, acc, [count | stack], context, line, offset) do
cron__271(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__273(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__274(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
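# A "-" after the first number starts a range: the second bound is parsed
# the same way, and cron__282 emits the pair as a {:range, [first, last]}
# token.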
defp cron__274(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__275(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__274(rest, _acc, stack, context, line, offset) do
[_, _, _, acc | stack] = stack
cron__262(rest, acc, stack, context, line, offset)
end
defp cron__275(rest, acc, stack, context, line, offset) do
cron__276(rest, [], [acc | stack], context, line, offset)
end
defp cron__276(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__277(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__276(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__262(rest, acc, stack, context, line, offset)
end
defp cron__277(rest, acc, stack, context, line, offset) do
cron__279(rest, acc, [1 | stack], context, line, offset)
end
defp cron__279(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__280(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__279(rest, acc, stack, context, line, offset) do
cron__278(rest, acc, stack, context, line, offset)
end
defp cron__278(rest, acc, [_ | stack], context, line, offset) do
cron__281(rest, acc, stack, context, line, offset)
end
defp cron__280(rest, acc, [1 | stack], context, line, offset) do
cron__281(rest, acc, stack, context, line, offset)
end
defp cron__280(rest, acc, [count | stack], context, line, offset) do
cron__279(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__281(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__282(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__282(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__283(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__283(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__265(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__284(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__266(rest, [], stack, context, line, offset)
end
defp cron__285(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__286(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__285(rest, acc, stack, context, line, offset) do
cron__284(rest, acc, stack, context, line, offset)
end
defp cron__286(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__265(rest, acc ++ previous_acc, stack, context, line, offset)
end
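# A "/" after the base expression (a range or "*") introduces a step value,
# emitted by cron__294 as a {:step, [divisor]} token.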
defp cron__265(<<"/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__287(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__265(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__262(rest, acc, stack, context, line, offset)
end
defp cron__287(rest, acc, stack, context, line, offset) do
cron__288(rest, [], [acc | stack], context, line, offset)
end
defp cron__288(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__289(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__288(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__262(rest, acc, stack, context, line, offset)
end
defp cron__289(rest, acc, stack, context, line, offset) do
cron__291(rest, acc, [1 | stack], context, line, offset)
end
defp cron__291(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__292(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__291(rest, acc, stack, context, line, offset) do
cron__290(rest, acc, stack, context, line, offset)
end
defp cron__290(rest, acc, [_ | stack], context, line, offset) do
cron__293(rest, acc, stack, context, line, offset)
end
defp cron__292(rest, acc, [1 | stack], context, line, offset) do
cron__293(rest, acc, stack, context, line, offset)
end
defp cron__292(rest, acc, [count | stack], context, line, offset) do
cron__291(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__293(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__294(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__294(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__295(rest, [step: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__295(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__226(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__224(_, _, [{rest, acc, context, line, offset} | stack], _, _, _) do
cron__296(rest, acc, stack, context, line, offset)
end
defp cron__226(
inner_rest,
inner_acc,
[{rest, acc, context, line, offset} | stack],
inner_context,
inner_line,
inner_offset
) do
_ = {rest, acc, context, line, offset}
cron__225(
inner_rest,
[],
[{inner_rest, inner_acc ++ acc, inner_context, inner_line, inner_offset} | stack],
inner_context,
inner_line,
inner_offset
)
end
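# cron__296 closes the hours field: the tokens gathered by the repeat loop
# (cron__224/cron__226) are reversed and wrapped under the :hours key, so an
# input such as "8-17/3" should yield hours: [range: [8, 17], step: [3]].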
defp cron__296(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__297(rest, [hours: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__297(rest, acc, stack, context, line, offset) do
cron__298(rest, [], [acc | stack], context, line, offset)
end
defp cron__298(rest, acc, stack, context, line, offset) do
cron__299(rest, [], [acc | stack], context, line, offset)
end
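# Field separator: at least one space or tab (codepoints 32 and 9) is
# required by cron__299; cron__300/cron__302 then consume any additional
# whitespace before the next field begins.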
defp cron__299(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 === 32 or x0 === 9 do
cron__300(rest, acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__299(rest, _acc, _stack, context, line, offset) do
{:error, "expected ASCII character equal to ' ' or equal to '\\t'", rest, context, line,
offset}
end
defp cron__300(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 === 32 or x0 === 9 do
cron__302(rest, acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__300(rest, acc, stack, context, line, offset) do
cron__301(rest, acc, stack, context, line, offset)
end
defp cron__302(rest, acc, stack, context, line, offset) do
cron__300(rest, acc, stack, context, line, offset)
end
defp cron__301(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__303(rest, acc, stack, context, line, offset)
end
defp cron__303(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__304(rest, [] ++ acc, stack, context, line, offset)
end
defp cron__304(rest, acc, stack, context, line, offset) do
cron__305(rest, [], [acc | stack], context, line, offset)
end
defp cron__305(rest, acc, stack, context, line, offset) do
cron__343(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__307(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__308(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__307(rest, _acc, _stack, context, line, offset) do
{:error,
"expected string \"*\" or ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9', followed by string \"-\", followed by ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9', followed by string \"/\", followed by ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9' or ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9', followed by string \"-\", followed by ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9' or ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9' or string \"*\" or string \",\"",
rest, context, line, offset}
end
defp cron__308(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__306(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__309(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__307(rest, [], stack, context, line, offset)
end
defp cron__310(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__311(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__310(rest, acc, stack, context, line, offset) do
cron__309(rest, acc, stack, context, line, offset)
end
defp cron__311(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__306(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__312(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__310(rest, [], stack, context, line, offset)
end
defp cron__313(rest, acc, stack, context, line, offset) do
cron__314(rest, [], [acc | stack], context, line, offset)
end
defp cron__314(rest, acc, stack, context, line, offset) do
cron__315(rest, [], [acc | stack], context, line, offset)
end
defp cron__315(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__316(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__315(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__312(rest, acc, stack, context, line, offset)
end
defp cron__316(rest, acc, stack, context, line, offset) do
cron__318(rest, acc, [1 | stack], context, line, offset)
end
defp cron__318(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__319(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__318(rest, acc, stack, context, line, offset) do
cron__317(rest, acc, stack, context, line, offset)
end
defp cron__317(rest, acc, [_ | stack], context, line, offset) do
cron__320(rest, acc, stack, context, line, offset)
end
defp cron__319(rest, acc, [1 | stack], context, line, offset) do
cron__320(rest, acc, stack, context, line, offset)
end
defp cron__319(rest, acc, [count | stack], context, line, offset) do
cron__318(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__320(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__321(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__321(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__322(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__322(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__306(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__323(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__313(rest, [], stack, context, line, offset)
end
defp cron__324(rest, acc, stack, context, line, offset) do
cron__325(rest, [], [acc | stack], context, line, offset)
end
defp cron__325(rest, acc, stack, context, line, offset) do
cron__326(rest, [], [acc | stack], context, line, offset)
end
defp cron__326(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__327(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__326(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__323(rest, acc, stack, context, line, offset)
end
defp cron__327(rest, acc, stack, context, line, offset) do
cron__329(rest, acc, [1 | stack], context, line, offset)
end
defp cron__329(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__330(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__329(rest, acc, stack, context, line, offset) do
cron__328(rest, acc, stack, context, line, offset)
end
defp cron__328(rest, acc, [_ | stack], context, line, offset) do
cron__331(rest, acc, stack, context, line, offset)
end
defp cron__330(rest, acc, [1 | stack], context, line, offset) do
cron__331(rest, acc, stack, context, line, offset)
end
defp cron__330(rest, acc, [count | stack], context, line, offset) do
cron__329(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__331(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__332(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__332(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__333(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__332(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__323(rest, acc, stack, context, line, offset)
end
defp cron__333(rest, acc, stack, context, line, offset) do
cron__334(rest, [], [acc | stack], context, line, offset)
end
defp cron__334(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__335(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__334(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__323(rest, acc, stack, context, line, offset)
end
defp cron__335(rest, acc, stack, context, line, offset) do
cron__337(rest, acc, [1 | stack], context, line, offset)
end
defp cron__337(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__338(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__337(rest, acc, stack, context, line, offset) do
cron__336(rest, acc, stack, context, line, offset)
end
defp cron__336(rest, acc, [_ | stack], context, line, offset) do
cron__339(rest, acc, stack, context, line, offset)
end
defp cron__338(rest, acc, [1 | stack], context, line, offset) do
cron__339(rest, acc, stack, context, line, offset)
end
defp cron__338(rest, acc, [count | stack], context, line, offset) do
cron__337(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__339(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__340(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__340(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__341(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__341(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__306(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__342(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__324(rest, [], stack, context, line, offset)
end
defp cron__343(rest, acc, stack, context, line, offset) do
cron__344(rest, [], [acc | stack], context, line, offset)
end
defp cron__344(rest, acc, stack, context, line, offset) do
cron__365(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__346(rest, acc, stack, context, line, offset) do
cron__347(rest, [], [acc | stack], context, line, offset)
end
defp cron__347(rest, acc, stack, context, line, offset) do
cron__348(rest, [], [acc | stack], context, line, offset)
end
defp cron__348(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__349(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__348(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__342(rest, acc, stack, context, line, offset)
end
defp cron__349(rest, acc, stack, context, line, offset) do
cron__351(rest, acc, [1 | stack], context, line, offset)
end
defp cron__351(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__352(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__351(rest, acc, stack, context, line, offset) do
cron__350(rest, acc, stack, context, line, offset)
end
defp cron__350(rest, acc, [_ | stack], context, line, offset) do
cron__353(rest, acc, stack, context, line, offset)
end
defp cron__352(rest, acc, [1 | stack], context, line, offset) do
cron__353(rest, acc, stack, context, line, offset)
end
defp cron__352(rest, acc, [count | stack], context, line, offset) do
cron__351(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__353(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__354(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__354(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__355(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__354(rest, _acc, stack, context, line, offset) do
[_, _, _, acc | stack] = stack
cron__342(rest, acc, stack, context, line, offset)
end
defp cron__355(rest, acc, stack, context, line, offset) do
cron__356(rest, [], [acc | stack], context, line, offset)
end
defp cron__356(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__357(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__356(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__342(rest, acc, stack, context, line, offset)
end
defp cron__357(rest, acc, stack, context, line, offset) do
cron__359(rest, acc, [1 | stack], context, line, offset)
end
defp cron__359(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__360(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__359(rest, acc, stack, context, line, offset) do
cron__358(rest, acc, stack, context, line, offset)
end
defp cron__358(rest, acc, [_ | stack], context, line, offset) do
cron__361(rest, acc, stack, context, line, offset)
end
defp cron__360(rest, acc, [1 | stack], context, line, offset) do
cron__361(rest, acc, stack, context, line, offset)
end
defp cron__360(rest, acc, [count | stack], context, line, offset) do
cron__359(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__361(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__362(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__362(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__363(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__363(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__345(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__364(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__346(rest, [], stack, context, line, offset)
end
defp cron__365(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__366(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__365(rest, acc, stack, context, line, offset) do
cron__364(rest, acc, stack, context, line, offset)
end
defp cron__366(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__345(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__345(<<"/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__367(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__345(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__342(rest, acc, stack, context, line, offset)
end
defp cron__367(rest, acc, stack, context, line, offset) do
cron__368(rest, [], [acc | stack], context, line, offset)
end
defp cron__368(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__369(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__368(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__342(rest, acc, stack, context, line, offset)
end
defp cron__369(rest, acc, stack, context, line, offset) do
cron__371(rest, acc, [1 | stack], context, line, offset)
end
defp cron__371(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__372(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__371(rest, acc, stack, context, line, offset) do
cron__370(rest, acc, stack, context, line, offset)
end
defp cron__370(rest, acc, [_ | stack], context, line, offset) do
cron__373(rest, acc, stack, context, line, offset)
end
defp cron__372(rest, acc, [1 | stack], context, line, offset) do
cron__373(rest, acc, stack, context, line, offset)
end
defp cron__372(rest, acc, [count | stack], context, line, offset) do
cron__371(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__373(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__374(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__374(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__375(rest, [step: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__375(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__306(rest, acc ++ previous_acc, stack, context, line, offset)
end
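# Day-of-month field: cron__306 drives the comma-separated repetition. Each
# pass parses one expression ("*", step, range, or literal); a trailing ","
# in cron__379 loops back for another pass, and cron__376 fires once no
# further comma matches.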
defp cron__306(rest, acc, stack, context, line, offset) do
cron__377(rest, [], [{rest, acc, context, line, offset} | stack], context, line, offset)
end
defp cron__377(rest, acc, stack, context, line, offset) do
cron__415(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__379(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__380(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__379(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__376(rest, acc, stack, context, line, offset)
end
defp cron__380(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__378(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__381(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__379(rest, [], stack, context, line, offset)
end
defp cron__382(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__383(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__382(rest, acc, stack, context, line, offset) do
cron__381(rest, acc, stack, context, line, offset)
end
defp cron__383(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__378(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__384(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__382(rest, [], stack, context, line, offset)
end
defp cron__385(rest, acc, stack, context, line, offset) do
cron__386(rest, [], [acc | stack], context, line, offset)
end
defp cron__386(rest, acc, stack, context, line, offset) do
cron__387(rest, [], [acc | stack], context, line, offset)
end
defp cron__387(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__388(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__387(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__384(rest, acc, stack, context, line, offset)
end
defp cron__388(rest, acc, stack, context, line, offset) do
cron__390(rest, acc, [1 | stack], context, line, offset)
end
defp cron__390(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__391(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__390(rest, acc, stack, context, line, offset) do
cron__389(rest, acc, stack, context, line, offset)
end
defp cron__389(rest, acc, [_ | stack], context, line, offset) do
cron__392(rest, acc, stack, context, line, offset)
end
defp cron__391(rest, acc, [1 | stack], context, line, offset) do
cron__392(rest, acc, stack, context, line, offset)
end
defp cron__391(rest, acc, [count | stack], context, line, offset) do
cron__390(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__392(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__393(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__393(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__394(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__394(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__378(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__395(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__385(rest, [], stack, context, line, offset)
end
defp cron__396(rest, acc, stack, context, line, offset) do
cron__397(rest, [], [acc | stack], context, line, offset)
end
defp cron__397(rest, acc, stack, context, line, offset) do
cron__398(rest, [], [acc | stack], context, line, offset)
end
defp cron__398(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__399(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__398(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__395(rest, acc, stack, context, line, offset)
end
defp cron__399(rest, acc, stack, context, line, offset) do
cron__401(rest, acc, [1 | stack], context, line, offset)
end
defp cron__401(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__402(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__401(rest, acc, stack, context, line, offset) do
cron__400(rest, acc, stack, context, line, offset)
end
defp cron__400(rest, acc, [_ | stack], context, line, offset) do
cron__403(rest, acc, stack, context, line, offset)
end
defp cron__402(rest, acc, [1 | stack], context, line, offset) do
cron__403(rest, acc, stack, context, line, offset)
end
defp cron__402(rest, acc, [count | stack], context, line, offset) do
cron__401(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__403(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__404(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__404(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__405(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__404(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__395(rest, acc, stack, context, line, offset)
end
defp cron__405(rest, acc, stack, context, line, offset) do
cron__406(rest, [], [acc | stack], context, line, offset)
end
defp cron__406(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__407(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__406(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__395(rest, acc, stack, context, line, offset)
end
defp cron__407(rest, acc, stack, context, line, offset) do
cron__409(rest, acc, [1 | stack], context, line, offset)
end
defp cron__409(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__410(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__409(rest, acc, stack, context, line, offset) do
cron__408(rest, acc, stack, context, line, offset)
end
defp cron__408(rest, acc, [_ | stack], context, line, offset) do
cron__411(rest, acc, stack, context, line, offset)
end
defp cron__410(rest, acc, [1 | stack], context, line, offset) do
cron__411(rest, acc, stack, context, line, offset)
end
defp cron__410(rest, acc, [count | stack], context, line, offset) do
cron__409(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__411(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__412(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__412(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__413(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__413(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__378(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__414(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__396(rest, [], stack, context, line, offset)
end
defp cron__415(rest, acc, stack, context, line, offset) do
cron__416(rest, [], [acc | stack], context, line, offset)
end
defp cron__416(rest, acc, stack, context, line, offset) do
cron__437(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__418(rest, acc, stack, context, line, offset) do
cron__419(rest, [], [acc | stack], context, line, offset)
end
defp cron__419(rest, acc, stack, context, line, offset) do
cron__420(rest, [], [acc | stack], context, line, offset)
end
defp cron__420(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__421(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__420(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__414(rest, acc, stack, context, line, offset)
end
defp cron__421(rest, acc, stack, context, line, offset) do
cron__423(rest, acc, [1 | stack], context, line, offset)
end
defp cron__423(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__424(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__423(rest, acc, stack, context, line, offset) do
cron__422(rest, acc, stack, context, line, offset)
end
defp cron__422(rest, acc, [_ | stack], context, line, offset) do
cron__425(rest, acc, stack, context, line, offset)
end
defp cron__424(rest, acc, [1 | stack], context, line, offset) do
cron__425(rest, acc, stack, context, line, offset)
end
defp cron__424(rest, acc, [count | stack], context, line, offset) do
cron__423(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__425(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__426(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__426(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__427(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__426(rest, _acc, stack, context, line, offset) do
[_, _, _, acc | stack] = stack
cron__414(rest, acc, stack, context, line, offset)
end
defp cron__427(rest, acc, stack, context, line, offset) do
cron__428(rest, [], [acc | stack], context, line, offset)
end
defp cron__428(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__429(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__428(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__414(rest, acc, stack, context, line, offset)
end
defp cron__429(rest, acc, stack, context, line, offset) do
cron__431(rest, acc, [1 | stack], context, line, offset)
end
defp cron__431(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__432(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__431(rest, acc, stack, context, line, offset) do
cron__430(rest, acc, stack, context, line, offset)
end
defp cron__430(rest, acc, [_ | stack], context, line, offset) do
cron__433(rest, acc, stack, context, line, offset)
end
defp cron__432(rest, acc, [1 | stack], context, line, offset) do
cron__433(rest, acc, stack, context, line, offset)
end
defp cron__432(rest, acc, [count | stack], context, line, offset) do
cron__431(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__433(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__434(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__434(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__435(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__435(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__417(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__436(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__418(rest, [], stack, context, line, offset)
end
defp cron__437(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__438(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__437(rest, acc, stack, context, line, offset) do
cron__436(rest, acc, stack, context, line, offset)
end
defp cron__438(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__417(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__417(<<"/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__439(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__417(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__414(rest, acc, stack, context, line, offset)
end
defp cron__439(rest, acc, stack, context, line, offset) do
cron__440(rest, [], [acc | stack], context, line, offset)
end
defp cron__440(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__441(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__440(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__414(rest, acc, stack, context, line, offset)
end
defp cron__441(rest, acc, stack, context, line, offset) do
cron__443(rest, acc, [1 | stack], context, line, offset)
end
defp cron__443(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__444(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__443(rest, acc, stack, context, line, offset) do
cron__442(rest, acc, stack, context, line, offset)
end
defp cron__442(rest, acc, [_ | stack], context, line, offset) do
cron__445(rest, acc, stack, context, line, offset)
end
defp cron__444(rest, acc, [1 | stack], context, line, offset) do
cron__445(rest, acc, stack, context, line, offset)
end
defp cron__444(rest, acc, [count | stack], context, line, offset) do
cron__443(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__445(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__446(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__446(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__447(rest, [step: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__447(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__378(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__376(_, _, [{rest, acc, context, line, offset} | stack], _, _, _) do
cron__448(rest, acc, stack, context, line, offset)
end
defp cron__378(
inner_rest,
inner_acc,
[{rest, acc, context, line, offset} | stack],
inner_context,
inner_line,
inner_offset
) do
_ = {rest, acc, context, line, offset}
cron__377(
inner_rest,
[],
[{inner_rest, inner_acc ++ acc, inner_context, inner_line, inner_offset} | stack],
inner_context,
inner_line,
inner_offset
)
end
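# cron__448 closes the day-of-month field, wrapping its reversed token list
# under the :days key.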
defp cron__448(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__449(rest, [days: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__449(rest, acc, stack, context, line, offset) do
cron__450(rest, [], [acc | stack], context, line, offset)
end
defp cron__450(rest, acc, stack, context, line, offset) do
cron__451(rest, [], [acc | stack], context, line, offset)
end
defp cron__451(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 === 32 or x0 === 9 do
cron__452(rest, acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__451(rest, _acc, _stack, context, line, offset) do
{:error, "expected ASCII character equal to ' ' or equal to '\\t'", rest, context, line,
offset}
end
defp cron__452(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 === 32 or x0 === 9 do
cron__454(rest, acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__452(rest, acc, stack, context, line, offset) do
cron__453(rest, acc, stack, context, line, offset)
end
defp cron__454(rest, acc, stack, context, line, offset) do
cron__452(rest, acc, stack, context, line, offset)
end
defp cron__453(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__455(rest, acc, stack, context, line, offset)
end
defp cron__455(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__456(rest, [] ++ acc, stack, context, line, offset)
end
defp cron__456(rest, acc, stack, context, line, offset) do
cron__457(rest, [], [acc | stack], context, line, offset)
end
defp cron__457(rest, acc, stack, context, line, offset) do
cron__531(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
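# Month field values: cron__457 tries the named-month branch (cron__531,
# defined below) first; when no three-letter name matches, cron__530 falls
# back to the numeric branch in cron__459.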
defp cron__459(rest, acc, stack, context, line, offset) do
cron__497(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__461(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__462(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__461(rest, _acc, _stack, context, line, offset) do
{:error,
"expected string \"JAN\" or string \"FEB\" or string \"MAR\" or string \"APR\" or string \"MAY\" or string \"JUN\" or string \"JUL\" or string \"AUG\" or string \"SEP\" or string \"OCT\" or string \"NOV\" or string \"DEC\" or string \"*\" or ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9', followed by string \"-\", followed by ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9', followed by string \"/\", followed by ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9' or ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9', followed by string \"-\", followed by ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9' or ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9' or string \"*\" or string \",\"",
rest, context, line, offset}
end
defp cron__462(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__460(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__463(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__461(rest, [], stack, context, line, offset)
end
defp cron__464(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__465(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__464(rest, acc, stack, context, line, offset) do
cron__463(rest, acc, stack, context, line, offset)
end
defp cron__465(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__460(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__466(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__464(rest, [], stack, context, line, offset)
end
defp cron__467(rest, acc, stack, context, line, offset) do
cron__468(rest, [], [acc | stack], context, line, offset)
end
defp cron__468(rest, acc, stack, context, line, offset) do
cron__469(rest, [], [acc | stack], context, line, offset)
end
defp cron__469(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__470(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__469(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__466(rest, acc, stack, context, line, offset)
end
defp cron__470(rest, acc, stack, context, line, offset) do
cron__472(rest, acc, [1 | stack], context, line, offset)
end
defp cron__472(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__473(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__472(rest, acc, stack, context, line, offset) do
cron__471(rest, acc, stack, context, line, offset)
end
defp cron__471(rest, acc, [_ | stack], context, line, offset) do
cron__474(rest, acc, stack, context, line, offset)
end
defp cron__473(rest, acc, [1 | stack], context, line, offset) do
cron__474(rest, acc, stack, context, line, offset)
end
defp cron__473(rest, acc, [count | stack], context, line, offset) do
cron__472(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__474(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__475(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__475(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__476(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__476(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__460(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__477(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__467(rest, [], stack, context, line, offset)
end
defp cron__478(rest, acc, stack, context, line, offset) do
cron__479(rest, [], [acc | stack], context, line, offset)
end
defp cron__479(rest, acc, stack, context, line, offset) do
cron__480(rest, [], [acc | stack], context, line, offset)
end
defp cron__480(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__481(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__480(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__477(rest, acc, stack, context, line, offset)
end
defp cron__481(rest, acc, stack, context, line, offset) do
cron__483(rest, acc, [1 | stack], context, line, offset)
end
defp cron__483(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__484(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__483(rest, acc, stack, context, line, offset) do
cron__482(rest, acc, stack, context, line, offset)
end
defp cron__482(rest, acc, [_ | stack], context, line, offset) do
cron__485(rest, acc, stack, context, line, offset)
end
defp cron__484(rest, acc, [1 | stack], context, line, offset) do
cron__485(rest, acc, stack, context, line, offset)
end
defp cron__484(rest, acc, [count | stack], context, line, offset) do
cron__483(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__485(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__486(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__486(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__487(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__486(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__477(rest, acc, stack, context, line, offset)
end
defp cron__487(rest, acc, stack, context, line, offset) do
cron__488(rest, [], [acc | stack], context, line, offset)
end
defp cron__488(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__489(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__488(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__477(rest, acc, stack, context, line, offset)
end
defp cron__489(rest, acc, stack, context, line, offset) do
cron__491(rest, acc, [1 | stack], context, line, offset)
end
defp cron__491(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__492(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__491(rest, acc, stack, context, line, offset) do
cron__490(rest, acc, stack, context, line, offset)
end
defp cron__490(rest, acc, [_ | stack], context, line, offset) do
cron__493(rest, acc, stack, context, line, offset)
end
defp cron__492(rest, acc, [1 | stack], context, line, offset) do
cron__493(rest, acc, stack, context, line, offset)
end
defp cron__492(rest, acc, [count | stack], context, line, offset) do
cron__491(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__493(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__494(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__494(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__495(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__495(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__460(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__496(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__478(rest, [], stack, context, line, offset)
end
defp cron__497(rest, acc, stack, context, line, offset) do
cron__498(rest, [], [acc | stack], context, line, offset)
end
defp cron__498(rest, acc, stack, context, line, offset) do
cron__519(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__500(rest, acc, stack, context, line, offset) do
cron__501(rest, [], [acc | stack], context, line, offset)
end
defp cron__501(rest, acc, stack, context, line, offset) do
cron__502(rest, [], [acc | stack], context, line, offset)
end
defp cron__502(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__503(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__502(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__496(rest, acc, stack, context, line, offset)
end
defp cron__503(rest, acc, stack, context, line, offset) do
cron__505(rest, acc, [1 | stack], context, line, offset)
end
defp cron__505(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__506(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__505(rest, acc, stack, context, line, offset) do
cron__504(rest, acc, stack, context, line, offset)
end
defp cron__504(rest, acc, [_ | stack], context, line, offset) do
cron__507(rest, acc, stack, context, line, offset)
end
defp cron__506(rest, acc, [1 | stack], context, line, offset) do
cron__507(rest, acc, stack, context, line, offset)
end
defp cron__506(rest, acc, [count | stack], context, line, offset) do
cron__505(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__507(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__508(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__508(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__509(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__508(rest, _acc, stack, context, line, offset) do
[_, _, _, acc | stack] = stack
cron__496(rest, acc, stack, context, line, offset)
end
defp cron__509(rest, acc, stack, context, line, offset) do
cron__510(rest, [], [acc | stack], context, line, offset)
end
defp cron__510(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__511(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__510(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__496(rest, acc, stack, context, line, offset)
end
defp cron__511(rest, acc, stack, context, line, offset) do
cron__513(rest, acc, [1 | stack], context, line, offset)
end
defp cron__513(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__514(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__513(rest, acc, stack, context, line, offset) do
cron__512(rest, acc, stack, context, line, offset)
end
defp cron__512(rest, acc, [_ | stack], context, line, offset) do
cron__515(rest, acc, stack, context, line, offset)
end
defp cron__514(rest, acc, [1 | stack], context, line, offset) do
cron__515(rest, acc, stack, context, line, offset)
end
defp cron__514(rest, acc, [count | stack], context, line, offset) do
cron__513(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__515(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__516(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__516(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__517(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__517(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__499(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__518(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__500(rest, [], stack, context, line, offset)
end
defp cron__519(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__520(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__519(rest, acc, stack, context, line, offset) do
cron__518(rest, acc, stack, context, line, offset)
end
defp cron__520(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__499(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__499(<<"/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__521(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__499(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__496(rest, acc, stack, context, line, offset)
end
defp cron__521(rest, acc, stack, context, line, offset) do
cron__522(rest, [], [acc | stack], context, line, offset)
end
defp cron__522(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__523(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__522(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__496(rest, acc, stack, context, line, offset)
end
defp cron__523(rest, acc, stack, context, line, offset) do
cron__525(rest, acc, [1 | stack], context, line, offset)
end
defp cron__525(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__526(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__525(rest, acc, stack, context, line, offset) do
cron__524(rest, acc, stack, context, line, offset)
end
defp cron__524(rest, acc, [_ | stack], context, line, offset) do
cron__527(rest, acc, stack, context, line, offset)
end
defp cron__526(rest, acc, [1 | stack], context, line, offset) do
cron__527(rest, acc, stack, context, line, offset)
end
defp cron__526(rest, acc, [count | stack], context, line, offset) do
cron__525(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__527(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__528(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__528(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__529(rest, [step: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__529(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__460(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__460(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__458(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__530(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__459(rest, [], stack, context, line, offset)
end
defp cron__531(rest, acc, stack, context, line, offset) do
cron__532(rest, [], [acc | stack], context, line, offset)
end
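# Three-letter month abbreviations map directly to their ordinal month
# numbers, JAN = 1 through DEC = 12.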
defp cron__532(<<"JAN", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__533(rest, [1] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__532(<<"FEB", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__533(rest, [2] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__532(<<"MAR", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__533(rest, [3] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__532(<<"APR", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__533(rest, [4] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__532(<<"MAY", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__533(rest, [5] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__532(<<"JUN", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__533(rest, [6] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__532(<<"JUL", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__533(rest, [7] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__532(<<"AUG", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__533(rest, '\b' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__532(<<"SEP", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__533(rest, '\t' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__532(<<"OCT", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__533(rest, '\n' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__532(<<"NOV", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__533(rest, '\v' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__532(<<"DEC", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__533(rest, '\f' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__532(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__530(rest, acc, stack, context, line, offset)
end
defp cron__533(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__534(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__534(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__458(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__458(rest, acc, stack, context, line, offset) do
cron__536(rest, [], [{rest, acc, context, line, offset} | stack], context, line, offset)
end
defp cron__536(rest, acc, stack, context, line, offset) do
cron__610(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__538(rest, acc, stack, context, line, offset) do
cron__576(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__540(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__541(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__540(rest, _acc, stack, context, line, offset) do
[_, _, _, acc | stack] = stack
cron__535(rest, acc, stack, context, line, offset)
end
defp cron__541(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__539(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__542(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__540(rest, [], stack, context, line, offset)
end
defp cron__543(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__544(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__543(rest, acc, stack, context, line, offset) do
cron__542(rest, acc, stack, context, line, offset)
end
defp cron__544(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__539(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__545(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__543(rest, [], stack, context, line, offset)
end
defp cron__546(rest, acc, stack, context, line, offset) do
cron__547(rest, [], [acc | stack], context, line, offset)
end
defp cron__547(rest, acc, stack, context, line, offset) do
cron__548(rest, [], [acc | stack], context, line, offset)
end
defp cron__548(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__549(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__548(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__545(rest, acc, stack, context, line, offset)
end
defp cron__549(rest, acc, stack, context, line, offset) do
cron__551(rest, acc, [1 | stack], context, line, offset)
end
defp cron__551(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__552(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__551(rest, acc, stack, context, line, offset) do
cron__550(rest, acc, stack, context, line, offset)
end
defp cron__550(rest, acc, [_ | stack], context, line, offset) do
cron__553(rest, acc, stack, context, line, offset)
end
defp cron__552(rest, acc, [1 | stack], context, line, offset) do
cron__553(rest, acc, stack, context, line, offset)
end
defp cron__552(rest, acc, [count | stack], context, line, offset) do
cron__551(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__553(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__554(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__554(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__555(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__555(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__539(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__556(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__546(rest, [], stack, context, line, offset)
end
defp cron__557(rest, acc, stack, context, line, offset) do
cron__558(rest, [], [acc | stack], context, line, offset)
end
defp cron__558(rest, acc, stack, context, line, offset) do
cron__559(rest, [], [acc | stack], context, line, offset)
end
defp cron__559(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__560(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__559(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__556(rest, acc, stack, context, line, offset)
end
defp cron__560(rest, acc, stack, context, line, offset) do
cron__562(rest, acc, [1 | stack], context, line, offset)
end
defp cron__562(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__563(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__562(rest, acc, stack, context, line, offset) do
cron__561(rest, acc, stack, context, line, offset)
end
defp cron__561(rest, acc, [_ | stack], context, line, offset) do
cron__564(rest, acc, stack, context, line, offset)
end
defp cron__563(rest, acc, [1 | stack], context, line, offset) do
cron__564(rest, acc, stack, context, line, offset)
end
defp cron__563(rest, acc, [count | stack], context, line, offset) do
cron__562(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__564(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__565(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__565(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__566(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__565(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__556(rest, acc, stack, context, line, offset)
end
defp cron__566(rest, acc, stack, context, line, offset) do
cron__567(rest, [], [acc | stack], context, line, offset)
end
defp cron__567(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__568(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__567(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__556(rest, acc, stack, context, line, offset)
end
defp cron__568(rest, acc, stack, context, line, offset) do
cron__570(rest, acc, [1 | stack], context, line, offset)
end
defp cron__570(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__571(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__570(rest, acc, stack, context, line, offset) do
cron__569(rest, acc, stack, context, line, offset)
end
defp cron__569(rest, acc, [_ | stack], context, line, offset) do
cron__572(rest, acc, stack, context, line, offset)
end
defp cron__571(rest, acc, [1 | stack], context, line, offset) do
cron__572(rest, acc, stack, context, line, offset)
end
defp cron__571(rest, acc, [count | stack], context, line, offset) do
cron__570(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__572(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__573(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__573(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__574(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__574(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__539(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__575(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__557(rest, [], stack, context, line, offset)
end
defp cron__576(rest, acc, stack, context, line, offset) do
cron__577(rest, [], [acc | stack], context, line, offset)
end
defp cron__577(rest, acc, stack, context, line, offset) do
cron__598(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__579(rest, acc, stack, context, line, offset) do
cron__580(rest, [], [acc | stack], context, line, offset)
end
defp cron__580(rest, acc, stack, context, line, offset) do
cron__581(rest, [], [acc | stack], context, line, offset)
end
defp cron__581(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__582(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__581(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__575(rest, acc, stack, context, line, offset)
end
defp cron__582(rest, acc, stack, context, line, offset) do
cron__584(rest, acc, [1 | stack], context, line, offset)
end
defp cron__584(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__585(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__584(rest, acc, stack, context, line, offset) do
cron__583(rest, acc, stack, context, line, offset)
end
defp cron__583(rest, acc, [_ | stack], context, line, offset) do
cron__586(rest, acc, stack, context, line, offset)
end
defp cron__585(rest, acc, [1 | stack], context, line, offset) do
cron__586(rest, acc, stack, context, line, offset)
end
defp cron__585(rest, acc, [count | stack], context, line, offset) do
cron__584(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__586(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__587(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__587(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__588(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__587(rest, _acc, stack, context, line, offset) do
[_, _, _, acc | stack] = stack
cron__575(rest, acc, stack, context, line, offset)
end
defp cron__588(rest, acc, stack, context, line, offset) do
cron__589(rest, [], [acc | stack], context, line, offset)
end
defp cron__589(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__590(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__589(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__575(rest, acc, stack, context, line, offset)
end
defp cron__590(rest, acc, stack, context, line, offset) do
cron__592(rest, acc, [1 | stack], context, line, offset)
end
defp cron__592(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__593(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__592(rest, acc, stack, context, line, offset) do
cron__591(rest, acc, stack, context, line, offset)
end
defp cron__591(rest, acc, [_ | stack], context, line, offset) do
cron__594(rest, acc, stack, context, line, offset)
end
defp cron__593(rest, acc, [1 | stack], context, line, offset) do
cron__594(rest, acc, stack, context, line, offset)
end
defp cron__593(rest, acc, [count | stack], context, line, offset) do
cron__592(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__594(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__595(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__595(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__596(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__596(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__578(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__597(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__579(rest, [], stack, context, line, offset)
end
defp cron__598(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__599(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__598(rest, acc, stack, context, line, offset) do
cron__597(rest, acc, stack, context, line, offset)
end
defp cron__599(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__578(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__578(<<"/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__600(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__578(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__575(rest, acc, stack, context, line, offset)
end
defp cron__600(rest, acc, stack, context, line, offset) do
cron__601(rest, [], [acc | stack], context, line, offset)
end
defp cron__601(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__602(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__601(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__575(rest, acc, stack, context, line, offset)
end
defp cron__602(rest, acc, stack, context, line, offset) do
cron__604(rest, acc, [1 | stack], context, line, offset)
end
defp cron__604(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__605(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__604(rest, acc, stack, context, line, offset) do
cron__603(rest, acc, stack, context, line, offset)
end
defp cron__603(rest, acc, [_ | stack], context, line, offset) do
cron__606(rest, acc, stack, context, line, offset)
end
defp cron__605(rest, acc, [1 | stack], context, line, offset) do
cron__606(rest, acc, stack, context, line, offset)
end
defp cron__605(rest, acc, [count | stack], context, line, offset) do
cron__604(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__606(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__607(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__607(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__608(rest, [step: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__608(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__539(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__539(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__537(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__609(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__538(rest, [], stack, context, line, offset)
end
defp cron__610(rest, acc, stack, context, line, offset) do
cron__611(rest, [], [acc | stack], context, line, offset)
end
defp cron__611(<<"JAN", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__612(rest, [1] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__611(<<"FEB", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__612(rest, [2] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__611(<<"MAR", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__612(rest, [3] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__611(<<"APR", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__612(rest, [4] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__611(<<"MAY", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__612(rest, [5] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__611(<<"JUN", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__612(rest, [6] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__611(<<"JUL", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__612(rest, [7] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__611(<<"AUG", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__612(rest, '\b' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__611(<<"SEP", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__612(rest, '\t' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__611(<<"OCT", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__612(rest, '\n' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__611(<<"NOV", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__612(rest, '\v' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__611(<<"DEC", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__612(rest, '\f' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__611(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__609(rest, acc, stack, context, line, offset)
end
defp cron__612(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__613(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__613(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__537(rest, acc ++ previous_acc, stack, context, line, offset)
end
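  # cron__535/cron__537 implement the repeat loop for comma-separated month
  # expressions: cron__537 feeds the tokens from a successful pass back into
  # cron__536, while cron__535 exits the loop from the last saved position.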
defp cron__535(_, _, [{rest, acc, context, line, offset} | stack], _, _, _) do
cron__614(rest, acc, stack, context, line, offset)
end
defp cron__537(
inner_rest,
inner_acc,
[{rest, acc, context, line, offset} | stack],
inner_context,
inner_line,
inner_offset
) do
_ = {rest, acc, context, line, offset}
cron__536(
inner_rest,
[],
[{inner_rest, inner_acc ++ acc, inner_context, inner_line, inner_offset} | stack],
inner_context,
inner_line,
inner_offset
)
end
defp cron__614(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__615(rest, [months: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__615(rest, acc, stack, context, line, offset) do
cron__616(rest, [], [acc | stack], context, line, offset)
end
defp cron__616(rest, acc, stack, context, line, offset) do
cron__617(rest, [], [acc | stack], context, line, offset)
end
defp cron__617(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 === 32 or x0 === 9 do
cron__618(rest, acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__617(rest, _acc, _stack, context, line, offset) do
{:error, "expected ASCII character equal to ' ' or equal to '\\t'", rest, context, line,
offset}
end
defp cron__618(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 === 32 or x0 === 9 do
cron__620(rest, acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__618(rest, acc, stack, context, line, offset) do
cron__619(rest, acc, stack, context, line, offset)
end
defp cron__620(rest, acc, stack, context, line, offset) do
cron__618(rest, acc, stack, context, line, offset)
end
defp cron__619(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__621(rest, acc, stack, context, line, offset)
end
defp cron__621(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__622(rest, [] ++ acc, stack, context, line, offset)
end
defp cron__622(rest, acc, stack, context, line, offset) do
cron__623(rest, [], [acc | stack], context, line, offset)
end
defp cron__623(rest, acc, stack, context, line, offset) do
cron__697(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__625(rest, acc, stack, context, line, offset) do
cron__663(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__627(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__628(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__627(rest, _acc, _stack, context, line, offset) do
{:error,
"expected string \"MON\" or string \"TUE\" or string \"WED\" or string \"THU\" or string \"FRI\" or string \"SAT\" or string \"SUN\" or string \"*\" or ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9', followed by string \"-\", followed by ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9', followed by string \"/\", followed by ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9' or ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9', followed by string \"-\", followed by ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9' or ASCII character in the range '0' to '9', followed by ASCII character in the range '0' to '9' or string \"*\" or string \",\"",
rest, context, line, offset}
end
defp cron__628(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__626(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__629(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__627(rest, [], stack, context, line, offset)
end
defp cron__630(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__631(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__630(rest, acc, stack, context, line, offset) do
cron__629(rest, acc, stack, context, line, offset)
end
defp cron__631(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__626(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__632(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__630(rest, [], stack, context, line, offset)
end
defp cron__633(rest, acc, stack, context, line, offset) do
cron__634(rest, [], [acc | stack], context, line, offset)
end
defp cron__634(rest, acc, stack, context, line, offset) do
cron__635(rest, [], [acc | stack], context, line, offset)
end
defp cron__635(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__636(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__635(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__632(rest, acc, stack, context, line, offset)
end
defp cron__636(rest, acc, stack, context, line, offset) do
cron__638(rest, acc, [1 | stack], context, line, offset)
end
defp cron__638(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__639(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__638(rest, acc, stack, context, line, offset) do
cron__637(rest, acc, stack, context, line, offset)
end
defp cron__637(rest, acc, [_ | stack], context, line, offset) do
cron__640(rest, acc, stack, context, line, offset)
end
defp cron__639(rest, acc, [1 | stack], context, line, offset) do
cron__640(rest, acc, stack, context, line, offset)
end
defp cron__639(rest, acc, [count | stack], context, line, offset) do
cron__638(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__640(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__641(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__641(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__642(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__642(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__626(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__643(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__633(rest, [], stack, context, line, offset)
end
defp cron__644(rest, acc, stack, context, line, offset) do
cron__645(rest, [], [acc | stack], context, line, offset)
end
defp cron__645(rest, acc, stack, context, line, offset) do
cron__646(rest, [], [acc | stack], context, line, offset)
end
defp cron__646(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__647(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__646(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__643(rest, acc, stack, context, line, offset)
end
defp cron__647(rest, acc, stack, context, line, offset) do
cron__649(rest, acc, [1 | stack], context, line, offset)
end
defp cron__649(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__650(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__649(rest, acc, stack, context, line, offset) do
cron__648(rest, acc, stack, context, line, offset)
end
defp cron__648(rest, acc, [_ | stack], context, line, offset) do
cron__651(rest, acc, stack, context, line, offset)
end
defp cron__650(rest, acc, [1 | stack], context, line, offset) do
cron__651(rest, acc, stack, context, line, offset)
end
defp cron__650(rest, acc, [count | stack], context, line, offset) do
cron__649(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__651(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__652(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__652(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__653(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__652(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__643(rest, acc, stack, context, line, offset)
end
defp cron__653(rest, acc, stack, context, line, offset) do
cron__654(rest, [], [acc | stack], context, line, offset)
end
defp cron__654(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__655(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__654(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__643(rest, acc, stack, context, line, offset)
end
defp cron__655(rest, acc, stack, context, line, offset) do
cron__657(rest, acc, [1 | stack], context, line, offset)
end
defp cron__657(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__658(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__657(rest, acc, stack, context, line, offset) do
cron__656(rest, acc, stack, context, line, offset)
end
defp cron__656(rest, acc, [_ | stack], context, line, offset) do
cron__659(rest, acc, stack, context, line, offset)
end
defp cron__658(rest, acc, [1 | stack], context, line, offset) do
cron__659(rest, acc, stack, context, line, offset)
end
defp cron__658(rest, acc, [count | stack], context, line, offset) do
cron__657(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__659(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__660(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__660(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__661(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__661(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__626(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__662(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__644(rest, [], stack, context, line, offset)
end
defp cron__663(rest, acc, stack, context, line, offset) do
cron__664(rest, [], [acc | stack], context, line, offset)
end
defp cron__664(rest, acc, stack, context, line, offset) do
cron__685(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__666(rest, acc, stack, context, line, offset) do
cron__667(rest, [], [acc | stack], context, line, offset)
end
defp cron__667(rest, acc, stack, context, line, offset) do
cron__668(rest, [], [acc | stack], context, line, offset)
end
defp cron__668(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__669(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__668(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__662(rest, acc, stack, context, line, offset)
end
defp cron__669(rest, acc, stack, context, line, offset) do
cron__671(rest, acc, [1 | stack], context, line, offset)
end
defp cron__671(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__672(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__671(rest, acc, stack, context, line, offset) do
cron__670(rest, acc, stack, context, line, offset)
end
defp cron__670(rest, acc, [_ | stack], context, line, offset) do
cron__673(rest, acc, stack, context, line, offset)
end
defp cron__672(rest, acc, [1 | stack], context, line, offset) do
cron__673(rest, acc, stack, context, line, offset)
end
defp cron__672(rest, acc, [count | stack], context, line, offset) do
cron__671(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__673(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__674(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__674(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__675(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__674(rest, _acc, stack, context, line, offset) do
[_, _, _, acc | stack] = stack
cron__662(rest, acc, stack, context, line, offset)
end
defp cron__675(rest, acc, stack, context, line, offset) do
cron__676(rest, [], [acc | stack], context, line, offset)
end
defp cron__676(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__677(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__676(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__662(rest, acc, stack, context, line, offset)
end
defp cron__677(rest, acc, stack, context, line, offset) do
cron__679(rest, acc, [1 | stack], context, line, offset)
end
defp cron__679(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__680(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__679(rest, acc, stack, context, line, offset) do
cron__678(rest, acc, stack, context, line, offset)
end
defp cron__678(rest, acc, [_ | stack], context, line, offset) do
cron__681(rest, acc, stack, context, line, offset)
end
defp cron__680(rest, acc, [1 | stack], context, line, offset) do
cron__681(rest, acc, stack, context, line, offset)
end
defp cron__680(rest, acc, [count | stack], context, line, offset) do
cron__679(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__681(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__682(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__682(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__683(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__683(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__665(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__684(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__666(rest, [], stack, context, line, offset)
end
defp cron__685(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__686(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__685(rest, acc, stack, context, line, offset) do
cron__684(rest, acc, stack, context, line, offset)
end
defp cron__686(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__665(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__665(<<"/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__687(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__665(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__662(rest, acc, stack, context, line, offset)
end
defp cron__687(rest, acc, stack, context, line, offset) do
cron__688(rest, [], [acc | stack], context, line, offset)
end
defp cron__688(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__689(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__688(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__662(rest, acc, stack, context, line, offset)
end
defp cron__689(rest, acc, stack, context, line, offset) do
cron__691(rest, acc, [1 | stack], context, line, offset)
end
defp cron__691(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__692(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__691(rest, acc, stack, context, line, offset) do
cron__690(rest, acc, stack, context, line, offset)
end
defp cron__690(rest, acc, [_ | stack], context, line, offset) do
cron__693(rest, acc, stack, context, line, offset)
end
defp cron__692(rest, acc, [1 | stack], context, line, offset) do
cron__693(rest, acc, stack, context, line, offset)
end
defp cron__692(rest, acc, [count | stack], context, line, offset) do
cron__691(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__693(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__694(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__694(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__695(rest, [step: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__695(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__626(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__626(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__624(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__696(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__625(rest, [], stack, context, line, offset)
end
defp cron__697(rest, acc, stack, context, line, offset) do
cron__698(rest, [], [acc | stack], context, line, offset)
end
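  # Weekday-name aliases are mapped to cron weekday numbers
  # (MON = 1 ... SAT = 6, SUN = 0).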
defp cron__698(<<"MON", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__699(rest, [1] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__698(<<"TUE", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__699(rest, [2] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__698(<<"WED", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__699(rest, [3] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__698(<<"THU", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__699(rest, [4] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__698(<<"FRI", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__699(rest, [5] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__698(<<"SAT", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__699(rest, [6] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__698(<<"SUN", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__699(rest, [0] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__698(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__696(rest, acc, stack, context, line, offset)
end
defp cron__699(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__700(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__700(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__624(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__624(rest, acc, stack, context, line, offset) do
cron__702(rest, [], [{rest, acc, context, line, offset} | stack], context, line, offset)
end
defp cron__702(rest, acc, stack, context, line, offset) do
cron__776(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__704(rest, acc, stack, context, line, offset) do
cron__742(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__706(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__707(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__706(rest, _acc, stack, context, line, offset) do
[_, _, _, acc | stack] = stack
cron__701(rest, acc, stack, context, line, offset)
end
defp cron__707(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__705(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__708(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__706(rest, [], stack, context, line, offset)
end
defp cron__709(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__710(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__709(rest, acc, stack, context, line, offset) do
cron__708(rest, acc, stack, context, line, offset)
end
defp cron__710(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__705(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__711(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__709(rest, [], stack, context, line, offset)
end
defp cron__712(rest, acc, stack, context, line, offset) do
cron__713(rest, [], [acc | stack], context, line, offset)
end
defp cron__713(rest, acc, stack, context, line, offset) do
cron__714(rest, [], [acc | stack], context, line, offset)
end
defp cron__714(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__715(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__714(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__711(rest, acc, stack, context, line, offset)
end
defp cron__715(rest, acc, stack, context, line, offset) do
cron__717(rest, acc, [1 | stack], context, line, offset)
end
defp cron__717(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__718(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__717(rest, acc, stack, context, line, offset) do
cron__716(rest, acc, stack, context, line, offset)
end
defp cron__716(rest, acc, [_ | stack], context, line, offset) do
cron__719(rest, acc, stack, context, line, offset)
end
defp cron__718(rest, acc, [1 | stack], context, line, offset) do
cron__719(rest, acc, stack, context, line, offset)
end
defp cron__718(rest, acc, [count | stack], context, line, offset) do
cron__717(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__719(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__720(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__720(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__721(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__721(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__705(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__722(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__712(rest, [], stack, context, line, offset)
end
defp cron__723(rest, acc, stack, context, line, offset) do
cron__724(rest, [], [acc | stack], context, line, offset)
end
defp cron__724(rest, acc, stack, context, line, offset) do
cron__725(rest, [], [acc | stack], context, line, offset)
end
defp cron__725(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__726(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__725(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__722(rest, acc, stack, context, line, offset)
end
defp cron__726(rest, acc, stack, context, line, offset) do
cron__728(rest, acc, [1 | stack], context, line, offset)
end
defp cron__728(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__729(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__728(rest, acc, stack, context, line, offset) do
cron__727(rest, acc, stack, context, line, offset)
end
defp cron__727(rest, acc, [_ | stack], context, line, offset) do
cron__730(rest, acc, stack, context, line, offset)
end
defp cron__729(rest, acc, [1 | stack], context, line, offset) do
cron__730(rest, acc, stack, context, line, offset)
end
defp cron__729(rest, acc, [count | stack], context, line, offset) do
cron__728(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__730(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__731(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__731(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__732(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__731(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__722(rest, acc, stack, context, line, offset)
end
defp cron__732(rest, acc, stack, context, line, offset) do
cron__733(rest, [], [acc | stack], context, line, offset)
end
defp cron__733(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__734(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__733(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__722(rest, acc, stack, context, line, offset)
end
defp cron__734(rest, acc, stack, context, line, offset) do
cron__736(rest, acc, [1 | stack], context, line, offset)
end
defp cron__736(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__737(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__736(rest, acc, stack, context, line, offset) do
cron__735(rest, acc, stack, context, line, offset)
end
defp cron__735(rest, acc, [_ | stack], context, line, offset) do
cron__738(rest, acc, stack, context, line, offset)
end
defp cron__737(rest, acc, [1 | stack], context, line, offset) do
cron__738(rest, acc, stack, context, line, offset)
end
defp cron__737(rest, acc, [count | stack], context, line, offset) do
cron__736(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__738(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__739(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__739(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__740(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__740(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__705(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__741(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__723(rest, [], stack, context, line, offset)
end
defp cron__742(rest, acc, stack, context, line, offset) do
cron__743(rest, [], [acc | stack], context, line, offset)
end
defp cron__743(rest, acc, stack, context, line, offset) do
cron__764(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__745(rest, acc, stack, context, line, offset) do
cron__746(rest, [], [acc | stack], context, line, offset)
end
defp cron__746(rest, acc, stack, context, line, offset) do
cron__747(rest, [], [acc | stack], context, line, offset)
end
defp cron__747(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__748(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__747(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__741(rest, acc, stack, context, line, offset)
end
defp cron__748(rest, acc, stack, context, line, offset) do
cron__750(rest, acc, [1 | stack], context, line, offset)
end
defp cron__750(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__751(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__750(rest, acc, stack, context, line, offset) do
cron__749(rest, acc, stack, context, line, offset)
end
defp cron__749(rest, acc, [_ | stack], context, line, offset) do
cron__752(rest, acc, stack, context, line, offset)
end
defp cron__751(rest, acc, [1 | stack], context, line, offset) do
cron__752(rest, acc, stack, context, line, offset)
end
defp cron__751(rest, acc, [count | stack], context, line, offset) do
cron__750(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__752(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__753(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__753(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__754(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__753(rest, _acc, stack, context, line, offset) do
[_, _, _, acc | stack] = stack
cron__741(rest, acc, stack, context, line, offset)
end
defp cron__754(rest, acc, stack, context, line, offset) do
cron__755(rest, [], [acc | stack], context, line, offset)
end
defp cron__755(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__756(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__755(rest, _acc, stack, context, line, offset) do
[_, _, _, _, acc | stack] = stack
cron__741(rest, acc, stack, context, line, offset)
end
defp cron__756(rest, acc, stack, context, line, offset) do
cron__758(rest, acc, [1 | stack], context, line, offset)
end
defp cron__758(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__759(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__758(rest, acc, stack, context, line, offset) do
cron__757(rest, acc, stack, context, line, offset)
end
defp cron__757(rest, acc, [_ | stack], context, line, offset) do
cron__760(rest, acc, stack, context, line, offset)
end
defp cron__759(rest, acc, [1 | stack], context, line, offset) do
cron__760(rest, acc, stack, context, line, offset)
end
defp cron__759(rest, acc, [count | stack], context, line, offset) do
cron__758(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__760(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__761(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__761(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__762(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__762(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__744(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__763(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__745(rest, [], stack, context, line, offset)
end
defp cron__764(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__765(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__764(rest, acc, stack, context, line, offset) do
cron__763(rest, acc, stack, context, line, offset)
end
defp cron__765(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__744(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__744(<<"/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__766(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__744(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__741(rest, acc, stack, context, line, offset)
end
defp cron__766(rest, acc, stack, context, line, offset) do
cron__767(rest, [], [acc | stack], context, line, offset)
end
defp cron__767(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__768(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__767(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__741(rest, acc, stack, context, line, offset)
end
defp cron__768(rest, acc, stack, context, line, offset) do
cron__770(rest, acc, [1 | stack], context, line, offset)
end
defp cron__770(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__771(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__770(rest, acc, stack, context, line, offset) do
cron__769(rest, acc, stack, context, line, offset)
end
defp cron__769(rest, acc, [_ | stack], context, line, offset) do
cron__772(rest, acc, stack, context, line, offset)
end
defp cron__771(rest, acc, [1 | stack], context, line, offset) do
cron__772(rest, acc, stack, context, line, offset)
end
defp cron__771(rest, acc, [count | stack], context, line, offset) do
cron__770(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__772(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__773(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__773(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__774(rest, [step: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__774(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__705(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__705(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__703(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__775(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__704(rest, [], stack, context, line, offset)
end
defp cron__776(rest, acc, stack, context, line, offset) do
cron__777(rest, [], [acc | stack], context, line, offset)
end
defp cron__777(<<"MON", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__778(rest, [1] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__777(<<"TUE", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__778(rest, [2] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__777(<<"WED", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__778(rest, [3] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__777(<<"THU", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__778(rest, [4] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__777(<<"FRI", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__778(rest, [5] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__777(<<"SAT", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__778(rest, [6] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__777(<<"SUN", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__778(rest, [0] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__777(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__775(rest, acc, stack, context, line, offset)
end
defp cron__778(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__779(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__779(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__703(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__701(_, _, [{rest, acc, context, line, offset} | stack], _, _, _) do
cron__780(rest, acc, stack, context, line, offset)
end
defp cron__703(
inner_rest,
inner_acc,
[{rest, acc, context, line, offset} | stack],
inner_context,
inner_line,
inner_offset
) do
_ = {rest, acc, context, line, offset}
cron__702(
inner_rest,
[],
[{inner_rest, inner_acc ++ acc, inner_context, inner_line, inner_offset} | stack],
inner_context,
inner_line,
inner_offset
)
end
defp cron__780(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
cron__781(rest, [weekdays: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__781(rest, acc, _stack, context, line, offset) do
{:ok, acc, rest, context, line, offset}
end
end
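# A rough usage sketch for the generated parser above. Only the :months and
# :weekdays field tags (plus the :literal, :wild, :range, and :step wrappers)
# appear in this portion of the file; the tags for the leading minute, hour,
# and day fields come from earlier generated clauses and are elided here:
#
#     {:ok, tokens, "", _context, _line, _offset} =
#       Oban.Crontab.Parser.cron("* * * SEP MON")
#
#     # tokens ends with [..., months: [literal: 9], weekdays: [literal: 1]]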
# -- end of lib/oban/crontab/parser.ex --
defmodule Modbus.Model do
@moduledoc false
def apply(state, {:rc, slave, address, count}) when is_integer(address) and is_integer(count) do
reads(state, {slave, :c, address, count})
end
def apply(state, {:ri, slave, address, count}) when is_integer(address) and is_integer(count) do
reads(state, {slave, :i, address, count})
end
def apply(state, {:rhr, slave, address, count})
when is_integer(address) and is_integer(count) do
reads(state, {slave, :hr, address, count})
end
def apply(state, {:rir, slave, address, count})
when is_integer(address) and is_integer(count) do
reads(state, {slave, :ir, address, count})
end
def apply(state, {:fc, slave, address, value})
when is_integer(address) and not is_list(value) do
write(state, {slave, :c, address, value})
end
def apply(state, {:fc, slave, address, values}) when is_integer(address) and is_list(values) do
writes(state, {slave, :c, address, values})
end
def apply(state, {:phr, slave, address, value})
when is_integer(address) and not is_list(value) do
write(state, {slave, :hr, address, value})
end
def apply(state, {:phr, slave, address, values}) when is_integer(address) and is_list(values) do
writes(state, {slave, :hr, address, values})
end
defp reads(state, {slave, type, address, count}) do
case check_request(state, {slave, type, address, count}) do
true ->
map = Map.fetch!(state, slave)
addr_end = address + count - 1
list =
for point <- address..addr_end do
Map.fetch!(map, {type, point})
end
{:ok, state, list}
false ->
{:error, state}
end
end
defp write(state, {slave, type, address, value}) do
case check_request(state, {slave, type, address, 1}) do
true ->
cmap = Map.fetch!(state, slave)
nmap = Map.put(cmap, {type, address}, value)
{:ok, Map.put(state, slave, nmap)}
false ->
{:error, state}
end
end
defp writes(state, {slave, type, address, values}) do
count = length(values)
case check_request(state, {slave, type, address, count}) do
true ->
cmap = Map.fetch!(state, slave)
addr_end = address + count
{^addr_end, nmap} =
Enum.reduce(values, {address, cmap}, fn value, {i, map} ->
{i + 1, Map.put(map, {type, i}, value)}
end)
{:ok, Map.put(state, slave, nmap)}
false ->
{:error, state}
end
end
def check_request(state, {slave, type, addr, count}) do
map = Map.get(state, slave)
case map do
nil ->
false
_ ->
addr_end = addr + count - 1
Enum.all?(addr..addr_end, fn addr ->
Map.has_key?(map, {type, addr})
end)
end
end
end
|
lib/model.ex
| 0.688678 | 0.557604 |
model.ex
|
starcoder
|
defmodule Firenest.PubSub do
@moduledoc """
A distributed pubsub implementation.
The PubSub implementation runs on top of a `Firenest.Topology`
and uses Elixir's `Registry` to provide a scalable dispatch
implementation.
## Example
PubSub is typically set up as part of your supervision tree
alongside the desired topology:
children = [
{Firenest.Topology, name: MyApp.Topology, adapter: Firenest.Topology.Erlang},
{Firenest.PubSub, name: MyApp.PubSub, topology: MyApp.Topology}
]
Once the topology and pubsub processes are started, processes
may subscribe, unsubscribe and broadcast messages:
# Subscribe the current process to a given topic
Firenest.PubSub.subscribe(MyApp.PubSub, "lobby:messages")
# Broadcasts a message
Firenest.PubSub.broadcast(MyApp.PubSub, "lobby:messages", "hello world")
PubSub will always broadcast to all nodes in the topology,
even if they are not running the PubSub service. In case you
want to broadcast to a subset of your topology, consider creating
multiple topologies.
## Custom dispatching
Firenest.PubSub allows developers to perform custom dispatching
by passing a `dispatcher` module to the broadcast functions.
The dispatcher must be available on all nodes running the PubSub
system. The `dispatch/3` function of the given module will be
invoked with the subscriptions entries, the broadcaster identifier
and the message to broadcast and it is responsible for local message
deliveries.
You may want to use the dispatcher to perform special delivery for
certain subscriptions. This can be done by passing a `value` during
subscriptions. For instance, Phoenix Channels use a custom `value`
to provide "fastlaning", allowing messages broadcast to thousands
or even millions of users to be encoded once and written directly
to sockets instead of being encoded per channel.
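
  For illustration, here is a minimal custom dispatcher (a sketch; the
  module name is hypothetical). Any module exporting a compatible
  `dispatch/3` will do:

      defmodule MyApp.Dispatcher do
        # `entries` are the `{pid, value}` subscription entries, `from` is
        # the broadcaster (or `:none`) and `message` is the payload.
        def dispatch(entries, from, message) do
          for {pid, _value} <- entries, pid != from, do: send(pid, message)
          :ok
        end
      end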
"""
@typedoc "An atom identifying the pubsub system."
@type t :: atom()
@type topic :: term()
@type from :: pid()
@type dispatcher :: module
defmodule BroadcastError do
defexception [:message]
end
@doc """
  Returns a child specification for pubsub with the given `options`.
The `:name` and `:topology` keys are required as part of `options`.
`:name` refers to the name of the pubsub to be started and `:topology`
must point to a topology started by `Firenest.Topology`.
The remaining options are described below.
## Options
* `:name` - the name of the pubsub to be started
* `:topology` - the name of a `Firenest.Topology` that powers
the distribution mechanism
* `:partitions` - the number of partitions under the pubsub system.
Partitioning provides vertical scalability on machines with multiple
cores, allowing subscriptions and broadcasts to happen concurrently.
By default uses one partition for every 4 cores.
"""
@spec child_spec(options) :: Supervisor.child_spec()
when options: [
name: t,
topology: Firenest.Topology.t(),
partitions: pos_integer()
]
defdelegate child_spec(options), to: Firenest.PubSub.Supervisor
@doc """
Returns all topics the `pid` is subscribed to in `pubsub`.
"""
@spec topics(t, pid) :: [topic]
def topics(pubsub, pid) do
Registry.keys(pubsub, pid)
end
@doc """
Subscribes the current process to `topic` in `pubsub`.
A process may subscribe to the same topic more than once.
In such cases, messages will be delivered twice.
The `value` argument is used for those implementing custom
  dispatching as explained in the "Custom Dispatching" section
in the module docs. Unless you are implementing custom
dispatching rules, you can safely ignore the `value` argument.
"""
@spec subscribe(t, topic, term) :: :ok
def subscribe(pubsub, topic, value \\ nil) when is_atom(pubsub) do
{:ok, _} = Registry.register(pubsub, topic, value)
:ok
end
@doc """
  Unsubscribes the current process from `topic` in `pubsub`.
In case the current process is subscribed to the topic multiple times,
this call will unsubscribe all entries at once.
"""
@spec unsubscribe(t, topic) :: :ok
def unsubscribe(pubsub, topic) when is_atom(pubsub) do
Registry.unregister(pubsub, topic)
end
@doc """
Broadcasts the given `message` on `topic` in `pubsub`.
Returns `:ok` or `{:error, reason}` in case of failures in
  the distributed broadcast.
"""
@spec broadcast(t, topic | [topic], term, dispatcher) :: :ok | {:error, term}
def broadcast(pubsub, topic, message, dispatcher \\ __MODULE__)
when is_atom(pubsub) and is_atom(dispatcher) do
topics = List.wrap(topic)
{:ok, {topology, remote}} = Registry.meta(pubsub, :pubsub)
broadcast = {:broadcast, topics, message, dispatcher}
with :ok <- Firenest.Topology.broadcast(topology, remote, broadcast) do
dispatch(pubsub, :none, topics, message, dispatcher)
end
end
@doc """
Broadcasts the given `message` on `topic` in `pubsub`.
Returns `:ok` or raises `Firenest.PubSub.BroadcastError` in case of
  failures in the distributed broadcast.
"""
@spec broadcast!(t, topic | [topic], term, dispatcher) :: :ok | no_return
def broadcast!(pubsub, topic, message, dispatcher \\ __MODULE__) do
case broadcast(pubsub, topic, message, dispatcher) do
:ok -> :ok
{:error, error} -> raise BroadcastError, "broadcast!/3 failed with #{inspect(error)}"
end
end
@doc """
Broadcasts the given `message` on `topic` in `pubsub` from the given `pid`.
  By passing a `pid`, `Firenest.PubSub` ensures the message won't be broadcast
to `pid`. This is typically invoked with `pid == self()` so messages
are not delivered to the broadcasting process.
Returns `:ok` or `{:error, reason}` in case of failures in
  the distributed broadcast.
"""
@spec broadcast_from(t, pid, topic | [topic], term, dispatcher) :: :ok | {:error, term()}
def broadcast_from(pubsub, pid, topic, message, dispatcher \\ __MODULE__)
when is_atom(pubsub) and is_pid(pid) and is_atom(dispatcher) do
topics = List.wrap(topic)
{:ok, {topology, remote}} = Registry.meta(pubsub, :pubsub)
broadcast = {:broadcast, topics, message, dispatcher}
with :ok <- Firenest.Topology.broadcast(topology, remote, broadcast) do
dispatch(pubsub, pid, topics, message, dispatcher)
end
end
@doc """
Broadcasts the given `message` on `topic` in `pubsub` from the given `pid`.
  By passing a `pid`, `Firenest.PubSub` ensures the message won't be broadcast
to `pid`. This is typically invoked with `pid == self()` so messages
are not delivered to the broadcasting process.
Returns `:ok` or raises `Firenest.PubSub.BroadcastError` in case of
  failures in the distributed broadcast.
"""
@spec broadcast_from!(t, pid, topic | [topic], term, dispatcher) :: :ok | no_return
def broadcast_from!(pubsub, pid, topic, message, dispatcher \\ __MODULE__) do
case broadcast_from(pubsub, pid, topic, message, dispatcher) do
:ok -> :ok
{:error, error} -> raise BroadcastError, "broadcast_from!/4 failed with #{inspect(error)}"
end
end
@doc """
Broadcasts locally the given `message` on `topic` in `pubsub`.
Returns `:ok`.
"""
@spec local_broadcast(t, topic | [topic], term, dispatcher) :: :ok
def local_broadcast(pubsub, topic, message, dispatcher \\ __MODULE__)
when is_atom(pubsub) and is_atom(dispatcher) do
dispatch(pubsub, :none, List.wrap(topic), message, dispatcher)
end
@doc """
Broadcasts locally the given `message` on `topic` in `pubsub` from the given `pid`.
  By passing a `pid`, `Firenest.PubSub` ensures the message won't be broadcast
to `pid`. This is typically invoked with `pid == self()` so messages
are not delivered to the broadcasting process.
"""
@spec local_broadcast_from(t, pid, topic | [topic], term, dispatcher) :: :ok
def local_broadcast_from(pubsub, from, topic, message, dispatcher \\ __MODULE__)
when is_atom(pubsub) and is_pid(from) and is_atom(dispatcher) do
dispatch(pubsub, from, List.wrap(topic), message, dispatcher)
end
@doc false
def dispatch(entries, from, message) do
Enum.each(entries, fn
{pid, _} when pid == from -> :ok
{pid, _} -> send(pid, message)
end)
end
defp dispatch(pubsub, from, topics, message, dispatcher) do
mfa = {dispatcher, :dispatch, [from, message]}
for topic <- topics do
Registry.dispatch(pubsub, topic, mfa)
end
:ok
end
end
defmodule Firenest.PubSub.Dispatcher do
@moduledoc false
use GenServer
def start_link({name, pubsub}) do
GenServer.start_link(__MODULE__, pubsub, name: name)
end
def init(pubsub) do
{:ok, pubsub}
end
def handle_info({:broadcast, topics, message, dispatcher}, pubsub) do
mfargs = {dispatcher, :dispatch, [:none, message]}
for topic <- topics do
Registry.dispatch(pubsub, topic, mfargs)
end
{:noreply, pubsub}
end
end
defmodule Firenest.PubSub.Supervisor do
@moduledoc false
use Supervisor
def start_link(options) do
pubsub = options[:name]
topology = options[:topology]
unless pubsub && topology do
raise ArgumentError,
"Firenest.PubSub.child_spec/1 expects :name and :topology as options"
end
supervisor = Module.concat(pubsub, "Supervisor")
Supervisor.start_link(__MODULE__, {pubsub, topology, options}, name: supervisor)
end
def init({pubsub, topology, options}) do
partitions =
options[:partitions] || System.schedulers_online() |> Kernel./(4) |> Float.ceil() |> trunc()
remote = Module.concat(pubsub, "Dispatcher")
registry = [
meta: [pubsub: {topology, remote}],
partitions: partitions,
keys: :duplicate,
name: pubsub
]
children = [
{Registry, registry},
{Firenest.PubSub.Dispatcher, {remote, pubsub}}
]
Supervisor.init(children, strategy: :rest_for_one)
end
end
|
lib/firenest/pub_sub.ex
| 0.926199 | 0.646795 |
pub_sub.ex
|
starcoder
|
defmodule Day14B do
def solveB(input) do
# load_grid("day14test.in")
Day14.get_grid(input)
|> used_squares(0, 0, MapSet.new)
|> visit(0)
end
def load_grid(filename) do
    # Convenience function to restore a grid from a previous computation.
File.stream!(filename, [:utf8], :line)
|> Enum.map(&parse_line/1)
end
def parse_line(line) do
line
|> String.trim
|> String.split(",")
|> Enum.map(&String.to_integer/1)
end
def used_squares(grid, x, y, set) do
# Return a MapSet of {x, y} for each used square at (x, y)
case grid do
[] ->
set
[ [] | other_lines ] ->
used_squares(other_lines, 0, y + 1, set)
[ line | other_lines ] ->
[square | other_squares ] = line
set =
if square == 1 do
MapSet.put(set, {x, y})
else
set
end
used_squares([other_squares | other_lines], x + 1, y, set)
end
end
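  # Example (a sketch): a 2x2 grid with used squares at {0, 0} and {1, 1}
  #   used_squares([[1, 0], [0, 1]], 0, 0, MapSet.new())
  #   #=> MapSet containing {0, 0} and {1, 1}
  # which visit/2 then counts as 2 separate regions.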
def visit(squares, regions) do
# Return the number of regions formed by the squares
if Enum.empty? squares do
regions
else
first = Enum.at squares, 0
seen = bfs_expand MapSet.new([first]), MapSet.new([first]), squares
squares
|> MapSet.difference(seen)
|> visit(regions + 1)
end
end
def bfs_expand(to_visit, visited, squares) do
# Return a set of all the squares in the same region
if Enum.empty? to_visit do
visited
else
neighbors =
adjacent(to_visit)
|> MapSet.intersection(squares)
|> MapSet.difference(visited)
visited = MapSet.union(visited, neighbors)
squares = MapSet.difference(squares, neighbors)
bfs_expand(neighbors, visited, squares)
end
end
def adjacent(positions) do
# Return all adjacent squares to the given list of (x, y) positions.
# Adjacent squares might not be used squares or even on the grid.
Enum.reduce(positions, MapSet.new, fn({x, y}, acc) ->
[{x, y + 1}, {x + 1, y}, {x, y - 1}, {x - 1, y}]
|> MapSet.new
|> MapSet.union(acc)
end)
end
end
|
2017/elixir/day14/lib/day14b.ex
| 0.750278 | 0.53783 |
day14b.ex
|
starcoder
|
defmodule Zigler.Parser.Imports do
@moduledoc """
For parsing, looking for imports, cimports, and usingnamespace directives.
To be completed later
"""
import NimbleParsec
defstruct imports: [], identifier: nil, pub: false
@typep identifier_t :: :usingnamespace | String.t
@type t :: %__MODULE__{
imports: [{identifier_t, Path.t} | {:pub, identifier_t, Path.t}],
identifier: atom,
pub: boolean
}
# designed to ninja in this struct as necessary.
@type line_info :: {non_neg_integer, non_neg_integer}
@type parsec_retval :: {[String.t], t}
initialize = post_traverse(empty(), :initializer)
@spec initializer(String.t, [String.t], t, line_info, non_neg_integer)
:: parsec_retval
defp initializer(_, _, context, _, _), do: {[], struct(__MODULE__, context)}
whitespace = ascii_string([?\s, ?\n], min: 1)
filename = ascii_string([not: ?"], min: 1)
identifier = ascii_char([?a..?z, ?A..?Z, ?_])
|> optional(ascii_string([?a..?z, ?A..?Z, ?0..?9, ?_], min: 1))
|> reduce({IO, :iodata_to_binary, []})
usingnamespace = optional(
string("pub")
|> concat(ignore(whitespace)))
|> string("usingnamespace")
|> ignore(whitespace)
import_const = optional(
string("pub")
|> concat(ignore(whitespace)))
|> ignore(
string("const")
|> concat(whitespace))
|> concat(identifier)
|> ignore(
optional(whitespace)
|> string("=")
|> optional(whitespace))
prefix = choice(
[usingnamespace, import_const])
|> post_traverse(:register_identifier)
import_stmt =
prefix
|> ignore(
string("@import")
|> optional(whitespace)
|> string("(")
|> optional(whitespace)
|> string("\""))
|> concat(filename)
|> ignore(
string("\"")
|> optional(whitespace)
|> string(")"))
|> post_traverse(:register_import)
include_stmt =
ignore(
string("@cInclude")
|> optional(whitespace)
|> string("(")
|> optional(whitespace)
|> string("\""))
|> concat(filename)
|> ignore(
string("\"")
|> optional(whitespace)
|> string(")"))
|> post_traverse(:register_include)
defp register_identifier(_rest, ["usingnamespace" | rest], context, _, _) do
{[], %{context | identifier: :usingnamespace, pub: pub?(rest)}}
end
defp register_identifier(_rest, [identifier | rest], context, _, _) do
{[], %{context | identifier: identifier, pub: pub?(rest)}}
end
defp pub?([]), do: false
defp pub?(["pub"]), do: true
defp register_import(_rest, [path], context, _, _) do
{[],
%{context |
imports: [make_identifier(path, context) | context.imports],
identifier: nil,
pub: false}}
end
defp make_identifier(path, context) do
if context.pub do
{:pub, context.identifier, path}
else
{context.identifier, path}
end
end
defp register_include(_rest, [path], context, _, _) do
{[], %{context | imports: [{:cinclude, path} | context.imports]}}
end
if Mix.env == :test do
defparsec :parse_import_const, concat(initialize, import_const)
defparsec :parse_import_stmt, concat(initialize, import_stmt)
defparsec :parse_include_stmt, concat(initialize, include_stmt)
end
parse_imports =
initialize
|> repeat(choice([
import_stmt,
include_stmt,
ascii_char([0..255])
]))
defparsec :parse_imports, parse_imports
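  # For example (a sketch), parsing `pub usingnamespace @import("std.zig");`
  # yields [{:pub, :usingnamespace, "std.zig"}], while
  # `const std = @import("std");` yields [{"std", "std"}].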
def parse(code) do
{:ok, _, _, %Zigler.Parser.Imports{imports: imports}, _, _} = parse_imports(code)
imports
end
end
|
lib/zigler/parser/imports.ex
| 0.599016 | 0.430506 |
imports.ex
|
starcoder
|
defmodule Spell do
@moduledoc """
Spell Corrector
Inspired by <NAME>'s essay: http://norvig.com/spell-correct.html
"""
@file_path "lib/big.txt"
@external_resource @file_path
@pattern Regex.compile!("\\w+")
@letters ?a..?z
@words @file_path
|> File.stream!()
|> Stream.flat_map(fn line ->
line = String.downcase(line)
@pattern
|> Regex.scan(line)
|> List.flatten()
end)
|> Enum.reduce(%{}, fn word, acc ->
Map.update(acc, word, 1, &(&1 + 1))
end)
@total_words @words |> Map.values() |> Enum.sum()
@doc """
Most probable spelling correction for word
"""
def correction(word) do
word
|> String.downcase()
|> candidates()
|> Enum.max_by(&probability/1)
end
@doc """
Probability of word
"""
def probability(word, n \\ @total_words) do
Map.get(@words, String.downcase(word), 0) / n
end
@doc """
Current list of words
"""
def dictionary, do: @words
def most_common(amount) do
@words
|> Enum.sort(fn {_, x}, {_, y} -> x >= y end)
|> Enum.take(amount)
end
# Generate possible spelling correction for word
defp candidates(word) do
cond do
(candidates = known([word])) != [] ->
candidates
(candidates = word |> edits1() |> known()) != [] ->
candidates
(candidates = word |> edits2() |> known()) != [] ->
candidates
true ->
[word]
end
end
# The subset of words that appear in the dictionary of words
def known(words) do
@words
|> Map.take(words)
|> Map.keys()
end
# All edits that are one edit away from word
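  # For a word of length n this produces n deletions, n - 1 transpositions,
  # 26n replacements and 26(n + 1) insertions, i.e. at most 54n + 25
  # candidates before the MapSet removes duplicates.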
def edits1(word) do
splits = splits(word)
splits
|> deletes()
|> transposes(splits)
|> replaces(splits)
|> inserts(splits)
|> MapSet.to_list()
end
# All edits that are two edits away from word.
def edits2(word) do
for e1 <- edits1(word), e2 <- edits1(e1) do
e2
end
end
defp splits(word) do
for idx <- 0..String.length(word) do
{left, right} = String.split_at(word, idx)
{String.to_charlist(left), String.to_charlist(right)}
end
end
  # Removes one letter. The pattern match already guarantees a non-empty
  # right-hand side, so deleting the last letter is included.
  defp deletes(splits) do
    for {left, [_ | right]} <- splits, into: MapSet.new() do
:erlang.iolist_to_binary([left, right])
end
end
# swap two adjacent letter
defp transposes(set, splits) do
for {left, [a, b | right]} <- splits, into: set do
:erlang.iolist_to_binary([left, b, a, right])
end
end
  # Change one letter for another. The pattern match guarantees there is a
  # letter to replace, including the last one.
  defp replaces(set, splits) do
    for {left, [_ | right]} <- splits, c <- @letters, into: set do
:erlang.iolist_to_binary([left, c, right])
end
end
# add a letter
defp inserts(set, splits) do
for {left, right} <- splits, c <- @letters, into: set do
:erlang.iolist_to_binary([left, c, right])
end
end
end
|
lib/spell.ex
| 0.75037 | 0.435421 |
spell.ex
|
starcoder
|
defmodule Caravan.Cluster.Config do
@moduledoc """
  Config for `Caravan.Cluster.DnsStrategy`.
- topology: topology name passed to `Cluster.Strategy.connect_nodes/4`
- query: The name to query for SRV records. Something like: `prod-likes-service-dist-consul`
- dns_client: module implementing `Caravan.DnsClient`. Defaults to `Caravan.DnsClient.InetRes`.
- node_sname: the base of a node name. App name is a good candidate.
- connect: Override to use a different transport mechanism. Uses `:libcluster` defaults otherwise.
- disconnect: Override to use a different transport mechanism. Uses `:libcluster` defaults otherwise.
- list_nodes: Override to use a different transport mechanism. Uses `:libcluster` defaults otherwise.
- poll_interval: poll the dns server on this interval. Defaults to `5_000`
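
  A sketch of wiring this strategy into a `libcluster` topology (the
  topology key and option values below are illustrative):

      config :libcluster,
        topologies: [
          caravan: [
            strategy: Caravan.Cluster.DnsStrategy,
            config: [
              query: "prod-likes-service-dist-consul",
              node_sname: "likes"
            ]
          ]
        ]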
"""
@default_poll_interval 5_000
@type t :: %__MODULE__{
topology: atom,
query: String.t(),
dns_client: atom,
node_sname: String.t(),
          connect: {module, atom, list},
          disconnect: {module, atom, list},
          list_nodes: {module, atom, list},
poll_interval: integer
}
defstruct [
:topology,
:query,
:dns_client,
:node_sname,
:connect,
:disconnect,
:list_nodes,
:poll_interval
]
@doc """
Takes a `Cluster.Strategy.State` and returns a Config struct
"""
@spec new(state :: Cluster.Strategy.State.t()) :: t()
def new(%Cluster.Strategy.State{
topology: topo,
connect: connect,
disconnect: disconnect,
list_nodes: list_nodes,
config: config
}) do
query = Keyword.fetch!(config, :query)
node_sname = Keyword.fetch!(config, :node_sname)
poll_interval = Keyword.get(config, :poll_interval, @default_poll_interval)
dns_client = Keyword.get(config, :dns_client, Caravan.DnsClient.InetRes)
%__MODULE__{
topology: topo,
connect: connect,
disconnect: disconnect,
list_nodes: list_nodes,
query: query,
dns_client: dns_client,
node_sname: node_sname,
poll_interval: poll_interval
}
end
end
|
lib/caravan/cluster/config.ex
| 0.890056 | 0.413566 |
config.ex
|
starcoder
|
defmodule AmqpOne.TypeManager.XML do
@moduledoc """
This module provides access to the XML specification of AMQP and provides
the type definitions.
  It is used during compilation to generate various functions, modules,
type and struct definitions. Many functions cannot be used properly after
the compilation, unless the specification is provided by the user.
"""
import Record
Record.defrecord :xmlElement, Record.extract(:xmlElement, from_lib: "xmerl/include/xmerl.hrl")
Record.defrecord :xmlAttribute, Record.extract(:xmlAttribute, from_lib: "xmerl/include/xmerl.hrl")
Record.defrecord :xmlText, Record.extract(:xmlText, from_lib: "xmerl/include/xmerl.hrl")
Record.defrecord :xmlNamespace, Record.extract(:xmlNamespace, from_lib: "xmerl/include/xmerl.hrl")
alias AmqpOne.TypeManager.{Type, Field, Descriptor, Encoding, Choice}
@doc "Takes the xmerl_scan results and produces a type spec"
def convert_xml({type, _}), do: convert_xml(type)
def convert_xml(doc) when is_record(doc, :xmlElement) and xmlElement(doc, :name) in [:amqp] do
xmlElement(doc, :content)
|> Enum.map(&convert_xml/1)
|> Enum.filter(fn
nil -> false
_ -> true
end)
# |> IO.inspect()
|> Enum.reduce(%{}, fn
nil, map -> map
types, map when is_map(types) -> Map.merge(map, types)
end)
# |> IO.inspect()
end
def convert_xml(type) when is_record(type, :xmlElement) and xmlElement(type, :name) in [:t, :section] do
# IO.puts ("convert_xml: #{inspect xmlElement(type, :name)}")
xmlElement(type, :content)
|> Enum.map(&convert_xml/1)
|> collect_children
|> Map.get(:type, [])
|> Stream.map(fn t = %Type{name: name} -> {name, t} end)
|> Enum.into(%{})
end
def convert_xml(type) when is_record(type, :xmlElement) and xmlElement(type, :name) == :type do
attrs = xmlElement(type, :attributes) |> Enum.map(&convert_xml/1)
children = xmlElement(type, :content) |> Enum.map(&convert_xml/1) |> collect_children
name = attrs[:name]
provides = case attrs[:provides] do
nil -> []
s when is_binary(s) -> String.split(s, ",")
end
enc = case children[:enc] do
nil -> []
encs -> encs
end
# IO.puts "convert_xml: type #{inspect name}"
%Type{name: name, label: attrs[:label], class: attrs[:class],
encodings: enc, fields: children[:field], choices: children[:choice],
source: attrs[:source], provides: provides,
descriptor: children[:desc]}
end
def convert_xml(field) when is_record(field, :xmlElement) and xmlElement(field, :name) == :field do
attrs = xmlElement(field, :attributes) |> Enum.map(&convert_xml/1)
name = normalize_fieldname(attrs[:name])
requires = case attrs[:requires] do
nil -> []
s when is_binary(s) -> String.split(s, ",")
end
type = attrs[:type]
%Field{name: name, label: attrs[:label], type: type,
requires: requires, default: attrs[:default],
mandatory: boolean(attrs[:mandatory]), multiple: boolean(attrs[:multiple])}
end
def convert_xml(desc) when is_record(desc, :xmlElement) and xmlElement(desc, :name) == :descriptor do
attrs = xmlElement(desc, :attributes) |> Enum.map(&convert_xml/1)
# code is an element of two 32 bit numbers, separated by a colon.
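    # e.g. "0x00000000:0x00000023" parses to the integer 0x23 (35).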
{code, <<>>} = attrs[:code]
|> String.replace("0x", "")
|> String.replace(":", "")
|> Integer.parse(16)
%Descriptor{name: attrs[:name], code: code}
end
def convert_xml(choice) when is_record(choice, :xmlElement) and xmlElement(choice, :name) == :choice do
attrs = xmlElement(choice, :attributes) |> Enum.map(&convert_xml/1)
%Choice{name: attrs[:name], value: attrs[:value]}
end
def convert_xml(enc) when is_record(enc, :xmlElement) and xmlElement(enc, :name) == :encoding do
attrs = xmlElement(enc, :attributes) |> Enum.map(&convert_xml/1)
{width, _rest} = Integer.parse(attrs[:width])
"0x" <> hex = attrs[:code]
{code_val, _rest} = Integer.parse(hex, 16)
# IO.puts "Code_val #{inspect code_val} of code: #{attrs[:code]}"
code = <<code_val::integer-size(8)>>
%Encoding{name: attrs[:name], label: attrs[:label], category: attrs[:category],
code: code, width: width}
end
# catch all unknown elements
def convert_xml(enc) when is_record(enc, :xmlElement), do: nil
def convert_xml(attr) when is_record(attr, :xmlAttribute) and
xmlAttribute(attr, :name) == :class do
{:class, xmlAttribute(attr, :value) |> List.to_atom}
end
def convert_xml(attr) when is_record(attr, :xmlAttribute) and
xmlAttribute(attr, :name) == :category do
{:category, xmlAttribute(attr, :value) |> List.to_atom}
end
def convert_xml(attr) when is_record(attr, :xmlAttribute) and
xmlAttribute(attr, :name) == :source do
{:source,"#{xmlAttribute(attr, :value)}"}
end
def convert_xml(attr) when is_record(attr, :xmlAttribute) and
xmlAttribute(attr, :value) == 'true' do
{xmlAttribute(attr, :name), true}
end
def convert_xml(attr) when is_record(attr, :xmlAttribute) and
xmlAttribute(attr, :value) == 'false' do
{xmlAttribute(attr, :name), false}
end
def convert_xml(attr) when is_record(attr, :xmlAttribute) and
is_list(xmlAttribute(attr, :value)) do
{xmlAttribute(attr, :name), "#{xmlAttribute(attr, :value)}"}
end
def convert_xml(attr) when is_record(attr, :xmlAttribute) do
{xmlAttribute(attr, :name), xmlAttribute(attr, :value)}
end
def convert_xml(txt) when is_record(txt, :xmlText), do: nil
  @spec collect_children([tuple]) :: %{optional(:type | :enc | :field | :choice | :desc) => [tuple]}
def collect_children(children) do
# effectively an ordered Enum.group_by
children
|> Stream.reject(&(&1 == nil))
|> Stream.map(fn(value) ->
case value do
%Type{} -> {:type, value}
%Encoding{} -> {:enc, value}
%Field{} -> {:field, value}
%Choice{} -> {:choice, value}
%Descriptor{} -> {:desc, value}
%{} -> {:nothing, nil}
end
end)
|> Enum.reduce(%{}, fn({key, value}, acc) ->
# this is slow, but there aren't that many keys
acc |> Map.update(key, [value], fn(old) -> old ++ [value] end)
end)
end
defp boolean(nil), do: false
defp boolean(true), do: true
defp boolean(false), do: false
@doc """
The XML specification of the primitive types of AMQP 1.0.
"""
def xml_spec(), do: File.read!("spec/amqp-core-v1/amqp-core-types-v1.0-os.xml")
@doc """
Converts the Frame specification (`amqp-core-transport-v1.0-os.xml`) into
the type definition
"""
@spec frame_spec(String.t) :: %{String.t => Type.t}
def frame_spec(file) do
File.read!("spec/amqp-core-v1/" <> file)
|> String.to_char_list
|> :xmerl_scan.string
|> convert_xml
end
def generate_struct(%Type{class: :composite} = t, parent_mod) do
IO.puts "Found comp type #{t.name}"
fs = if t.fields == nil, do: [], else: t.fields |> Enum.map(&extract_field/1)
field_list = fs |> Enum.map(fn f -> {f.name, f.value} end)
type_list = fs
|> Enum.map(fn f -> {f.name, f.type} end)
|> Enum.map(fn {n, t} -> quote do unquote(n) :: unquote(t) end end)
mod_name = struct_name(t.name, parent_mod)
quote do
defmodule unquote(mod_name) do
defstruct unquote(field_list)
@type t :: %unquote(mod_name){} # {unquote(type_list)}
end
end
end
def generate_struct(%Type{} = t, _parent_mod) do
IO.puts "Ignore simple type #{t.name}"
[]
end
def struct_name(name, parent_mod) do
Atom.to_string(parent_mod) <>
"." <> (name |> String.capitalize)
|> normalize_fieldname
end
def extract_field(%Field{name: n, type: t} = f) do
name = n |> normalize_fieldname
type = t |> underscore |> amqp_type
value = case f do
%Field{multiple: true} -> []
%Field{default: nil} -> nil
%Field{default: "true"} when type == :boolean -> true
%Field{default: "false"} when type == :boolean -> false
%Field{default: d} ->
if type in [:integer, :non_neg_integer, :pos_integer] do
{num, <<>>} = Integer.parse(d, 10)
num
else
d
end
end
%{name: name, value: value, type: type}
end
def normalize_fieldname(name) do
name |> underscore |> String.to_atom
end
def underscore(a) when is_atom(a) do
Atom.to_string(a) |> underscore
end
def underscore(string) do
String.replace(string, "-", "_")
end
@doc "map amqp type to their Elixir counterparts (if they have other names)"
def amqp_type("*"), do: :any
def amqp_type("array"), do: :list
def amqp_type("string"), do: :"Elixir.String.t"
def amqp_type("symbol"), do: :atom
def amqp_type("uuid"), do: :binary
def amqp_type(f) when f in ["double", "float"], do: :float
def amqp_type(n) when n in ["ubyte", "ushort", "uint", "ulong"], do: :non_neg_integer
def amqp_type(i) when i in ["byte", "short", "int", "long", "timestamp"], do: :integer
def amqp_type(any_other_type), do: String.to_atom(any_other_type)
defmacro frame_structs(file) do
frame_spec(file)
|> Enum.reject(fn entry -> entry == [] end)
|> Enum.map(fn {name, type} -> generate_struct type, __CALLER__.module end)
end
defmacro add_frames_to_typemanager(file) do
add_statements = frame_spec(file)
|> Enum.reject(fn entry -> entry == [] end)
|> Enum.map(fn {name, type} ->
t = Macro.escape(type)
s = struct_name(type.name, __CALLER__.module)
struct = Macro.escape(s)
if type.class == :composite do
# IO.puts "add frame #{inspect type}"
IO.puts "the struct name is: #{inspect s}"
quote do
AmqpOne.TypeManager.add_type(unquote(name), unquote(t))
AmqpOne.TypeManager.add_type(unquote(t))
AmqpOne.TypeManager.add_type(%unquote(struct){}, unquote(t))
end
else
quote do
AmqpOne.TypeManager.add_type(unquote(name), unquote(t))
AmqpOne.TypeManager.add_type(unquote(t))
end
end
end)
end
@doc """
Generate the typespecs from the XML specification.
Returns the `type_spec` function which takes a type name as
argument and returns the Elixir equivalent of the XML spec.
"""
defmacro typespec(xml_string) do
{s, _} = Code.eval_quoted(xml_string)
String.to_char_list(s)
|> :xmerl_scan.string
|> convert_xml
|> Enum.map(fn({name, spec}) ->
quote do
def type_spec(unquote(name)), do: unquote(Macro.escape(spec))
end
end)
end
end
|
lib/type_xml.ex
| 0.627495 | 0.459925 |
type_xml.ex
|
starcoder
|
defmodule Genex.Tools.Mutation do
use Bitwise
alias Genex.Types.Chromosome
@moduledoc """
Implementation of several population mutation methods.
Mutation takes place according to some rate. Mutation is useful for introducing novelty into the population. This ensures your solutions don't prematurely converge.
  Future versions of Genex will provide the ability to define the "aggressiveness" of mutations. As of this version of Genex, mutations affect the ENTIRE chromosome.
"""
@doc false
def bit_flip(probability: probability), do: &bit_flip(&1, probability)
@doc """
Perform a bit-flip mutation.
This mutation performs a binary XOR on every gene in the Chromosome.
Returns `Chromosome`.
# Parameters
- `chromosome`: `Chromosome` to mutate.
"""
@spec bit_flip(Chromosome.t()) :: Chromosome.t()
def bit_flip(chromosome) do
genes =
chromosome.genes
|> Enum.map(fn x ->
1 ^^^ x
end)
%Chromosome{
genes: genes,
size: chromosome.size,
weights: chromosome.weights,
f: chromosome.f,
collection: chromosome.collection
}
end
@doc false
def bit_flip, do: &bit_flip(&1)
@doc """
Perform a bit-flip mutation.
This mutation performs a binary XOR on a gene with probability `p` in the Chromosome.
Returns `Chromosome`.
# Parameters
- `chromosome`: `Chromosome` to mutate.
- `p`: Probability of bitflip.
"""
@spec bit_flip(Chromosome.t(), float()) :: Chromosome.t()
def bit_flip(chromosome, p) do
genes =
chromosome.genes
|> Enum.map(fn x ->
if :rand.uniform() < p do
1 ^^^ x
else
x
end
end)
%Chromosome{
genes: genes,
size: chromosome.size,
weights: chromosome.weights,
f: chromosome.f,
collection: chromosome.collection
}
end
@doc false
def scramble(radiation: radiation), do: &scramble(&1, radiation)
@doc """
Perform a scramble mutation.
This mutation scrambles the genes of the Chromosome.
Returns `%Chromosome{}`.
# Parameters
- `chromosome`: `Chromosome` to mutate.
"""
@spec scramble(Chromosome.t()) :: Chromosome.t()
def scramble(chromosome) do
genes =
chromosome.genes
|> Enum.shuffle()
%Chromosome{
genes: genes,
size: chromosome.size,
weights: chromosome.weights,
f: chromosome.f,
collection: chromosome.collection
}
end
@doc false
def scramble, do: &scramble(&1)
@doc """
Perform a scramble mutation on a random slice of size `n`.
This mutation scrambles the genes of the Chromosome between two random points.
Returns `%Chromosome{}`.
# Parameters
- `chromosome`: `Chromosome` to mutate.
"""
@spec scramble(Chromosome.t(), integer()) :: Chromosome.t()
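  # Not yet implemented; currently a stub (see the "future versions" note in
  # the moduledoc).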
def scramble(_, _), do: :ok
@doc """
Performs creep mutation.
This mutation generates a random number between `min` and `max` at every gene in the chromosome.
Returns `Chromosome`.
# Parameters
- `chromosome`: `Chromosome` to mutate.
- `min`: lower bound
- `max`: upper bound
"""
@spec creep(Chromosome.t(), integer(), integer()) :: Chromosome.t()
def creep(chromosome, min, max) do
genes =
chromosome.genes
|> Enum.map(fn _ ->
Enum.random(min..max)
end)
%Chromosome{
genes: genes,
size: chromosome.size,
weights: chromosome.weights,
f: chromosome.f,
collection: chromosome.collection
}
end
@doc """
Performs creep mutation with at random genes.
This mutation generates a random number between `min` and `max` at genes with probability `p` in the chromosome.
Returns `Chromosome`.
# Parameters
- `chromosome`: `Chromosome` to mutate.
- `p`: Probability of mutation.
- `min`: lower bound
- `max`: upper bound
"""
def creep(chromosome, p, min, max) do
genes =
chromosome.genes
|> Enum.map(fn x ->
if :rand.uniform() < p do
Enum.random(min..max)
else
x
end
end)
%Chromosome{
genes: genes,
size: length(genes),
weights: chromosome.weights,
f: chromosome.f,
collection: chromosome.collection
}
end
@doc false
def creep(min: min, max: max), do: &creep(&1, min, max)
def creep(min: min, max: max, radiation: radiation), do: &creep(&1, radiation, min, max)
@doc false
def gaussian(radiation: radiation), do: &gaussian(&1, radiation)
@doc """
Performs a gaussian mutation.
This mutation generates a random number at every gene in the chromosome. The random number is from a normal distribution produced from the mean and variance of the genes in the chromosome.
Returns `Chromosome`.
# Parameters
- `chromosome`: `Chromosome` to mutate.
"""
@spec gaussian(Chromosome.t()) :: Chromosome.t()
def gaussian(chromosome) do
mu = Enum.sum(chromosome.genes) / length(chromosome.genes)
sigma =
chromosome.genes
|> Enum.map(fn x -> (mu - x) * (mu - x) end)
|> Enum.sum()
|> Kernel./(length(chromosome.genes))
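    # Note: :rand.normal/2 takes the mean and the *variance* (not the
    # standard deviation), so sigma is intentionally not square-rooted here.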
genes =
chromosome.genes
|> Enum.map(fn _ ->
:rand.normal(mu, sigma)
end)
%Chromosome{
genes: genes,
size: chromosome.size,
weights: chromosome.weights,
f: chromosome.f,
collection: chromosome.collection
}
end
@doc false
def gaussian, do: &gaussian(&1)
@doc """
Performs a gaussian mutation at random genes.
This mutation generates a random number at random genes with probability `p` in the chromosome. The random number is from a normal distribution produced from the mean and variance of the genes in the chromosome.
Returns `Chromosome`.
# Parameters
- `chromosome`: `Chromosome` to mutate.
- `p`: Probability of mutation.
"""
@spec gaussian(Chromosome.t(), float()) :: Chromosome.t()
def gaussian(chromosome, p) do
mu = Enum.sum(chromosome.genes) / length(chromosome.genes)
sigma =
chromosome.genes
|> Enum.map(fn x -> (mu - x) * (mu - x) end)
|> Enum.sum()
|> Kernel./(length(chromosome.genes))
genes =
chromosome.genes
|> Enum.map(fn x ->
if :rand.uniform() < p do
:rand.normal(mu, sigma)
else
x
end
end)
%Chromosome{
genes: genes,
size: chromosome.size,
weights: chromosome.weights,
f: chromosome.f,
collection: chromosome.collection
}
end
@doc false
def polynomial_bounded, do: :ok
@doc false
def swap, do: :ok
@doc false
def invert, do: :ok
@doc false
def invert_center, do: :ok
@doc false
def isotropic, do: :ok
end
|
lib/genex/tools/mutation.ex
| 0.942334 | 0.688455 |
mutation.ex
|
starcoder
|
defmodule GatherSubmissions.Student.Reader do
@moduledoc """
Provides a function for reading students' information from a CSV file.
"""
alias GatherSubmissions.Student
defmodule DuplicateHeaderError do
defexception message: "Duplicate headers in input CSV file"
end
defmodule MissingHeaderError do
defexception [:header]
@impl true
def message(exception) do
"Missing header '#{exception.header}' in CSV file"
end
end
@doc """
Reads a CSV file with student information and returns a list of `t:GatherSubmissions.Student.t/0` structs.
The first line of the CSV has to contain a header with the names of each field in the
CSV file. The `header_map` parameter must contain the keys `"name"`, `"surname"` and `"user_id"`
  mapped to the names of the corresponding fields in the CSV file. Optionally, it can
  also map the key `"group"` to the name of the column containing the student's group.
This function raises the following exceptions:
* `DuplicateHeaderError` when the `header_map` contains several keys mapped to the same value.
* `MissingHeaderError` when the `header_map` does not contain the mandatory keys: `"name"`, `"surname"`, and `"user_id"`.
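
  For example (a sketch, assuming a CSV whose header line is
  `Name,Surname,Id,Group`; the file name is illustrative):

      header_map = %{
        "name" => "Name",
        "surname" => "Surname",
        "user_id" => "Id",
        "group" => "Group"
      }
      GatherSubmissions.Student.Reader.read_students_from_csv("students.csv", header_map)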
"""
@spec read_students_from_csv(String.t(), %{String.t() => String.t()}) :: [Student.t()]
def read_students_from_csv(filename, header_map) do
check_no_duplicate_headers(header_map)
File.stream!(filename)
|> CSV.decode!(headers: true)
|> Enum.map(&line_to_student(&1, header_map))
end
defp check_no_duplicate_headers(header) do
values = Map.values(header)
if Enum.uniq(values) == values do
:ok
else
raise DuplicateHeaderError
end
end
defp check_header_fields(map, header) do
case Enum.find(Map.values(header), nil, &(not Map.has_key?(map, &1))) do
nil -> :ok
field -> raise MissingHeaderError, header: field
end
end
defp line_to_student(map, header) do
check_header_fields(map, header)
%Student{
name: map[header["name"]],
surname: map[header["surname"]],
user: map[header["user_id"]],
group: if(Map.has_key?(header, "group"), do: map[header["group"]], else: nil)
}
end
end
|
lib/student/reader.ex
| 0.784979 | 0.560162 |
reader.ex
|
starcoder
|
defmodule Ada.Metrics.Reporter do
@moduledoc false
use GenServer
require Logger
def start_link(opts) do
GenServer.start_link(__MODULE__, opts, name: __MODULE__)
end
@impl true
def init(opts) do
state = Enum.into(opts, default_state())
case state.engine.connect() do
:ok ->
attach_reporters(state)
{:ok, state}
error ->
log_connection_error(error, state)
:ignore
end
end
defp log_connection_error(reason, state) do
Logger.warn(fn ->
"""
Couldn't start the #{inspect(state.engine)} metrics sink for reason:
#{inspect(reason)}
The device will function normally, but its performance metrics will not
be reported.
"""
end)
end
defp attach_reporters(state) do
events = [
[:vm, :proc_count],
[:vm, :proc_limit],
[:vm, :port_count],
[:vm, :port_limit],
[:vm, :atom_count],
[:vm, :messages_in_queues],
[:vm, :modules],
[:vm, :run_queue],
[:vm, :reductions],
[:vm, :memory, :total],
[:vm, :memory, :procs_used],
[:vm, :memory, :atom_used],
[:vm, :memory, :binary],
[:vm, :memory, :ets],
[:vm, :io, :bytes_in],
[:vm, :io, :bytes_out],
[:vm, :io, :count],
[:vm, :io, :words_reclaimed],
[:vm, :scheduler_wall_time, :active],
[:vm, :scheduler_wall_time, :total],
[:http_server, :request, :ok],
[:http_server, :request, :error],
[:http_client, :request, :ok],
[:http_client, :request, :error],
[:scheduler, :execution, :ok],
[:scheduler, :execution, :error]
]
:telemetry.attach_many("ada", events, &send_metric/4, state)
end
defp send_metric([:vm, measurement], %{value: value}, meta, state) do
opts = [
tags: [
"host:#{state.host}",
"family:#{state.family}"
]
]
send_vm_metric(meta.type, state.engine, "vm_#{measurement}", value, opts)
end
defp send_metric([:vm, :scheduler_wall_time, field], %{value: value}, meta, state) do
opts = [
tags: [
"host:#{state.host}",
"family:#{state.family}",
"scheduler_number:#{meta.scheduler_number}"
]
]
send_vm_metric(meta.type, state.engine, "vm_scheduler_wall_time.#{field}", value, opts)
end
defp send_metric([:vm, measurement, field], %{value: value}, meta, state) do
opts = [
tags: [
"host:#{state.host}",
"family:#{state.family}"
]
]
send_vm_metric(meta.type, state.engine, "vm_#{measurement}.#{field}", value, opts)
end
defp send_metric([:http_server, :request, result], %{duration: duration}, meta, state) do
opts = [
tags: [
"host:#{state.host}",
"family:#{state.family}",
"status:#{meta.resp_status}"
]
]
state.engine.timing("http_server.#{result}", to_ms(duration), opts)
state.engine.gauge("http_server.req_body_size", meta.req_body_length, opts)
state.engine.gauge("http_server.resp_body_size", meta.resp_body_length, opts)
end
defp send_metric([:http_client, :request, result], value, meta, state) do
case result do
:ok ->
opts = [
tags: [
"host:#{state.host}",
"family:#{state.family}",
"method:#{meta.method}",
"host:#{meta.host}",
"status:#{meta.status}"
]
]
state.engine.timing("http_client.ok", to_ms(value.duration), opts)
state.engine.gauge("http_client.size", value.resp_size, opts)
:error ->
opts = [
tags: [
"host:#{state.host}",
"family:#{state.family}",
"method:#{meta.method}",
"host:#{meta.host}"
]
]
state.engine.timing("http_client.error", to_ms(value.duration), opts)
end
end
defp send_metric([:scheduler, :execution, result], %{duration: duration}, meta, state) do
opts = [
tags: [
"host:#{state.host}",
"family:#{state.family}",
"workflow:#{workflow_to_tag(meta.workflow)}"
]
]
state.engine.timing("scheduler_execution.#{result}", to_ms(duration), opts)
end
defp send_metric(_name, _value, _meta, _state), do: :ok
defp send_vm_metric(type, engine, name, value, opts) do
case type do
:counter ->
engine.increment(name, value, opts)
:gauge ->
engine.gauge(name, value, opts)
:timing ->
engine.timing(name, value, opts)
end
end
defp to_ms(microseconds), do: System.convert_time_unit(microseconds, :microsecond, :millisecond)
defp workflow_to_tag(workflow) do
[_, _, camelcase_name] = Module.split(workflow)
Macro.underscore(camelcase_name)
end
defp default_state do
%{family: "ada", host: get_hostname()}
end
def get_hostname do
case Application.get_env(:nerves_init_gadget, :mdns_domain) do
nil ->
{:ok, hostname_chars} = :inet.gethostname()
List.to_string(hostname_chars)
mdns_domain when is_binary(mdns_domain) ->
mdns_domain
end
end
end
|
lib/ada/metrics/reporter.ex
| 0.686055 | 0.405508 |
reporter.ex
|
starcoder
|
defmodule VexValidators.Type do
@types [:integer, :float, :boolean, :atom, :string, :binary, :list, :map, :tuple]
@tests %{
1 => [:integer],
1.0 => [:float],
true => [:boolean, :atom],
:atom => [:atom],
"string" => [:binary, :string],
<<0>> => [:binary, :string],
[] => [:list],
%{} => [:map],
{} => [:tuple],
}
@moduledoc """
  Ensure a value is a specific type.
## Options
The `options` can be a keyword list with any of the following keys:
* `:is`: The type of the value. The values of this options can be:
* `:integer`: The value must be an integer.
* `:float`: The value must be a float.
* `:boolean`: The value must be a boolean.
* `:atom`: The value must be an atom.
* `:string` or `:binary`: The value must be a binary/string.
* `:list`: The value must be a list.
* `:map`: The value must be a map.
* `:tuple`: The value must be a tuple.
The `options` can also be an atom instead of the keyword list, which will be the value of the `:is` option.
## Examples
#{Enum.map(@types, fn t ->
["\n\n Examples when using the `:is` option with the value `#{inspect(t)}`:\n\n"] ++
Enum.map(@tests, fn {v, ts} ->
" iex> VexValidators.Type.validate(#{inspect(v)}, is: #{inspect(t)})\n " <>
if t in ts, do: ":ok", else: "{:error, \"must be of type #{t}\"}"
end)
end)
|> List.flatten() |> Enum.join("\n")}
## Custom Error Messages
Custom error messages (in EEx format), provided as :message, can use the following values:
iex> VexValidators.Type.__validator__(:message_fields)
[value: "Bad value", is: "Is type"]
For examples please see the [Vex documentation](https://github.com/CargoSense/vex#custom-eex-error-renderer-messages).
"""
use Vex.Validator
@doc false
@message_fields [value: "Bad value", is: "Is type"]
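  # Accepts the atom shorthand, e.g. `validate(1, :integer)` is equivalent to
  # `validate(1, is: :integer)` and returns :ok.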
def validate(value, options) when options in @types, do: validate(value, is: options)
def validate(value, options) when is_list(options) do
unless_skipping(value, options) do
is = options[:is]
case do_validate(value, is) do
:ok -> :ok
{:error, reason} -> {:error, message(options, reason, value: value, is: is)}
end
end
end
defp do_validate(value, :integer) when is_integer(value), do: :ok
defp do_validate(value, :float) when is_float(value), do: :ok
defp do_validate(value, :boolean) when is_boolean(value), do: :ok
defp do_validate(value, :atom) when is_atom(value), do: :ok
defp do_validate(value, :string) when is_binary(value), do: :ok
defp do_validate(value, :binary) when is_binary(value), do: :ok
defp do_validate(value, :list) when is_list(value), do: :ok
defp do_validate(value, :map) when is_map(value), do: :ok
defp do_validate(value, :tuple) when is_tuple(value), do: :ok
defp do_validate(_, type), do: {:error, "must be of type #{type}"}
end
|
lib/vex_validators/type.ex
| 0.79799 | 0.776919 |
type.ex
|
starcoder
|
defmodule Etop.Utils do
@moduledoc """
Utility helpers for Etop.
"""
@kb 1024
@mb @kb * @kb
@gb @mb * @kb
@tb @gb * @kb
@pb @tb * @kb
@doc """
Center a string in the given length.
Return a string of length >= the given length with the given string centered.
The returned string is padded (leading and trailing) with the given padding (default " ")
## Examples
iex> Etop.Utils.center("Test", 8)
" Test "
iex> Etop.Utils.center('Test', 7, "-")
"-Test--"
iex> Etop.Utils.center("test", 2)
"test"
"""
@spec center(any(), integer(), String.t()) :: String.t()
def center(item, len, char \\ " ")
def center(item, len, char) when is_binary(item) do
str_len = String.length(item)
len1 = if str_len < len, do: div(len - str_len, 2) + str_len, else: 0
item |> pad(len1, char) |> pad_t(len, char)
end
def center(item, len, char), do: item |> to_string() |> center(len, char)
@doc """
Returns the server's local naive datetime with the microsecond field truncated to the
given precision (:microsecond, :millisecond or :second).
## Arguments
* datetime (default utc_now)
* precision (default :second)
## Examples
iex> datetime = Etop.Utils.local_time()
iex> datetime.year >= 2020
true
iex> datetime = Etop.Utils.local_time(:millisecond)
iex> elem(datetime.microsecond, 1)
3
iex> datetime = NaiveDateTime.utc_now()
iex> expected = NaiveDateTime.add(datetime, Etop.Utils.timezone_offset())
iex> Etop.Utils.local_time(datetime) == %{expected | microsecond: {0, 0}}
true
iex> datetime = NaiveDateTime.utc_now()
iex> expected = NaiveDateTime.add(datetime, Etop.Utils.timezone_offset())
iex> Etop.Utils.local_time(datetime, :microsecond) == expected
true
"""
@spec local_time(DateTime.t() | NaiveDateTime.t(), atom()) :: NaiveDateTime.t()
def local_time(datetime \\ NaiveDateTime.utc_now(), precision \\ :second)
def local_time(%NaiveDateTime{} = datetime, precision) do
datetime
|> NaiveDateTime.to_erl()
|> :calendar.universal_time_to_local_time()
|> NaiveDateTime.from_erl!()
|> Map.put(:microsecond, datetime.microsecond)
|> NaiveDateTime.truncate(precision)
end
def local_time(%DateTime{} = datetime, precision) do
datetime
|> DateTime.to_naive()
|> local_time(precision)
end
def local_time(precision, _) when is_atom(precision) do
local_time(NaiveDateTime.utc_now(), precision)
end
@doc """
Pad (leading) the given string with spaces for the given length.
## Examples
iex> Etop.Utils.pad("Test", 8)
" Test"
iex> Etop.Utils.pad("Test", 2)
"Test"
iex> Etop.Utils.pad(100, 4, "0")
"0100"
"""
@spec pad(any(), integer(), String.t()) :: String.t()
def pad(string, len, char \\ " ")
def pad(string, len, char) when is_binary(string), do: String.pad_leading(string, len, char)
def pad(item, len, char), do: item |> to_string() |> pad(len, char)
@doc """
Pad (trailing) the given string with spaces for the given length.
## Examples
iex> Etop.Utils.pad_t("Test", 8)
"Test "
iex> Etop.Utils.pad_t("Test", 2)
"Test"
iex> Etop.Utils.pad_t(10.1, 5, "0")
"10.10"
"""
@spec pad_t(any(), integer(), String.t()) :: String.t()
def pad_t(string, len, char \\ " ")
def pad_t(string, len, char) when is_binary(string), do: String.pad_trailing(string, len, char)
def pad_t(item, len, char), do: item |> to_string() |> pad_t(len, char)
def create_load, do: create_load(5_000_000, &(&1 * 10 + 4))
  def create_load(count) when is_integer(count), do: create_load(count, &(&1 * 10 + 4))
  def create_load(load) when is_function(load, 1), do: create_load(5_000_000, load)
@doc """
Run a short, but heavy load on the system.
Runs a tight loop for 1 = 1..5M, i * 10 + 4.
"""
def create_load(count, load) when is_integer(count) and is_function(load, 1) do
Enum.each(1..5_000_000, load)
end
@doc """
Runs the `run_load/0` num times, sleeping for 1 second between them.
"""
def run_load(num \\ 10, opts \\ []) do
log = opts[:log]
count = opts[:count] || 5_000_000
load = opts[:load] || (&(&1 * 10 + 4))
sleep = Keyword.get(opts, :sleep, 1000)
spawn(fn ->
for i <- 1..num do
create_load(count, load)
if sleep, do: Process.sleep(sleep)
if log, do: IO.puts("Done #{i} of #{num}")
end
if log, do: IO.puts("Done running #{num} iterations")
end)
end
@doc """
Configurable sort.
## Arguments
* `list` - the enumerable to be sorted.
* `field` (:reductions_diff) - the field to be sorted on.
* `field_fn` (fn field -> &elem(&1, 1)[field] end) - function to get the field.
* `sorter_fn` (&>/2) -> Sort comparator (default descending)
## Examples
iex> data = [one: %{a: 3, b: 2}, two: %{a: 1, b: 3}]
iex> Etop.Utils.sort(data, :b)
[two: %{a: 1, b: 3}, one: %{a: 3, b: 2}]
iex> data = [one: %{a: 3, b: 2}, two: %{a: 1, b: 3}]
iex> Etop.Utils.sort(data, :a, sorter: &<=/2)
[two: %{a: 1, b: 3}, one: %{a: 3, b: 2}]
iex> data = [%{a: 1, b: 2}, %{a: 2, b: 3}]
iex> Etop.Utils.sort(data, :a, mapper: & &1[:a])
[%{a: 2, b: 3}, %{a: 1, b: 2}]
iex> data = [x: %{a: 1, b: 1}, z: %{a: 2, b: 0}, y: %{a: 1, b: 2}]
iex> Etop.Utils.sort(data, :a, secondary: :b)
[z: %{a: 2, b: 0}, y: %{a: 1, b: 2}, x: %{a: 1, b: 1}]
iex> data = [w: %{a: 1, b: 3}, x: %{a: 1, b: 1}, z: %{a: 2, b: 0}, y: %{a: 1, b: 2}]
iex> data |> Etop.Utils.sort(:a, secondary: :b, mapper: &elem(&1, 1)) |> Keyword.keys()
[:z, :w, :y, :x]
"""
def sort(list, field, opts \\ []) do
mapper = sort_mapper(field, opts[:mapper], opts[:secondary])
sorter = opts[:sorter] || (&>/2)
Enum.sort_by(list, mapper, sorter)
end
defp sort_mapper(field, nil, nil) do
&elem(&1, 1)[field]
end
defp sort_mapper(field, nil, field) do
sort_mapper(field, nil, nil)
end
defp sort_mapper(field, nil, secondary) do
&{elem(&1, 1)[field], elem(&1, 1)[secondary]}
end
defp sort_mapper(_, mapper, nil) do
mapper
end
defp sort_mapper(field, mapper, secondary) do
fn x ->
item = mapper.(x)
{item[field], item[secondary]}
end
end
@doc """
Get the server's timezone offset in seconds.
"""
@spec timezone_offset() :: integer
def timezone_offset do
NaiveDateTime.diff(NaiveDateTime.from_erl!(:calendar.local_time()), NaiveDateTime.utc_now())
end
@doc """
Scale a number into xb unit with label.
## Examples
iex> Etop.Utils.size_string_b(100.123)
"100.12B"
iex> Etop.Utils.size_string_b(10.5, 0)
"11B"
iex> Etop.Utils.size_string_b(1500)
"1.46KB"
"""
@spec size_string_b(number(), integer()) :: String.t()
def size_string_b(size, rnd \\ 2)
def size_string_b(size, rnd) when size < @kb,
do: float_to_string(size, rnd) <> "B"
def size_string_b(size, rnd),
do: size_string_kb(size / @kb, rnd)
@doc """
Scale a number into xb unit with label.
## Examples
iex> Etop.Utils.size_string_kb(0.253)
"0.25KB"
iex> Etop.Utils.size_string_kb(0.253, 1)
"0.3KB"
iex> Etop.Utils.size_string_kb(1500)
"1.46MB"
iex> Etop.Utils.size_string_kb(1024 * 1024 * 3)
"3.0GB"
iex> Etop.Utils.size_string_kb(1024 * 1024 * 1024 * 2.5)
"2.5TB"
iex> Etop.Utils.size_string_kb(1024 * 1024 * 1024 * 1024 * 1.5, 0)
"2PB"
iex> Etop.Utils.size_string_kb(1024 * 1024 * 1024 * 1024 * 1024, 0)
"1EB"
"""
@spec size_string_kb(number(), integer()) :: String.t()
def size_string_kb(size, rnd \\ 2)
def size_string_kb(size, rnd) when size < @kb do
float_to_string(size, rnd) <> "KB"
end
def size_string_kb(size, rnd) when size < @mb do
float_to_string(size / @kb, rnd) <> "MB"
end
def size_string_kb(size, rnd) when size < @gb do
float_to_string(size / @mb, rnd) <> "GB"
end
def size_string_kb(size, rnd) when size < @tb do
float_to_string(size / @gb, rnd) <> "TB"
end
def size_string_kb(size, rnd) when size < @pb do
float_to_string(size / @tb, rnd) <> "PB"
end
def size_string_kb(size, rnd) do
float_to_string(size / @pb, rnd) <> "EB"
end
@doc """
Round a number and convert to a string.
iex> Etop.Utils.float_to_string(1.125, 2)
"1.13"
iex> Etop.Utils.float_to_string(1.125, 1)
"1.1"
iex> Etop.Utils.float_to_string(1.5, 0)
"2"
iex> Etop.Utils.float_to_string(100, 0)
"100"
"""
@spec float_to_string(number(), integer()) :: String.t()
def float_to_string(size, 0) when is_float(size),
do: size |> round() |> to_string()
def float_to_string(size, rnd) when is_float(size),
do: size |> Float.round(rnd) |> to_string()
def float_to_string(size, _rnd),
do: to_string(size)
end
|
lib/etop/utils.ex
| 0.893292 | 0.495484 |
utils.ex
|
starcoder
|
defmodule DeltaCrdt.AWLWWMap do
@opaque crdt_state :: CausalDotMap.t()
@opaque crdt_delta :: CausalDotMap.t()
@type key :: term()
@type value :: term()
@type node_id :: term()
@moduledoc """
An add-wins last-write-wins map.
This CRDT is an add-wins last-write-wins map. This means:
* The data structure is of a map. So you can store the following values:
```
%{key: "value"}
%{"1" => %{another_map: "what!"}}
%{123 => {:a, :tuple}}
```
* Both keys and values are of type `term()` (aka `any()`).
* Add-wins means that if there is a conflict between an add and a remove operation, the add operation will win out. This is in contrast to remove-wins, where the remove operation would win.
  * Last-write-wins means that if there is a conflict between two write operations, the latest (as marked with a timestamp) will win. Under the hood, every delta carries a timestamp which is used to resolve such conflicts.
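
  A sketch of calling the functions directly (in practice they are usually
  driven by a `DeltaCrdt` process rather than by hand; `:node_a` is an
  illustrative node identifier):

      crdt = DeltaCrdt.AWLWWMap.new()
      delta = DeltaCrdt.AWLWWMap.add(:key, "value", :node_a, crdt)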
"""
alias DeltaCrdt.{CausalDotMap, AWSet, ORMap}
@doc "Convenience function to create an empty add-wins last-write-wins map"
@spec new() :: crdt_state()
def new(), do: %CausalDotMap{}
@doc "Add (or overwrite) a key-value pair to the map"
@spec add(key :: key(), val :: value(), i :: node_id(), crdt_state()) :: crdt_delta()
def add(key, val, i, map) do
{AWSet, :add, [{val, System.system_time(:nanosecond)}]}
|> ORMap.apply(key, i, map)
end
@doc "Remove a key and it's corresponding value from the map"
@spec remove(key :: key(), i :: node_id(), crdt_state()) :: crdt_delta()
def remove(key, i, map), do: ORMap.remove(key, i, map)
@doc "Remove all key-value pairs from the map"
@spec clear(node_id(), crdt_state()) :: crdt_delta()
def clear(i, map), do: ORMap.clear(i, map)
@doc """
Read the state of the map
**Note: this operation is expensive, so it's best not to call this more often than necessary.**
"""
@spec read(map :: crdt_state()) :: map()
def read(%{state: map}) do
Map.new(map, fn {key, values} ->
{val, _ts} = Enum.max_by(Map.keys(values.state), fn {_val, ts} -> ts end)
{key, val}
end)
end
def strict_expansion?(state, delta) do
case DeltaCrdt.SemiLattice.bottom?(delta) do
true ->
check_remove_expansion(state, delta)
false ->
check_add_expansion(state, delta)
end
end
defp check_add_expansion(state, delta) do
case MapSet.to_list(delta.causal_context.dots) do
[] ->
false
[{x, y}] ->
Map.get(state.causal_context.maxima, x, -1) < y
end
end
defp check_remove_expansion(state, delta) do
case MapSet.to_list(delta.causal_context.dots) do
[] ->
false
[dot] ->
Enum.filter(state.state, fn {key, _map} -> MapSet.member?(delta.keys, key) end)
|> Enum.any?(fn {_key, dot_map} ->
Enum.any?(dot_map.state, fn {_key, %{state: dot_set}} ->
MapSet.member?(dot_set, dot)
end)
end)
end
end
def join_decomposition(delta) do
Enum.map(delta.causal_context.dots, fn dot ->
Enum.find(delta.state, fn {_key, dot_map} ->
Enum.find(dot_map.state, fn {_key, %{state: state}} ->
MapSet.member?(state, dot)
end)
end)
|> case do
nil ->
%DeltaCrdt.CausalDotMap{
causal_context: DeltaCrdt.CausalContext.new([dot]),
state: %{},
keys: delta.keys
}
{key, dots} ->
%DeltaCrdt.CausalDotMap{
causal_context: DeltaCrdt.CausalContext.new([dot]),
state: %{key => dots},
keys: MapSet.new([key])
}
end
end)
end
def minimum_deltas(state, delta) do
join_decomposition(delta)
|> Enum.filter(fn d -> strict_expansion?(state, d) end)
end
end
|
lib/delta_crdt/aw_lww_map.ex
| 0.871037 | 0.850965 |
aw_lww_map.ex
|
starcoder
|
defmodule HNLive.Watcher do
@moduledoc """
`HNLive.Watcher` is a long-running `GenServer`, which should be started as
part of the application supervision tree.
`HNLive.Watcher` provides updates via `Phoenix.PubSub`
when the top stories change. Subscribe to the updates via `subscribe/1`.
These updates are broadcast as
`{:update_top_newest, %{score: [%TopStory{}] , comments: [%TopStory{}]}}`
The `score` and `comments` entries are sorted by score, and number of comments,
respectively. Please note that either of the entries may be `[]` which indicates
that no updates were available for this particular entry.
The watcher also broadcasts updates when the number of subscribers to the corresponding
PubSub topic changes.
These updates are broadcast as `{:subscriber_count, subscriber_count}`, where
`subscriber_count` is a non-negative integer.
"""
use GenServer
alias HNLive.{Api, Api.Story}
alias Phoenix.PubSub
# time after which initial HN API call to get newest stories
# will be retried if it fails
@retry_init_after 5000
# interval between HN API calls to get updated story IDs
@update_interval 10000
# number of top stories returned by get_top_newest_stories
@top_story_count 10
# name of PubSub service used for broadcasting updates
@pubsub_server HNLive.PubSub
# topic of PubSub channel used for broadcasting updates
@pubsub_topic "hackernews_watcher"
defmodule SubscriberCountTracker do
@moduledoc """
This originally used `Phoenix.Presence` to track the number of subscribers, which meant
recalculating the count individually in each connected LiveView (since "presence_diff"
events are sent to every LiveView). Using a simple `Phoenix.Tracker` instead, we keep track
of this information centrally in the Watcher and only broadcast the resulting subscriber count.
"""
use Phoenix.Tracker
def start_link(opts) do
opts = Keyword.merge([name: __MODULE__], opts)
Phoenix.Tracker.start_link(__MODULE__, opts, opts)
end
@impl true
def init(opts) do
pubsub_server = Keyword.fetch!(opts, :pubsub_server)
pubsub_topic = Keyword.fetch!(opts, :pubsub_topic)
{:ok, %{pubsub_server: pubsub_server, pubsub_topic: pubsub_topic, subscriber_count: 0}}
end
@impl true
def handle_diff(
diff,
%{
pubsub_server: pubsub_server,
pubsub_topic: pubsub_topic,
subscriber_count: subscriber_count
} = state
) do
{joins, leaves} = Map.get(diff, pubsub_topic, {[], []})
subscriber_count = subscriber_count + length(joins) - length(leaves)
PubSub.broadcast!(pubsub_server, pubsub_topic, {:subscriber_count, subscriber_count})
{:ok, %{state | subscriber_count: subscriber_count}}
end
end
defmodule TopStory do
@type t() :: %TopStory{
id: non_neg_integer(),
score: non_neg_integer(),
title: String.t(),
comments: non_neg_integer(),
creation_time: String.t(),
url: String.t(),
updated: boolean()
}
defstruct id: 0, score: 0, title: "", comments: 0, url: "", creation_time: "", updated: false
end
# Client
def start_link(state) do
GenServer.start_link(__MODULE__, state, name: __MODULE__)
SubscriberCountTracker.start_link(pubsub_server: @pubsub_server, pubsub_topic: @pubsub_topic)
end
@doc """
Returns the top (sorted by `:score` or number of `:comments`) newest stories.
"""
@spec get_top_newest_stories(:score | :comments) :: [TopStory.t()]
def get_top_newest_stories(sort_by \\ :score) do
GenServer.call(__MODULE__, {:get_top_newest_stories, sort_by})
end
@doc """
Subscribes to notifications when top stories are updated or subscriber count changes,
see module documentation for event format. Expects a LiveView socket ID as argument.
"""
def subscribe(socket_id) do
:ok = PubSub.subscribe(@pubsub_server, @pubsub_topic)
{:ok, _} =
Phoenix.Tracker.track(SubscriberCountTracker, self(), @pubsub_topic, socket_id, %{})
end
# Server
@impl true
def init(_) do
run_init()
run_get_updated_ids()
{:ok, %{stories: %{}, last_updated_ids: [], top_newest: %{}}}
end
@impl true
def handle_call(
{:get_top_newest_stories, sort_by},
_from,
%{top_newest: top_newest} = state
) do
{:reply, Map.get(top_newest, sort_by, []), state}
end
@impl true
def handle_info({:init, stories}, state) do
# When the watcher starts, the 500 newest stories are initially retrieved using
# `HNLive.Api.get_newest_stories/0`. We handle the result here.
if map_size(stories) == 0, do: run_init(@retry_init_after)
{:noreply,
%{
state
| stories: stories,
top_newest: update_top_newest(stories)
}}
end
@impl true
def handle_info(
{:get_updated_ids, updated_ids},
%{stories: stories, last_updated_ids: last_updated_ids} = state
) do
# Every 10 seconds updates are downloaded
# using `HNLive.Api.get_updates/0`. We handle the result here.
new_state =
case updated_ids do
# same ids retrieved as last time around? nothing to be done
{:ok, ^last_updated_ids} ->
state
{:ok, updated_ids} ->
# get smallest story id, or 0 if stories is empty
{min_id, _} = Enum.min_by(stories, &elem(&1, 0), fn -> {0, nil} end)
filtered_ids = Enum.filter(updated_ids, &(&1 >= min_id))
run_api_task(:updates, fn -> Api.get_many_stories(filtered_ids) end)
%{state | last_updated_ids: updated_ids}
# ignore errors
_ ->
state
end
run_get_updated_ids(@update_interval)
{:noreply, new_state}
end
@impl true
def handle_info(
{:updates, updated_stories},
%{stories: stories, top_newest: top_newest} = state
) do
# Updated stories were downloaded using `HNLive.Api.get_many_stories/1.`
# The updated stories are now merged with the previously retrieved stories.
stories =
Map.merge(stories, updated_stories)
|> Enum.sort_by(&elem(&1, 0), :desc)
# Only the 500 newest stories are considered (and kept in memory) when updating
# the top 10 stories by score and number of comments.
|> Enum.take(500)
|> Enum.into(%{})
{:noreply,
%{
state
| stories: stories,
top_newest: update_top_newest(stories, top_newest)
}}
end
# Helper which runs `api_fn` as linked `Task` after an optional `timeout`
# (which defaults to 0) and sends the result to the calling process
# as a `{name, result}` tuple.
defp run_api_task(name, api_fn, timeout \\ 0) do
pid = self()
Task.start_link(fn ->
Process.sleep(timeout)
send(pid, {name, api_fn.()})
end)
end
defp run_init(timeout \\ 0),
do: run_api_task(:init, &Api.get_newest_stories/0, timeout)
defp run_get_updated_ids(timeout \\ 0),
do: run_api_task(:get_updated_ids, &Api.get_updates/0, timeout)
defp update_top_newest(stories, previous_top_newest \\ %{}) do
[{top_newest_by_score, changes_by_score}, {top_newest_by_comments, changes_by_comments}] =
Enum.map(
[:score, :comments],
&get_top_newest_and_changes(&1, stories, Map.get(previous_top_newest, &1, []))
)
# only broadcast updates if any changes were found
if length(changes_by_score) > 0 || length(changes_by_comments) > 0,
do:
PubSub.broadcast!(
@pubsub_server,
@pubsub_topic,
{:update_top_newest, %{score: changes_by_score, comments: changes_by_comments}}
)
%{score: top_newest_by_score, comments: top_newest_by_comments}
end
defp get_top_newest_and_changes(sort_by, stories, previous_top_newest) do
top_newest =
stories
|> Enum.map(fn {story_id, %Story{} = story} ->
%TopStory{
id: story_id,
score: story.score,
title: story.title,
comments: story.comments,
url: story.url
}
end)
|> Enum.sort_by(&Map.fetch!(&1, sort_by), :desc)
|> Enum.take(@top_story_count)
current_time = DateTime.utc_now() |> DateTime.to_unix()
# convert the time elapsed since creation of the story into
# a human-readable string
top_newest =
Enum.map(top_newest, fn story ->
creation_time = stories[story.id].creation_time
Map.put(story, :creation_time, humanize_time(current_time - creation_time))
end)
# compare new and previous top stories and mark changes by setting
# :updated in the story map
mark_updated =
Enum.zip(top_newest, previous_top_newest)
|> Enum.map(fn {new, old} ->
Map.put(
new,
:updated,
new.id != old.id || new.score != old.score || new.comments != old.comments
)
end)
changes =
cond do
# mark_updated will be [] if previous_top_newest == [] because
# the Enum.zip above will result in an empty list then
mark_updated == [] -> top_newest
Enum.any?(mark_updated, & &1.updated) -> mark_updated
true -> []
end
{top_newest, changes}
end
defp humanize_time(seconds) do
cond do
seconds == 1 -> "1 second ago"
seconds < 60 -> "#{seconds} seconds ago"
seconds < 120 -> "1 minute ago"
seconds < 3600 -> "#{div(seconds, 60)} minutes ago"
seconds < 7200 -> "1 hour ago"
seconds < 3600 * 24 -> "#{div(seconds, 3600)} hours ago"
true -> "> 1 day ago"
end
end
end
|
lib/hnlive/watcher.ex
| 0.876238 | 0.563438 |
watcher.ex
|
starcoder
|
defmodule Xandra.RetryStrategy do
@moduledoc """
A behaviour that handles how to retry failed queries.
This behaviour makes it possible to customize the strategy that Xandra uses to
retry failed queries. By default, Xandra does not retry failed queries, and
does not provide any default retry strategy, since retrying queries based on
the failure reason is closely tied to application logic.
A module that implements the `Xandra.RetryStrategy` behaviour can be passed to
several functions in the `Xandra` module: look at the documentation for
`Xandra` for more information.
## Usage
When a query fails and a retry strategy module was passed as an option, Xandra
will:
1. invoke the `c:new/1` callback with the options passed to the failing
function to initialize the given retry strategy
1. ask the retry strategy whether to retry or error out (`c:retry/3`) until
either the query succeeds or `c:retry/3` says to error out
The `c:new/1` and `c:retry/3` callbacks will be invoked in the same
process that executed the original query.
If `c:retry/3` says to retry a query, such query will be retried on a
different Xandra connection than the one the query was first executed
through. For more information, see the documentation for `c:retry/3`.
## Examples
This is an example of a retry strategy that retries a fixed number of times
before failing. It reads the allowed number of retries from the options.
defmodule MyApp.CounterRetryStrategy do
@behaviour Xandra.RetryStrategy
def new(options) do
Keyword.fetch!(options, :retry_count)
end
def retry(_error, _options, _retries_left = 0) do
:error
end
def retry(_error, options, retries_left) do
{:retry, options, retries_left - 1}
end
end
Another interesting example could be a retry strategy based on downgrading
consistency: for example, we could execute all queries with a high consistency
(such as `:all`) at first, and in case of failure, try again with a lower
consistency (such as `:quorum`), finally giving up if that fails as well.
defmodule MyApp.DowngradingConsistencyRetryStrategy do
@behaviour Xandra.RetryStrategy
def new(_options) do
:no_state
end
def retry(_error, options, :no_state) do
case Keyword.fetch(options, :consistency) do
# No consistency was specified, so we don't bother to retry.
:error ->
:error
{:ok, :all} ->
{:retry, Keyword.put(options, :consistency, :quorum), :no_state}
{:ok, _other} ->
:error
end
end
end
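A strategy like the counter-based one above could then be passed per query,
for example (a sketch assuming a started `conn`; `:retry_count` is an option
read by that particular strategy, not by Xandra itself):
Xandra.execute(conn, statement, params,
retry_strategy: MyApp.CounterRetryStrategy,
retry_count: 3
)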
"""
@type state :: term
@doc """
Initializes the state of a retry strategy based on the given `options`.
"""
@callback new(options :: keyword) :: state
@doc """
Determines whether to retry the failed query or return the error.
The first argument is the error that caused the query to fail: for example, it
could be a `Xandra.Error` struct with reason `:read_timeout`. This can be used
to determine the retry strategy based on the failure reason. The second
argument is the options given to the function that failed while executing the
query. The third argument is the retry strategy state returned either by
`c:new/1` (if this was the first attempt to retry) or by subsequent calls to
`c:retry/3`.
If `:error` is returned, the function that was trying to execute the query
will return the error to the caller instead of retrying.
If `{:retry, new_options, new_state}` is returned, the function that was
trying to execute the query will be invoked again with the same query and
`new_options` as its options. `new_state` will be used if the query fails
again: in that case, `c:retry/3` will be invoked again with `new_state` as its
third argument. This process will continue until either the query is executed
successfully or this callback returns `:error`.
Note that when `{:retry, new_options, new_state}` is returned, the query will
be executed again *on a different Xandra connection*. This behaviour is
particularly useful with pooled connections and especially when using
`Xandra.Cluster` as the pool, since it will mean that there's a chance the
retried query will be executed on a different node altogether.
"""
@callback retry(error :: term, options :: keyword, state) ::
:error | {:retry, new_options :: keyword, new_state :: state}
@doc false
@spec run_with_retrying(keyword, (() -> result)) :: result when result: var
def run_with_retrying(options, fun) do
case Keyword.pop(options, :retry_strategy) do
{nil, _options} -> fun.()
{retry_strategy, options} -> run_with_retrying(options, retry_strategy, fun)
end
end
defp run_with_retrying(options, retry_strategy, fun) do
with {:error, reason} <- fun.() do
{retry_state, options} =
Keyword.pop_lazy(options, :retrying_state, fn ->
retry_strategy.new(options)
end)
case retry_strategy.retry(reason, options, retry_state) do
:error ->
{:error, reason}
{:retry, new_options, new_retry_state} ->
new_options = Keyword.put(new_options, :retrying_state, new_retry_state)
run_with_retrying(new_options, retry_strategy, fun)
other ->
raise ArgumentError,
"invalid return value #{inspect(other)} from " <>
"retry strategy #{inspect(retry_strategy)} " <>
"with state #{inspect(retry_state)}"
end
end
end
end
|
lib/xandra/retry_strategy.ex
| 0.928132 | 0.651313 |
retry_strategy.ex
|
starcoder
|
defmodule Elixirdo.Instance.MonadTrans.Reader do
alias Elixirdo.Instance.MonadTrans.Reader, as: ReaderT
use Elixirdo.Base
use Elixirdo.Typeclass.Monad.Trans, import_typeclasses: true
use Elixirdo.Typeclass.Monad.Reader, import_monad_reader: true
defstruct [:data]
deftype reader_t(r, m, a) :: %ReaderT{data: (r -> m(a))}
def new(data) do
%ReaderT{data: data}
end
def run(%ReaderT{data: data}) do
data
end
def run(reader_t_a, r) do
(run(reader_t_a)).(r)
end
def map(f, reader_t_a) do
new(
fn r ->
f.(run(reader_t_a, r))
end
)
end
definstance functor reader_t(r, m), m: functor do
def fmap(f, reader_t_a) do
new(
fn r ->
functor_a = run(reader_t_a, r)
Functor.fmap(f, functor_a, m)
end
)
end
end
definstance applicative reader_t(r, m), m: applicative do
def pure(a) do
new(
fn _ ->
Applicative.pure(a, m)
end
)
end
def ap(reader_t_f, reader_t_a) do
new(
fn r ->
applicative_f = run(reader_t_f, r)
applicative_a = run(reader_t_a, r)
Applicative.ap(applicative_f, applicative_a, m)
end
)
end
end
definstance monad reader_t(r, m), m: monad do
def bind(reader_t_a, afb) do
new(
fn r ->
monad m do
a <- run(reader_t_a, r)
run(afb.(a), r)
end
end
)
end
end
definstance monad_trans reader_t(r, m) do
def lift(monad_a) do
new(
fn _ ->
monad_a
end
)
end
end
definstance monad_reader reader_t(r, m), m: monad do
def local(f, reader_a) do
with_reader(f, reader_a, m)
end
def ask() do
new(fn r -> Monad.return(r, m) end)
end
def reader(f) do
new(fn r -> Monad.return(f.(r), m) end)
end
end
def with_reader(f, reader_a) do
new(fn r -> run(reader_a, f.(r)) end)
end
def with_reader(f, reader_a, _m) do
with_reader(f, reader_a)
end
end
|
lib/elixirdo/instance/monad_trans/reader.ex
| 0.581541 | 0.480905 |
reader.ex
|
starcoder
|
defmodule LiveViewStudio.Donations do
@moduledoc """
The Donations context.
"""
import Ecto.Query, warn: false
alias LiveViewStudio.Repo
alias LiveViewStudio.Donations.Donation
def almost_expired?(donation) do
donation.days_until_expires <= 10
end
@doc """
Returns the list of donations.
## Examples
iex> list_donations()
[%Donation{}, ...]
"""
def list_donations do
Repo.all(Donation)
end
@doc """
Returns a list of donations matching the given `criteria`.
Example Criteria:
[
paginate: %{page: 2, per_page: 5},
sort: %{sort_by: :item, sort_order: :asc}
]
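For example (an illustrative call; results depend on the data present):
iex> list_donations(paginate: %{page: 2, per_page: 5})
[%Donation{}, ...]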
"""
def list_donations(criteria) when is_list(criteria) do
query = from(d in Donation)
Enum.reduce(criteria, query, fn
{:paginate, %{page: page, per_page: per_page}}, query ->
from q in query,
offset: ^((page - 1) * per_page),
limit: ^per_page
{:sort, %{sort_by: sort_by, sort_order: sort_order}}, query ->
from q in query, order_by: [{^sort_order, ^sort_by}]
end)
|> Repo.all()
end
@doc """
Gets a single donation.
Raises `Ecto.NoResultsError` if the Donation does not exist.
## Examples
iex> get_donation!(123)
%Donation{}
iex> get_donation!(456)
** (Ecto.NoResultsError)
"""
def get_donation!(id), do: Repo.get!(Donation, id)
@doc """
Creates a donation.
## Examples
iex> create_donation(%{field: value})
{:ok, %Donation{}}
iex> create_donation(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_donation(attrs \\ %{}) do
%Donation{}
|> Donation.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a donation.
## Examples
iex> update_donation(donation, %{field: new_value})
{:ok, %Donation{}}
iex> update_donation(donation, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_donation(%Donation{} = donation, attrs) do
donation
|> Donation.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a donation.
## Examples
iex> delete_donation(donation)
{:ok, %Donation{}}
iex> delete_donation(donation)
{:error, %Ecto.Changeset{}}
"""
def delete_donation(%Donation{} = donation) do
Repo.delete(donation)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking donation changes.
## Examples
iex> change_donation(donation)
%Ecto.Changeset{data: %Donation{}}
"""
def change_donation(%Donation{} = donation, attrs \\ %{}) do
Donation.changeset(donation, attrs)
end
end
|
live_view_studio/lib/live_view_studio/donations.ex
| 0.804713 | 0.451206 |
donations.ex
|
starcoder
|
defmodule Rummage.Ecto.Hooks.Search do
@moduledoc """
`Rummage.Ecto.Hooks.Search` is the default search hook that comes shipped
with `Rummage.Ecto`.
This module can be overridden with a custom module while using `Rummage.Ecto`
in an `Ecto` struct module.
Usage:
For a regular search:
This returns a `queryable` which, when run, gives the list of `Parent`s
matched by a `like` search on `field_1`:
```elixir
alias Rummage.Ecto.Hooks.Search
searched_queryable = Search.run(Parent, %{"search" => %{"field_1" => %{"assoc" => [], "search_type" => "like", "search_term" => "field_!"}}})
```
For a case-insensitive search:
This returns a `queryable` which, when run, gives the list of `Parent`s
matched by a case-insensitive `ilike` search on `field_1`.
Keep in mind that case-insensitive search can only be used on `text` fields:
```elixir
alias Rummage.Ecto.Hooks.Search
searched_queryable = Search.run(Parent, %{"search" => %{"field_1" => %{"assoc" => [], "search_type" => "ilike", "search_term" => "field_!"}}})
```
There are many other `search_types`. Check out `Rummage.Ecto.Services.BuildSearchQuery`'s docs
to explore more `search_types`
This module can be overridden with a custom module while using `Rummage.Ecto`
in an `Ecto` struct module:
In the `Ecto` module:
```elixir
Rummage.Ecto.rummage(queryable, rummage, search: CustomHook)
```
OR
Globally for all models in `config.exs`:
```elixir
config :rummage_ecto,
Rummage.Ecto,
default_search: CustomHook
```
The `CustomHook` must implement the behaviour `Rummage.Ecto.Hook`. For examples of a `CustomHook`, check out some
custom hooks that ship with this library: `Rummage.Ecto.CustomHooks.SimpleSearch`, `Rummage.Ecto.CustomHooks.SimpleSort` and
`Rummage.Ecto.CustomHooks.SimplePaginate`.
"""
import Ecto.Query
alias Rummage.Ecto.Services.BuildSearchQuery
@behaviour Rummage.Ecto.Hook
@doc """
Builds a search queryable on top of the given `queryable` from the rummage parameters
from the given `rummage` struct.
## Examples
When rummage struct passed doesn't have the key "search", it simply returns the
queryable itself:
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> Search.run(Parent, %{})
Parent
When the queryable passed is not just a struct:
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> Search.run(queryable, %{})
#Ecto.Query<from p in "parents">
When rummage `struct` passed has the key `"search"`, but with a value of `%{}`, `""`
or `[]` it simply returns the `queryable` itself:
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> Search.run(Parent, %{"search" => %{}})
Parent
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> Search.run(Parent, %{"search" => ""})
Parent
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> Search.run(Parent, %{"search" => []})
Parent
When rummage `struct` passed has the key "search", with `field`, `associations`
`search_type` and `term` it returns a searched version of the `queryable` passed in
as the argument:
When `associations` is an empty `list`:
When rummage `struct` passed has `search_type` of `like`, it returns
a searched version of the `queryable` with `like` search query:
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> rummage = %{"search" => %{"field_1" => %{"assoc" => [], "search_type" => "like", "search_term" => "field_!"}}}
%{"search" => %{"field_1" => %{"assoc" => [], "search_type" => "like", "search_term" => "field_!"}}}
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> Search.run(queryable, rummage)
#Ecto.Query<from p in subquery(from p in "parents"), where: like(p.field_1, ^"field_!")>
When rummage `struct` passed has `search_type` of `ilike` (case insensitive), it returns
a searched version of the `queryable` with `ilike` search query:
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> rummage = %{"search" => %{"field_1" => %{"assoc" => [], "search_type" => "ilike", "search_term" => "field_!"}}}
%{"search" => %{"field_1" => %{"assoc" => [], "search_type" => "ilike", "search_term" => "field_!"}}}
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> Search.run(queryable, rummage)
#Ecto.Query<from p in subquery(from p in "parents"), where: ilike(p.field_1, ^"field_!")>
When rummage `struct` passed has `search_type` of `eq`, it returns
a searched version of the `queryable` with `==` search query:
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> rummage = %{"search" => %{"field_1" => %{"assoc" => [], "search_type" => "eq", "search_term" => "field_!"}}}
%{"search" => %{"field_1" => %{"assoc" => [], "search_type" => "eq", "search_term" => "field_!"}}}
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> Search.run(queryable, rummage)
#Ecto.Query<from p in subquery(from p in "parents"), where: p.field_1 == ^"field_!">
When rummage `struct` passed has `search_type` of `gt`, it returns
a searched version of the `queryable` with `>` search query:
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> rummage = %{"search" => %{"field_1" => %{"assoc" => [], "search_type" => "gt", "search_term" => "field_!"}}}
%{"search" => %{"field_1" => %{"assoc" => [], "search_type" => "gt", "search_term" => "field_!"}}}
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> Search.run(queryable, rummage)
#Ecto.Query<from p in subquery(from p in "parents"), where: p.field_1 > ^"field_!">
When rummage `struct` passed has `search_type` of `lt`, it returns
a searched version of the `queryable` with `<` search query:
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> rummage = %{"search" => %{"field_1" => %{"assoc" => [], "search_type" => "lt", "search_term" => "field_!"}}}
%{"search" => %{"field_1" => %{"assoc" => [], "search_type" => "lt", "search_term" => "field_!"}}}
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> Search.run(queryable, rummage)
#Ecto.Query<from p in subquery(from p in "parents"), where: p.field_1 < ^"field_!">
When rummage `struct` passed has `search_type` of `gteq`, it returns
a searched version of the `queryable` with `>=` search query:
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> rummage = %{"search" => %{"field_1" => %{"assoc" => [], "search_type" => "gteq", "search_term" => "field_!"}}}
%{"search" => %{"field_1" => %{"assoc" => [], "search_type" => "gteq", "search_term" => "field_!"}}}
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> Search.run(queryable, rummage)
#Ecto.Query<from p in subquery(from p in "parents"), where: p.field_1 >= ^"field_!">
When rummage `struct` passed has `search_type` of `lteq`, it returns
a searched version of the `queryable` with `<=` search query:
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> rummage = %{"search" => %{"field_1" => %{"assoc" => [], "search_type" => "lteq", "search_term" => "field_!"}}}
%{"search" => %{"field_1" => %{"assoc" => [], "search_type" => "lteq", "search_term" => "field_!"}}}
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> Search.run(queryable, rummage)
#Ecto.Query<from p in subquery(from p in "parents"), where: p.field_1 <= ^"field_!">
When `associations` is not an empty `list`:
When rummage `struct` passed has `search_type` of `like`, it returns
a searched version of the `queryable` with `like` search query:
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> rummage = %{"search" => %{"field_1" => %{"assoc" => ["parent", "parent"], "search_type" => "like", "search_term" => "field_!"}}}
%{"search" => %{"field_1" => %{"assoc" => ["parent", "parent"], "search_type" => "like", "search_term" => "field_!"}}}
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> Search.run(queryable, rummage)
#Ecto.Query<from p0 in subquery(from p in "parents"), join: p1 in assoc(p0, :parent), join: p2 in assoc(p1, :parent), where: like(p2.field_1, ^"field_!")>
When rummage `struct` passed has `search_type` of `lteq`, it returns
a searched version of the `queryable` with `<=` search query:
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> rummage = %{"search" => %{"field_1" => %{"assoc" => ["parent", "parent"], "search_type" => "lteq", "search_term" => "field_!"}}}
%{"search" => %{"field_1" => %{"assoc" => ["parent", "parent"], "search_type" => "lteq", "search_term" => "field_!"}}}
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> Search.run(queryable, rummage)
#Ecto.Query<from p0 in subquery(from p in "parents"), join: p1 in assoc(p0, :parent), join: p2 in assoc(p1, :parent), where: p2.field_1 <= ^"field_!">
When rummage `struct` passed has an empty string as `search_term`, it returns the `queryable` itself:
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> rummage = %{"search" => %{"field_1" => %{"assoc" => ["parent", "parent"], "search_type" => "lteq", "search_term" => ""}}}
%{"search" => %{"field_1" => %{"assoc" => ["parent", "parent"], "search_type" => "lteq", "search_term" => ""}}}
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> Search.run(queryable, rummage)
#Ecto.Query<from p in "parents">
When rummage `struct` passed has nil as `search_term`, it returns the `queryable` itself:
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> rummage = %{"search" => %{"field_1" => %{"assoc" => ["parent", "parent"], "search_type" => "lteq", "search_term" => nil}}}
%{"search" => %{"field_1" => %{"assoc" => ["parent", "parent"], "search_type" => "lteq", "search_term" => nil}}}
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> Search.run(queryable, rummage)
#Ecto.Query<from p in "parents">
When rummage `struct` passed has an empty array as `search_term`, it returns the `queryable` itself:
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> rummage = %{"search" => %{"field_1" => %{"assoc" => ["parent", "parent"], "search_type" => "lteq", "search_term" => []}}}
%{"search" => %{"field_1" => %{"assoc" => ["parent", "parent"], "search_type" => "lteq", "search_term" => []}}}
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> Search.run(queryable, rummage)
#Ecto.Query<from p in "parents">
When `associations` is an empty `string`:
When rummage `struct` passed has `search_type` of `like`, it returns
a searched version of the `queryable` with `like` search query:
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> rummage = %{"search" => %{"field_1" => %{"assoc" => "", "search_type" => "like", "search_term" => "field_!"}}}
%{"search" => %{"field_1" => %{"assoc" => "", "search_type" => "like", "search_term" => "field_!"}}}
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> Search.run(queryable, rummage)
#Ecto.Query<from p in subquery(from p in "parents"), where: like(p.field_1, ^"field_!")>
When rummage `struct` passed has `search_type` of `is_nil`, it returns
a searched version of the `queryable` with `IS NULL` search query:
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> rummage = %{"search" => %{"field_1" => %{"assoc" => [], "search_type" => "is_nil", "search_term" => "true"}}}
%{"search" => %{"field_1" => %{"assoc" => [], "search_type" => "is_nil", "search_term" => "true"}}}
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> Search.run(queryable, rummage)
#Ecto.Query<from p in subquery(from p in "parents"), where: is_nil(p.field_1)>
When rummage `struct` passed has `search_type` of `between`, it returns
a searched version of the `queryable` with `BETWEEN` search query:
iex> alias Rummage.Ecto.Hooks.Search
iex> import Ecto.Query
iex> rummage = %{"search" => %{"field_1" => %{"assoc" => [], "search_type" => "between", "search_term" => ["first", "last"]}}}
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> Search.run(queryable, rummage)
#Ecto.Query<from p in subquery(from p in "parents"), where: p.field_1 >= ^"first", where: p.field_1 <= ^"last">
"""
@spec run(Ecto.Query.t(), map) :: {Ecto.Query.t(), map}
def run(queryable, rummage) do
search_params = Map.get(rummage, "search")
case search_params do
a when a in [nil, [], {}, [""], "", %{}] -> queryable
_ -> handle_search(queryable, search_params)
end
end
@doc """
Implementation of `before_hook` for `Rummage.Ecto.Hooks.Search`. This just returns back `rummage` at this point.
It doesn't matter what `queryable` or `opts` are, it just returns back `rummage`.
## Examples
iex> alias Rummage.Ecto.Hooks.Search
iex> Search.before_hook(Parent, %{}, %{})
%{}
"""
@spec before_hook(Ecto.Query.t(), map, map) :: map
def before_hook(_queryable, rummage, _opts), do: rummage
defp handle_search(queryable, search_params) do
search_params
|> Map.to_list()
|> Enum.reduce(queryable, &search_queryable(&1, &2))
end
defp search_queryable(param, queryable) do
field =
param
|> elem(0)
|> String.to_atom()
field_params =
param
|> elem(1)
association_names =
case field_params["assoc"] do
a when a in [nil, "", []] -> []
assoc -> assoc
end
search_type = field_params["search_type"]
search_term = field_params["search_term"]
case search_term do
s when s in [nil, "", []] ->
queryable
_ ->
queryable = from(e in subquery(queryable))
association_names
|> Enum.reduce(queryable, &join_by_association(&1, &2))
|> BuildSearchQuery.run(field, search_type, search_term)
end
end
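# Joins through the given association, binding on the last join position
# (`[..., p1]`) so that a chain of associations such as ["parent", "parent"]
# nests correctly, each join hanging off the previous one.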
defp join_by_association(association, queryable) do
join(queryable, :inner, [..., p1], p2 in assoc(p1, ^String.to_atom(association)))
end
end
|
lib/rummage_ecto/hooks/search.ex
| 0.808937 | 0.879406 |
search.ex
|
starcoder
|
defmodule OMG.Output do
@moduledoc """
`OMG.Output` and `OMG.Output.Protocol` represent the outputs of transactions, i.e. the valuables or other pieces of
data spendable via transactions on the child chain, and/or exitable to the root chain.
This module specificially dispatches generic calls to the various specific types
"""
alias OMG.Crypto
alias OMG.RawData
@output_types Map.keys(OMG.WireFormatTypes.output_type_modules())
@type t :: %__MODULE__{
output_type: binary(),
owner: Crypto.address_t(),
currency: Crypto.address_t(),
amount: non_neg_integer()
}
@type error_t() :: {:error, atom()}
defstruct [:output_type, :owner, :currency, :amount]
@doc """
Reconstructs the structure from a list of RLP items
"""
@spec reconstruct(any()) :: t() | error_t()
def reconstruct(_rlp_data)
def reconstruct([raw_type, [_owner, _currency, _amount]] = rlp_data) when is_binary(raw_type) do
with {:ok, type, owner, currency, amount} <- clean_and_validate_data(rlp_data),
do: %__MODULE__{
output_type: type,
owner: owner,
currency: currency,
amount: amount
}
end
def reconstruct([_raw_type, [_owner, _currency, _amount]]), do: {:error, :unrecognized_output_type}
def reconstruct(_), do: {:error, :malformed_outputs}
def from_db_value(%{owner: owner, currency: currency, amount: amount, output_type: output_type})
when is_binary(owner) and is_binary(currency) and is_integer(amount) and is_integer(output_type) do
%__MODULE__{owner: owner, currency: currency, amount: amount, output_type: output_type}
end
def to_db_value(%__MODULE__{owner: owner, currency: currency, amount: amount, output_type: output_type})
when is_binary(owner) and is_binary(currency) and is_integer(amount) and is_integer(output_type) do
%{owner: owner, currency: currency, amount: amount, output_type: output_type}
end
def get_data_for_rlp(%__MODULE__{owner: owner, currency: currency, amount: amount, output_type: output_type}),
do: [output_type, [owner, currency, amount]]
# TODO(achiurizo)
# remove the validation here and port the error tuple response handling into ex_plasma.
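# Each parse/validate step below short-circuits the `with` chain, so the
# first failing field determines the `{:error, reason}` tuple that is returned.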
defp clean_and_validate_data([raw_type, [owner, currency, amount]]) do
with {:ok, parsed_type} <- RawData.parse_uint256(raw_type),
{:ok, _} <- valid_output_type?(parsed_type),
{:ok, parsed_owner} <- RawData.parse_address(owner),
{:ok, _} <- non_zero_owner?(owner),
{:ok, parsed_currency} <- RawData.parse_address(currency),
{:ok, parsed_amount} <- RawData.parse_amount(amount),
do: {:ok, parsed_type, parsed_owner, parsed_currency, parsed_amount}
end
defp non_zero_owner?(<<0::160>>), do: {:error, :output_guard_cant_be_zero}
defp non_zero_owner?(_), do: {:ok, :valid}
defp valid_output_type?(type) when type in @output_types, do: {:ok, :valid}
defp valid_output_type?(_), do: {:error, :unrecognized_output_type}
end
|
apps/omg/lib/omg/output.ex
| 0.80038 | 0.575141 |
output.ex
|
starcoder
|
defmodule ElixirKeeb.Representation do
alias ElixirKeeb.Structs.KeycodeBehavior
@moduledoc """
This module exists to get a string representation that
can be used by the `simple_keyboard` JavaScript library
in the web dashboard provided by the `poncho` `elixir_keeb_ui`
project.
"""
alias ElixirKeeb.Utils
@doc """
This function expects a module that is using the
`ElixirKeeb.Layout` module, since it relies on two functions
provided by the usage of the module: `all_layouts/0` and
`keycode_by_physical_position/3`. It will return a string
representation of the keyboard layout that will be consumed
by the `simple_keyboard` JavaScript library.
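For a two-layer keyboard the returned map is shaped roughly as follows
(an illustrative sketch; the actual key labels depend on the layout):
%{
default: ["{default} {layer_1}", "q w e r", ...],
layer_1: ["{default} {layer_1}", "1 2 3 4", ...]
}
The first row of every layer lists all layer names, so the dashboard can
indicate which layer is currently active.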
"""
def to_dashboard(keeb_module) do
layers = keeb_module.all_layouts()
dashboard_representation = layers
|> Utils.zip_with_index()
|> Enum.map(&layer_to_dashboard(&1, keeb_module))
# the first row will be used to indicate
# which layer is currently active
first_row = Keyword.keys(dashboard_representation)
|> Enum.map(fn layer -> "{#{layer}}" end)
|> Enum.join(" ")
dashboard_representation
|> Enum.map(fn {layer_key, layer_representation} ->
{layer_key, [first_row | layer_representation]}
end)
|> Enum.into(%{})
end
defp layer_to_dashboard({layer, layer_index}, keeb_module) do
layer_representation =
layer
|> Utils.zip_with_index()
|> Enum.map(fn {row, row_index} ->
row
|> Utils.zip_with_index()
|> Enum.map(fn {_keycode, col_index} ->
keeb_module.keycode_by_physical_position(
row_index, col_index, layer_index)
|> string_representation()
end)
|> Enum.join(" ")
end)
layer_representation_with_key(
layer_index, layer_representation)
end
defp layer_representation_with_key(0, layer_representation) do
{:default, layer_representation}
end
defp layer_representation_with_key(index, layer_representation) do
{String.to_atom("layer_#{index}"), layer_representation}
end
def string_representation(keycode) when is_atom(keycode) do
to_string(keycode) |> String.replace("kc_", "")
end
def string_representation(%KeycodeBehavior{
action: action,
layer: layer
}) when action in [:toggle, :lock] do
"{layer_#{layer}}"
end
def string_representation(%KeycodeBehavior{
action: :macro,
identifier: macro_id,
}) do
"macro_#{macro_id}"
end
def string_representation(%KeycodeBehavior{
action: :record,
identifier: recording_id,
}) do
"record_#{recording_id}"
end
def string_representation(%KeycodeBehavior{
action: :replay,
identifier: recording_id,
}) do
"replay_#{recording_id}"
end
def string_representation(%KeycodeBehavior{
action: :tap_or_toggle,
tap_or_toggle: %{
tap: tap_key,
toggle: toggle_key
},
}) do
"tap_or_toggle_#{tap_key}_#{toggle_key}"
end
end
|
lib/representation.ex
| 0.808937 | 0.402921 |
representation.ex
|
starcoder
|
defmodule Firenest.Topology do
@moduledoc """
Defines and interacts with Firenest topologies.
The topology is the building block in Firenest. It specifies:
* How failures are handled (temporary and permanent)
* How messages are sent across nodes
* How messages are broadcast in the cluster
The topology allows named processes running on other nodes
to be reached via broadcasts or direct messages. The named
processes currently are identified by the local atom name.
An instance of `Firenest.Topology` must be started per node,
via the `child_spec/1` function, alongside the proper adapter.
All topologies are also locally named.
Firenest ships with a default topology called `Firenest.Topology.Erlang`
that uses the Erlang distribution to build a fully meshed topology.
"""
@typedoc "An atom identifying the topology name."
@type t() :: atom()
@typedoc "How named processes are identified by topology."
@type name() :: atom()
@typedoc "A unique identidier for a node in the topology."
@type node_ref() :: {name :: node(), id :: term()}
@typedoc """
The plane (such as connection) to broadcast/send messages on.
Currently `plane` is always `:default`.
"""
@type plane() :: atom()
@doc """
Returns the child specification for a topology.
When started, the topology must create an ETS table with the same
name as the topology and register the key `:adapter` under it,
pointing to a module that implements the topology callbacks.
"""
@callback child_spec(keyword()) :: Supervisor.child_spec()
@doc """
Returns the name of the current node in `topology`.
"""
@callback node(t()) :: node_ref()
@doc """
Returns all other nodes in the `topology` (does not include the current node).
"""
@callback nodes(t()) :: [node_ref()]
@doc """
Broadcasts `message` to all processes named `name` on all other nodes in `topology`.
The plane allows developers to configure different planes (such as connections)
to broadcast the message. Currently `plane` is always `:default`.
"""
@callback broadcast(t(), name(), plane(), message :: term()) :: :ok | {:error, term()}
@doc """
Sends a `message` to the process named `name` in `node` running on the `topology`.
The plane allows developers to configure different planes (such as connections)
to broadcast the message. Currently `plane` is always `:default`.
"""
@callback send(t(), node_ref(), name(), plane(), message :: term()) :: :ok | {:error, term()}
@doc """
Syncs the given `pid` across the topology using its name.
"""
@callback sync_named(t(), pid()) :: {:ok, [node_ref()]} | {:error, {:already_synced, pid}}
@doc """
Returns the child specification for a topology.
The `:adapter` and `:name` keys are required as part of `options`.
All other keys have their semantics dictated by the adapter.
## Examples
This is used to start the topology as part of your supervision tree:
{Firenest.Topology, topology: MyApp.Topology, adapter: Firenest.Topology.Erlang}
"""
@spec child_spec(keyword()) :: Supervisor.child_spec()
def child_spec(options) do
name = options[:name]
{adapter, options} = Keyword.pop(options, :adapter)
unless adapter && name do
raise ArgumentError, "Firenest.Topology.child_spec/1 expects :adapter and :name as options"
end
adapter.child_spec(options)
end
@doc """
Returns the name of the current node in `topology`.
iex> Firenest.Topology.node(MyApp.Topology)
{:foo@example, _}
If the node is not connected to any other node, it may fail.
"""
@spec node(t()) :: node_ref()
def node(topology) when is_atom(topology) do
adapter!(topology).node(topology)
end
@doc """
Returns all other nodes in the `topology` (does not include the current node).
iex> Firenest.Topology.nodes(MyApp.Topology)
[{:bar@example, _}, {:baz@example, _}]
"""
@spec nodes(t()) :: [node_ref()]
def nodes(topology) when is_atom(topology) do
adapter!(topology).nodes(topology)
end
@doc """
Broadcasts `message` to all processes named `name` on all other nodes in `topology`.
The message is not broadcast to the process named `name`
in the current node.
Returns `:ok` or `{:error, reason}`.
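For example, assuming a process registered as `:my_server` is running on the
other nodes (an illustrative call):
iex> Firenest.Topology.broadcast(MyApp.Topology, :my_server, :ping)
:ok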
"""
@spec broadcast(t(), name(), message :: term()) :: :ok | {:error, term()}
def broadcast(topology, name, message) when is_atom(topology) and is_atom(name) do
adapter!(topology).broadcast(topology, name, :default, message)
end
@doc """
Sends `message` to processes named `name` in node identified by `node_ref`.
Returns `:ok` or `{:error, reason}`. In particular,
`{:error, :noconnection}` must be returned if the node
name is not known.
However, keep in mind `:ok` does not guarantee the message
was delivered nor processed by the receiving `name`, since
`name` may have disconnected by the time we send (although we
don't know it yet).
"""
@spec send(t(), node_ref(), name(), message :: term()) :: :ok | {:error, term()}
def send(topology, {node, _} = node_ref, name, message)
when is_atom(topology) and is_atom(node) and is_atom(name) do
adapter!(topology).send(topology, node_ref, name, :default, message)
end
@doc """
Syncs the given `pid` across the topology using its name.
This function is the building block for building static services
on top of the topology. It allows the current process to know whenever
another process with the same name goes up or down in the topology
as long as processes call `sync_named/2`.
This function returns `{:ok, node_refs}` in case the given pid has not
been synced yet, `{:error, {:already_synced, pid}}` otherwise.
`node_refs` is a list of tuples with the first element with the node
name as an atom and the second element is a term used to version
that node name. Only the nodes that are known to have a service
with the same `name` running and that have already called `sync_named/2`
will be included in the list.
Once this function is called, the given process `pid` will receive
two messages with the following guarantees:
* `{:named_up, node_ref, name}` is delivered whenever a process
with name `name` is up on the node identified by `node_ref`.
The message is guaranteed to be delivered after the node is added
to the list returned by `nodes/1`. Note that you may receive
messages from the node before you receive its `:named_up` event.
* `{:named_down, node_ref, name}` is delivered whenever a process
with name `name` is down on the node identified by `node_ref`.
It can be delivered when such processes crashes or when there is
a disconnection. The message is guaranteed to be delivered after
the node is removed from the list returned by `nodes/1`. Note
the topology may not necessarily guarantee that no messages
are received from `name` after this message is sent.
If the connection to a node is lost, perhaps due to a network partition
or crash, and then reestablished, a `:named_down` for that node is
guaranteed to be delivered before `:named_up` event. In case the service
goes up and down many times during a network partition, those events
won't be notified, only a `:named_down` event from the partition and
a `:named_up` on reconnection.
## Synchronous communication
When you receive a `named_up`, it means you can see `name` in a given
node, but it does not mean that process can see you. Therefore, if you want
to engage on synchronous communication with that process, you must expect
two messages, the `named_up` message and another message sent by the other
process that declares it can see you. In pseudo-code:
def handle_info({:named_up, node, name}, state) do
myself = Firenest.Topology.node(state.topology)
Firenest.Topology.send(state.topology, node, name, {:i_can_see_you, myself})
add_node_named_up(state, node)
end
def handle_info({:i_can_see_you, node}, state) do
add_node_i_can_see_you(state, node)
end
Only after you receive both `named_up` and `i_can_see_you` messages you
can be sure that you are able to communicate to that node and receive
messages back. Note those two messages may be delivered in any order.
"""
@spec sync_named(t(), pid()) :: {:ok, [node_ref()]} | {:error, {:already_synced, pid()}}
def sync_named(topology, pid) when is_pid(pid) do
adapter!(topology).sync_named(topology, pid)
end
@doc """
Gets the adapter for the topology.
Expects the topology to be running, otherwise it raises.
"""
def adapter!(name) do
try do
:ets.lookup_element(name, :adapter, 2)
catch
:error, :badarg -> raise "could not find topology named #{inspect(name)}"
end
end
end
|
lib/firenest/topology.ex
| 0.931072 | 0.73557 |
topology.ex
|
starcoder
|
defmodule WebSocket do
@moduledoc """
An exploration into a stand-alone library for
Plug applications to easily adopt WebSockets.
## Integrating with Plug
If you're looking to try this in your own test
application, do something like this:
```elixir
defmodule MyApp.Router do
use Plug.Router
use WebSocket
# WebSocket routes
# route controller/handler function & name
socket "/topic", MyApp.TopicController, :handle
socket "/echo", MyApp.EchoController, :echo
# Rest of your router's plugs and routes
# ...
def run(opts \\ []) do
dispatch = dispatch_table(opts)
Plug.Adapters.Cowboy.http __MODULE__, opts, [dispatch: dispatch]
end
end
```
For the time being, there is a `run/1` function
generated for your router that starts a HTTP/WS
listener. Not sure if this will stay or get
reduced to helper functions that aid in the
creation of a similar function. Most likely the
latter will win out to help compose functionality.
The big part that it plays is the building of a
dispatch table to pass as an option to Cowboy that
has an entry for each of your socket routes and a
catch all for HTTP requests.
### Add the necessary bits to a module
From the topic example:
```elixir
defmodule MyApp.TopicController do
def handle(:init, state) do
{:ok, state}
end
def handle(:terminate, _state) do
:ok
end
def handle("topic:" <> letter, state, data) do
payload = %{awesome: "blah \#{letter}",
orig: data}
{:reply, {:text, payload}, state}
end
end
```
Currently, the function name needs to be unique
across all controllers/handlers, as it's used for
the Events layer.
### Broadcast from elsewhere
Need to send data out from elsewhere in your app?
```elixir
# Build your message
topic = "my_event"
data = %{foo: "awesome"}
mes = WebSocket.Message.build(topic, data)
json = Poison.encode!(mes)
# Pick your destination (from your routes)
name = :handle
# Send away!
WebSockets.broadcast!(name, json)
```
This needs to be nicer, but this is still in
progress.
"""
@type route :: {atom | binary, Module.t, {Module.t, Keyword.t}}
defmacro __using__(_) do
quote do
import unquote(__MODULE__)
@before_compile unquote(__MODULE__)
Module.register_attribute(__MODULE__, :ws_routes, accumulate: true)
end
end
defmacro __before_compile__(env) do
quote do
unquote(dispatch_table(env))
end
end
defmacro socket(route, mod, func) do
quote do
@ws_routes {:{}, [], [unquote(route), unquote(mod), unquote(func)]}
end
end
defp dispatch_table(env) do
plug = env.module
routes = Module.get_attribute(env.module, :ws_routes)
quote do
@spec dispatch_table(Keyword.t) :: [WebSocket.route]
def dispatch_table(opts \\ []) do
opts = unquote(plug).init(opts)
build_dispatch(unquote(plug), unquote(routes), opts)
end
end
end
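# For a router with a single socket route, the resulting dispatch table
# looks roughly like this (illustrative):
#
# [{:_, [{"/echo", WebSocket.Cowboy.Handler, {MyApp.EchoController, :echo}},
# {:_, Plug.Adapters.Cowboy.Handler, {MyApp.Router, []}}]}]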
@spec build_dispatch(Module.t,
[{binary, Module.t, atom}],
Keyword.t) :: [{:_, [route]}]
def build_dispatch(plug, routes \\ [], opts \\ []) do
default = [{:_, Plug.Adapters.Cowboy.Handler, {plug, opts}}]
routes = routes
|> Enum.reverse
|> Enum.reduce(default, fn {route, mod, func}, acc ->
[{route, WebSocket.Cowboy.Handler, {mod, func}}|acc]
end)
[{:_, routes}]
end
end
|
lib/web_socket.ex
| 0.800497 | 0.6515 |
web_socket.ex
|
starcoder
|
defmodule Number.SI do
@moduledoc """
Provides functions for formatting numbers using SI notation.
"""
@prefixes [
# yotta
{8, "Y"},
# zetta
{7, "Z"},
# exa
{6, "E"},
# peta
{5, "P"},
# tera
{4, "T"},
# giga
{3, "G"},
# mega
{2, "M"},
# kilo
{1, "k"},
{0, ""},
# milli
{-1, "m"},
# micro
{-2, "µ"},
# nano
{-3, "n"},
# pico
{-4, "p"},
# femto
{-5, "f"},
# atto
{-6, "a"},
# zepto
{-7, "z"},
# ycoto
{-8, "y"}
]
@doc """
Format numbers using SI notation
## Parameters
* `number` - A value to convert. Can be any value that implements
`Number.Conversion.to_float/1`.
* `options` - A keyword list of options. See the documentation below for all
available options.
## Options
* `:base` - Use 1024 if you wish to format bytes. Default: 1000
* `:separator` - The string to place between the scaled number and the
prefix + unit. Perhaps you want a space here. Default: ""
* `:unit` - The unit of measurement, e.g. "M" for Meters. Default: ""
* `:precision` - The number of decimal places to include. Default: 2
* `:trim` - Trim trailing zeros. Default: false
Default configuration for these options can be specified in the `Number`
application configuration.
config :number, si: [
separator: " ",
precision: 4,
trim: true
]
## Examples
iex> Number.SI.number_to_si(1210000000, unit: "W")
"1.21GW"
iex> Number.SI.number_to_si(1210000000, unit: "W", precision: 1)
"1.2GW"
iex> Number.SI.number_to_si(1210000000, unit: "W", precision: 3, separator: " ")
"1.210 GW"
iex> Number.SI.number_to_si(1210000000, unit: "W", precision: 5, trim: true)
"1.21GW"
iex> Number.SI.number_to_si(1210000000)
"1.21G"
iex> Number.SI.number_to_si(Decimal.new(1210000000))
"1.21G"
iex> Number.SI.number_to_si('charlist')
** (ArgumentError) number must be a float, integer or implement `Number.Conversion` protocol, was 'charlist'
"""
@spec number_to_si(number, list) :: String.t()
def number_to_si(number, options \\ [])
def number_to_si(number, options) when is_number(number) do
options = Keyword.merge(config(), options)
exp = compute_exponent(number, options[:base])
prefix = exponent_to_prefix(exp)
scaled_number = number / :math.pow(options[:base], exp)
display_number = :erlang.float_to_binary(scaled_number, [{:decimals, options[:precision]}])
final_number = if options[:trim], do: trim(display_number), else: display_number
final_number <> options[:separator] <> prefix <> options[:unit]
end
def number_to_si(number, options) do
if Number.Conversion.impl_for(number) do
number
|> Number.Conversion.to_float()
|> number_to_si(options)
else
raise ArgumentError,
"number must be a float, integer or implement `Number.Conversion` protocol, was #{
inspect(number)
}"
end
end
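# The exponent is floor(log(|number|) / log(base)), clamped to the supported
# prefix range [-8, 8]; e.g. 1_210_000_000 with base 1000 yields 3 (giga).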
defp compute_exponent(number, _) when number == 0, do: 0
defp compute_exponent(number, base) do
(:math.log(abs(number)) / :math.log(base))
|> Float.floor()
|> trunc
|> max(-8)
|> min(8)
end
@doc false
for {num, text} = _p <- @prefixes do
def exponent_to_prefix(number) when number == unquote(num), do: unquote(text)
end
defp trim(display_number) do
if String.contains?(display_number, ".") do
display_number
|> String.trim_trailing("0")
|> String.trim_trailing(".")
else
display_number
end
end
defp config do
defaults = [
base: 1000,
separator: "",
unit: "",
precision: 2
]
Keyword.merge(defaults, Application.get_env(:number, :si, []))
end
end
|
lib/number/si.ex
| 0.79999 | 0.439266 |
si.ex
|
starcoder
|
defmodule Inky.RpiIO do
@moduledoc """
An `Inky.InkyIO` implementation intended for use with Raspberry Pis; it relies on
Circuits.GPIO and Circuits.SPI.
"""
@behaviour Inky.InkyIO
alias Circuits.GPIO
alias Circuits.SPI
alias Inky.InkyIO
defmodule State do
@moduledoc false
@state_fields [:dc_pid, :reset_pid, :busy_pid, :spi_pid]
@enforce_keys @state_fields
defstruct @state_fields
end
@reset_pin 27
@busy_pin 17
@dc_pin 22
@cs0_pin 0
@default_pin_mappings %{
dc_pin: @dc_pin,
reset_pin: @reset_pin,
busy_pin: @busy_pin,
cs0_pin: @cs0_pin
}
@spi_speed_hz 488_000
@spi_command 0
@spi_data 1
# API
@impl InkyIO
def init(opts \\ []) do
pin_mappings = opts[:pin_mappings] || @default_pin_mappings
spi_address = "spidev0." <> to_string(pin_mappings[:cs0_pin])
{:ok, dc_pid} = GPIO.open(pin_mappings[:dc_pin], :output)
{:ok, reset_pid} = GPIO.open(pin_mappings[:reset_pin], :output)
{:ok, busy_pid} = GPIO.open(pin_mappings[:busy_pin], :input)
{:ok, spi_pid} = SPI.open(spi_address, speed_hz: @spi_speed_hz)
# Use binary pattern matching to pull out the ADC counts (low 10 bits)
# <<_::size(6), counts::size(10)>> = SPI.transfer(spi_pid, <<0x78, 0x00>>)
%State{dc_pid: dc_pid, reset_pid: reset_pid, busy_pid: busy_pid, spi_pid: spi_pid}
end
@impl InkyIO
def handle_sleep(_state, duration_ms) do
:timer.sleep(duration_ms)
end
@impl InkyIO
def handle_read_busy(pins) do
GPIO.read(pins.busy_pid)
end
@impl InkyIO
def handle_reset(pins, value) do
:ok = GPIO.write(pins.reset_pid, value)
end
@impl InkyIO
def handle_command(pins, command, data) do
write_command(pins, command)
write_data(pins, data)
end
@impl InkyIO
def handle_command(pins, command) do
write_command(pins, command)
end
# IO primitives
defp write_command(pins, command) do
value = maybe_wrap_integer(command)
spi_write(pins, @spi_command, value)
end
defp write_data(pins, data) do
value = maybe_wrap_integer(data)
spi_write(pins, @spi_data, value)
end
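# The DC (data/command) GPIO line tells the display controller how to
# interpret the bytes that follow on SPI: 0 (@spi_command) for a command
# byte, 1 (@spi_data) for data bytes.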
defp spi_write(pins, data_or_command, values) when is_list(values) do
:ok = GPIO.write(pins.dc_pid, data_or_command)
{:ok, <<_::binary>>} = SPI.transfer(pins.spi_pid, :erlang.list_to_binary(values))
end
defp spi_write(pins, data_or_command, value) when is_binary(value) do
:ok = GPIO.write(pins.dc_pid, data_or_command)
{:ok, <<_::binary>>} = SPI.transfer(pins.spi_pid, value)
end
# internals
defp maybe_wrap_integer(value), do: if(is_integer(value), do: <<value>>, else: value)
end
|
lib/hal/rpiio.ex
| 0.820326 | 0.527499 |
rpiio.ex
|
starcoder
|
defmodule GoodTimes.Boundary do
@vsn GoodTimes.version
@moduledoc """
Return the first or last second of a unit of time.
Find the boundaries of a unit of time, i.e. the first/last second of a minute,
an hour, day, week, month or year.
Find the first second with `beginning_of_<time unit>/1` and the last second
with `end_of_<time unit>/1`. They operate on and return an Erlang datetime
based on Coordinated Universal Time (UTC).
## Examples
iex> {{2015, 2, 27}, {18, 30, 45}} |> end_of_month
{{2015, 2, 28}, {23, 59, 59}}
"""
@doc """
Returns the UTC date and time at the start of the given datetime's minute.
## Examples
iex> {{2015, 2, 27}, {18, 30, 45}} |> beginning_of_minute
{{2015, 2, 27}, {18, 30, 0}}
"""
@spec beginning_of_minute(GoodTimes.datetime) :: GoodTimes.datetime
def beginning_of_minute({date, {hour, minute, _}}), do: {date, {hour, minute, 0}}
@doc """
Returns the UTC date and time at the end of the given datetime's minute.
## Examples
iex> {{2015, 2, 27}, {18, 30, 45}} |> end_of_minute
{{2015, 2, 27}, {18, 30, 59}}
"""
@spec end_of_minute(GoodTimes.datetime) :: GoodTimes.datetime
def end_of_minute({date, {hour, minute, _}}), do: {date, {hour, minute, 59}}
@doc """
Returns the UTC date and time at the start of the given datetime's hour.
## Examples
iex> {{2015, 2, 27}, {18, 30, 45}} |> beginning_of_hour
{{2015, 2, 27}, {18, 0, 0}}
"""
@spec beginning_of_hour(GoodTimes.datetime) :: GoodTimes.datetime
def beginning_of_hour({date, {hour, _, _}}), do: {date, {hour, 0, 0}}
@doc """
Returns the UTC date and time at the end of the given datetime's hour.
## Examples
iex> {{2015, 2, 27}, {18, 30, 45}} |> end_of_hour
{{2015, 2, 27}, {18, 59, 59}}
"""
@spec end_of_hour(GoodTimes.datetime) :: GoodTimes.datetime
def end_of_hour({date, {hour, _, _}}), do: {date, {hour, 59, 59}}
@doc """
Returns the UTC date and time at the start of the given datetime's day.
## Examples
iex> {{2015, 2, 27}, {18, 30, 45}} |> beginning_of_day
{{2015, 2, 27}, {0, 0, 0}}
"""
@spec beginning_of_day(GoodTimes.datetime) :: GoodTimes.datetime
def beginning_of_day({date, _}), do: {date, {0, 0, 0}}
@doc """
Returns the UTC date and time at the end of the given datetime's day.
## Examples
iex> {{2015, 2, 27}, {18, 30, 45}} |> end_of_day
{{2015, 2, 27}, {23, 59, 59}}
"""
@spec end_of_day(GoodTimes.datetime) :: GoodTimes.datetime
def end_of_day({date, _}), do: {date, {23, 59, 59}}
@doc """
Returns the UTC date and time at the start of the given datetime's week.
## Examples
iex> {{2015, 2, 27}, {18, 30, 45}} |> beginning_of_week
{{2015, 2, 23}, {0, 0, 0}}
"""
@spec beginning_of_week(GoodTimes.datetime) :: GoodTimes.datetime
def beginning_of_week(datetime) do
datetime
|> GoodTimes.Generate.all_days_before
|> find_weekday(1)
|> GoodTimes.at({0, 0, 0})
end
@doc """
Returns the UTC date and time at the end of the given datetime's week.
## Examples
iex> {{2015, 2, 27}, {18, 30, 45}} |> end_of_week
{{2015, 3, 1}, {23, 59, 59}}
"""
@spec end_of_week(GoodTimes.datetime) :: GoodTimes.datetime
def end_of_week(datetime) do
datetime
|> GoodTimes.Generate.all_days_after
|> find_weekday(7)
|> GoodTimes.at({23, 59, 59})
end
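# Scans the (infinite) day stream for the first date falling on the given
# ISO week day, where 1 = Monday and 7 = Sunday per :calendar.day_of_the_week/1.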
defp find_weekday(stream, weekday) do
stream |> Enum.find(fn {date, _} -> weekday == :calendar.day_of_the_week date end)
end
@doc """
Returns the UTC date and time at the start of the given datetime's month.
## Examples
iex> {{2015, 2, 27}, {18, 30, 45}} |> beginning_of_month
{{2015, 2, 1}, {0, 0, 0}}
"""
@spec beginning_of_month(GoodTimes.datetime) :: GoodTimes.datetime
def beginning_of_month({{year, month, _}, _}), do: {{year, month, 1}, {0, 0, 0}}
@doc """
Returns the UTC date and time at the end of the given datetime's month.
## Examples
iex> {{2015, 2, 27}, {18, 30, 45}} |> end_of_month
{{2015, 2, 28}, {23, 59, 59}}
"""
@spec end_of_month(GoodTimes.datetime) :: GoodTimes.datetime
def end_of_month({{year, month, _}, _}) do
{{year, month, :calendar.last_day_of_the_month(year, month)}, {23, 59, 59}}
end
@doc """
Returns the UTC date and time at the start of the given datetime's year.
## Examples
iex> {{2015, 2, 27}, {18, 30, 45}} |> beginning_of_year
{{2015, 1, 1}, {0, 0, 0}}
"""
@spec beginning_of_year(GoodTimes.datetime) :: GoodTimes.datetime
def beginning_of_year({{year, _, _}, _}), do: {{year, 1, 1}, {0, 0, 0}}
@doc """
Returns the UTC date and time at the end of the given datetime's year.
## Examples
iex> {{2015, 2, 27}, {18, 30, 45}} |> end_of_year
{{2015, 12, 31}, {23, 59, 59}}
"""
@spec end_of_year(GoodTimes.datetime) :: GoodTimes.datetime
def end_of_year({{year, _, _}, _}), do: {{year, 12, 31}, {23, 59, 59}}
end
|
lib/good_times/boundary.ex
| 0.90842 | 0.777807 |
boundary.ex
|
starcoder
|
defmodule Google.Datastore.V1.PartitionId do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
project_id: String.t(),
namespace_id: String.t()
}
defstruct [:project_id, :namespace_id]
field :project_id, 2, type: :string
field :namespace_id, 4, type: :string
end
defmodule Google.Datastore.V1.Key.PathElement do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
id_type: {atom, any},
kind: String.t()
}
defstruct [:id_type, :kind]
oneof :id_type, 0
field :kind, 1, type: :string
field :id, 2, type: :int64, oneof: 0
field :name, 3, type: :string, oneof: 0
end
defmodule Google.Datastore.V1.Key do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
partition_id: Google.Datastore.V1.PartitionId.t() | nil,
path: [Google.Datastore.V1.Key.PathElement.t()]
}
defstruct [:partition_id, :path]
field :partition_id, 1, type: Google.Datastore.V1.PartitionId
field :path, 2, repeated: true, type: Google.Datastore.V1.Key.PathElement
end
defmodule Google.Datastore.V1.ArrayValue do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
values: [Google.Datastore.V1.Value.t()]
}
defstruct [:values]
field :values, 1, repeated: true, type: Google.Datastore.V1.Value
end
defmodule Google.Datastore.V1.Value do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
value_type: {atom, any},
meaning: integer,
exclude_from_indexes: boolean
}
defstruct [:value_type, :meaning, :exclude_from_indexes]
oneof :value_type, 0
field :null_value, 11, type: Google.Protobuf.NullValue, enum: true, oneof: 0
field :boolean_value, 1, type: :bool, oneof: 0
field :integer_value, 2, type: :int64, oneof: 0
field :double_value, 3, type: :double, oneof: 0
field :timestamp_value, 10, type: Google.Protobuf.Timestamp, oneof: 0
field :key_value, 5, type: Google.Datastore.V1.Key, oneof: 0
field :string_value, 17, type: :string, oneof: 0
field :blob_value, 18, type: :bytes, oneof: 0
field :geo_point_value, 8, type: Google.Type.LatLng, oneof: 0
field :entity_value, 6, type: Google.Datastore.V1.Entity, oneof: 0
field :array_value, 9, type: Google.Datastore.V1.ArrayValue, oneof: 0
field :meaning, 14, type: :int32
field :exclude_from_indexes, 19, type: :bool
end
defmodule Google.Datastore.V1.Entity.PropertiesEntry do
@moduledoc false
use Protobuf, map: true, syntax: :proto3
@type t :: %__MODULE__{
key: String.t(),
value: Google.Datastore.V1.Value.t() | nil
}
defstruct [:key, :value]
field :key, 1, type: :string
field :value, 2, type: Google.Datastore.V1.Value
end
defmodule Google.Datastore.V1.Entity do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
key: Google.Datastore.V1.Key.t() | nil,
properties: %{String.t() => Google.Datastore.V1.Value.t() | nil}
}
defstruct [:key, :properties]
field :key, 1, type: Google.Datastore.V1.Key
field :properties, 3,
repeated: true,
type: Google.Datastore.V1.Entity.PropertiesEntry,
map: true
end
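# Usage sketch (assuming the protobuf-elixir generated `new/1` constructors;
# the field values here are hypothetical):
#
#     key = Google.Datastore.V1.Key.new(
#       partition_id: Google.Datastore.V1.PartitionId.new(project_id: "my-project"),
#       path: [Google.Datastore.V1.Key.PathElement.new(kind: "Task", id_type: {:name, "task-1"})]
#     )
#     value = Google.Datastore.V1.Value.new(value_type: {:string_value, "buy milk"})
#     entity = Google.Datastore.V1.Entity.new(key: key, properties: %{"description" => value})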
|
lib/google/datastore/v1/entity.pb.ex
| 0.696784 | 0.572364 |
entity.pb.ex
|
starcoder
|
defmodule Aecore.Sync.Task do
@moduledoc """
Each sync task holds information about a syncing process with multiple peers,
where each peer is recognized as a worker: a peer_id plus the pid of a separate process doing the work.
A sync task works on a specific chain, meaning there is one sync task per chain.
If a worker is on a different chain (a fork, i.e. a chain other than the one we are already syncing against),
a new sync task will be started. In the normal case, where all is well,
all peers work on the same chain and there is only one sync task.
"""
alias Aecore.Sync.{Chain, Sync}
alias Aecore.Chain.Block
alias __MODULE__
require Logger
@type height :: non_neg_integer()
@type hash :: binary()
@typedoc "Id specifing the chain to which we are syncing"
@type chain_id :: reference()
@typedoc "Id of the peer we are communicating with"
@type peer_id :: pid()
@typedoc "List of all the sync tasks we are currently syncing against"
@type sync_tasks :: list(%Task{})
@typedoc "Id of the current task"
@type task_id :: reference()
@typedoc "Element holding weather we have this block or not,
and if we don't from where could we take it (local/remote peer)"
@type pool_elem :: {height(), hash(), {peer_id(), Block.t()} | {:ok, :local} | false}
@typedoc "On what header data (height + hash) do we agree upon when starting a sync task"
@type agreed :: %{height: height(), hash: hash()} | nil
@typedoc "Process resolving syncing implemetation with a specific peer"
@type worker :: {peer_id(), pid()}
@type t :: %Task{
id: task_id(),
chain: Chain.t(),
pool: list(pool_elem()),
agreed: agreed(),
adding: list(pool_elem()),
pending: list(pool_elem()),
workers: list(worker())
}
defstruct id: nil,
chain: nil,
pool: [],
agreed: nil,
adding: [],
pending: [],
workers: []
@spec init_sync_task(Chain.t()) :: Task.t()
def init_sync_task(%Chain{chain_id: id} = chain) do
%Task{id: id, chain: chain}
end
@spec get_sync_task(task_id(), Sync.t()) :: {:ok, Task.t()} | {:error, :not_found}
def get_sync_task(task_id, %Sync{sync_tasks: tasks}) do
case Enum.find(tasks, fn %{id: id} -> id == task_id end) do
nil -> {:error, :not_found}
task -> {:ok, task}
end
end
@spec set_sync_task(Task.t(), Sync.t()) :: Sync.t()
def set_sync_task(%Task{id: id} = task, %Sync{sync_tasks: tasks} = sync) do
%Sync{sync | sync_tasks: keystore(id, task, tasks)}
end
@spec set_sync_task(task_id(), Task.t(), Sync.t()) :: Sync.t()
def set_sync_task(id, %Task{} = task, %Sync{sync_tasks: tasks} = sync) do
%Sync{sync | sync_tasks: keystore(id, task, tasks)}
end
@spec delete_sync_task(Task.t(), Sync.t()) :: Sync.t()
def delete_sync_task(%Task{id: task_id}, %Sync{sync_tasks: tasks} = sync) do
%Sync{sync | sync_tasks: Enum.filter(tasks, fn %{id: id} -> id != task_id end)}
end
@spec do_update_sync_task(Sync.t(), task_id(), {:done | :error, peer_id()}) :: Sync.t()
def do_update_sync_task(sync, task_id, update) do
case get_sync_task(task_id, sync) do
{:ok, %Task{chain: %Chain{peers: peers} = task_chain} = task} ->
chain_with_removed_peer_id =
case update do
{:done, peer_id} -> %Chain{task_chain | peers: peers -- [peer_id]}
{:error, peer_id} -> %Chain{task_chain | peers: peers -- [peer_id]}
end
maybe_end_sync_task(sync, %Task{task | chain: chain_with_removed_peer_id})
{:error, :not_found} ->
Logger.info("#{__MODULE__}: Sync task not found!")
sync
end
end
@spec maybe_end_sync_task(Sync.t(), Task.t()) :: Sync.t()
def maybe_end_sync_task(sync, %Task{chain: chain} = task) do
case chain do
%Chain{peers: [], chain: [target_chain | _]} ->
Logger.info(
"#{__MODULE__}: Removing Sync task: task with target: #{inspect(target_chain)}"
)
delete_sync_task(task, sync)
_ ->
set_sync_task(task, sync)
end
end
@spec match_chain_to_task(Chain.t(), list(Task.t()), list()) ::
:no_match
| {:inconclusive, Chain.t(), {:get_header, chain_id(), peer_id(), height()}}
| {:match, Task.t()}
def match_chain_to_task(_incoming_chain, [], []), do: :no_match
def match_chain_to_task(incoming_chain, [], acc) do
{height, %Chain{chain_id: cid, peers: peers}} = hd(Enum.reverse(acc))
{:inconclusive, incoming_chain, {:get_header, cid, peers, height}}
end
def match_chain_to_task(incoming_chain, [%Task{chain: task_chain} = task | tasks], acc) do
case Chain.try_match_chains(Map.get(incoming_chain, :chain), Map.get(task_chain, :chain)) do
:equal ->
{:match, task}
:different ->
match_chain_to_task(incoming_chain, tasks, acc)
{:first, height} ->
match_chain_to_task(incoming_chain, tasks, [{height, incoming_chain} | acc])
{:second, height} ->
match_chain_to_task(incoming_chain, tasks, [{height, task_chain} | acc])
end
end
@doc """
Takes an id, a single element, and a list of elements. If an element with
this id is present in the list, it is replaced by the given element.
If not, the element is appended to the end of the list.
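## Examples

    iex> Aecore.Sync.Task.keystore(:b, {:b, 2}, [{:a, 1}, {:b, 1}])
    [{:a, 1}, {:b, 2}]

    iex> Aecore.Sync.Task.keystore(:c, {:c, 1}, [{:a, 1}, {:b, 1}])
    [{:a, 1}, {:b, 1}, {:c, 1}]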
"""
@spec keystore(peer_id() | task_id(), Task.t() | worker(), Task.t() | list(worker())) ::
sync_tasks() | list(worker())
def keystore(id, elem, elems) do
do_keystore(elems, elem, id, [])
end
defp do_keystore([{id, _} | elems], elem, id, acc) do
  # `acc` holds the already-visited prefix in reverse; reverse it back while
  # splicing the replacement in, so the original order is preserved.
  :lists.reverse(acc, [elem | elems])
end
defp do_keystore([%{id: id} | elems], elem, id, acc) do
  :lists.reverse(acc, [elem | elems])
end
defp do_keystore([head | elems], elem, id, acc) do
  do_keystore(elems, elem, id, [head | acc])
end
defp do_keystore([], elem, _id, acc) do
  # No match found: append the element to the end of the list.
  :lists.reverse([elem | acc])
end
end
|
apps/aecore/lib/aecore/sync/task.ex
| 0.744935 | 0.449997 |
task.ex
|
starcoder
|
defmodule Burette.Calendar do
@moduledoc """
Generator for dates and times
"""
@typep erl_date :: {Calendar.year, Calendar.month, Calendar.day}
@typep erl_time :: {Calendar.hour, Calendar.minute, Calendar.second}
@spec date(Keyword.t) :: Date.t
def date(params \\ []) do
{year, month, day} = make_date_tuple(params)
{:ok, date} = Date.new(year, month, day)
date
end
@spec time(Keyword.t) :: Time.t
def time(params \\ []) do
{hour, minute, second} = make_time_tuple(params)
{:ok, time} = Time.new(hour, minute, second)
time
end
@spec datetime(Keyword.t) :: DateTime.t
def datetime(params \\ []) do
date = make_date_tuple(params)
time = make_time_tuple(params)
erl_datetime_to_elx_datetime({date, time})
end
@spec future(Keyword.t) :: DateTime.t
@doc """
Generates a DateTime.t in the future.
NOTE: the datetime is returned as UTC.
You can pass to this function the same parameters you would pass to
`datetime/1`, but note that if the parameters you fix leave no room for a
date after the present, an `ArgumentError` is raised after 25 attempts.
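## Examples

    iex> Burette.Calendar.future(year: 2100).year
    2100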
"""
def future(params \\ []),
do: do_future(params)
@spec past(Keyword.t) :: DateTime.t
@doc """
Works just like `future/1` but the date generated is in the past
"""
def past(params \\ []),
do: do_past(params)
@spec do_past(Keyword.t, 0..25) :: DateTime.t
defp do_past(params, retry_count \\ 0) do
present = {{y, m, d}, {h, i, s}} = present_datetime()
generate_date = &param_bubble_transform(&1, &2, &3, fn x -> maybe_random_number(x) end)
generation_params = with \
p = [{:year, ^y}| _] <- generate_date.(params, :year, y..(y - 20)),
p = [{:month, ^m}| _] <- generate_date.(p, :month, m..1),
p = [{:day, ^d}| _] <- generate_date.(p, :day, d..1),
p = [{:hour, ^h}| _] <- generate_date.(p, :hour, h..0),
p = [{:minute, ^i}| _] <- generate_date.(p, :minute, i..0),
p = generate_date.(p, :second, max(s - 1, 0)..0)
do
p
end
past = {make_date_tuple(generation_params), make_time_tuple(generation_params)}
present_u = :calendar.datetime_to_gregorian_seconds(present)
past_u = :calendar.datetime_to_gregorian_seconds(past)
if present_u >= past_u do
erl_datetime_to_elx_datetime(past)
else
if 25 == retry_count do
raise ArgumentError,
message: """
parameters provided to Burette.Calendar.past/1 make it impossible to provide a correct date in the past
Last possible past date generated:
#{inspect erl_datetime_to_elx_datetime(past)}
Present date:
#{inspect erl_datetime_to_elx_datetime(present)}
Params:
#{inspect params}
"""
else
do_past(params, retry_count + 1)
end
end
end
@spec do_future(Keyword.t, 0..25) :: DateTime.t
defp do_future(params, retry_count \\ 0) do
present = {{y, m, d}, {h, i, s}} = present_datetime()
ldom = :calendar.last_day_of_the_month(y, m)
generate_date = &param_bubble_transform(&1, &2, &3, fn x -> maybe_random_number(x) end)
generation_params = with \
p = [{:year, ^y}| _] <- generate_date.(params, :year, y..(y + 20)),
p = [{:month, ^m}| _] <- generate_date.(p, :month, m..12),
p = [{:day, ^d}| _] <- generate_date.(p, :day, d..ldom),
p = [{:hour, ^h}| _] <- generate_date.(p, :hour, h..23),
p = [{:minute, ^i}| _] <- generate_date.(p, :minute, i..59),
p = generate_date.(p, :second, min(s + 1, 59)..59)
do
p
end
future = {make_date_tuple(generation_params), make_time_tuple(generation_params)}
present_u = :calendar.datetime_to_gregorian_seconds(present)
future_u = :calendar.datetime_to_gregorian_seconds(future)
if present_u <= future_u do
erl_datetime_to_elx_datetime(future)
else
if 25 == retry_count do
raise ArgumentError,
message: """
parameters provided to Burette.Calendar.future/1 make it impossible to provide a correct date in the future
Last possible future date generated:
#{inspect erl_datetime_to_elx_datetime(future)}
Present date:
#{inspect erl_datetime_to_elx_datetime(present)}
Params:
#{inspect params}
"""
else
do_future(params, retry_count + 1)
end
end
end
@spec make_date_tuple(Keyword.t) :: erl_date
defp make_date_tuple(params) do
year =
params
|> Keyword.get(:year, 1950..2050)
|> maybe_random_number()
month =
params
|> Keyword.get(:month, 1..12)
|> maybe_random_number()
ldom = :calendar.last_day_of_the_month(year, month)
dp = Keyword.get(params, :day, 1..ldom)
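# If a fixed day exceeds the month's last day, clamp it to the last day.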
day = maybe_random_number(is_integer(dp) && dp > ldom && ldom || dp)
{year, month, day}
end
@spec make_time_tuple(Keyword.t) :: erl_time
defp make_time_tuple(params) do
hour =
params
|> Keyword.get(:hour, 0..23)
|> maybe_random_number()
minute =
params
|> Keyword.get(:minute, 0..59)
|> maybe_random_number()
# Ignore leap seconds
second =
params
|> Keyword.get(:second, 0..59)
|> maybe_random_number()
{hour, minute, second}
end
@spec erl_datetime_to_elx_datetime({erl_date, erl_time}) :: DateTime.t
defp erl_datetime_to_elx_datetime(erl_datetime) do
erl_datetime
|> :calendar.datetime_to_gregorian_seconds()
|> Kernel.-(62_167_219_200) # EPOCH in seconds
|> DateTime.from_unix!()
end
@spec present_datetime() :: {erl_date, erl_time}
defp present_datetime do
:calendar.local_time()
|> :calendar.local_time_to_universal_time_dst()
|> case do
[datetime_utc] ->
datetime_utc
[_dst, datetime_utc] ->
# This happens on a local time that is switching from dst. At that
# moment, there are two possible different utc datetimes.
# To avoid bugs, the library will prefer the one from future
datetime_utc
end
end
@spec maybe_random_number(Range.t | integer) :: integer
defp maybe_random_number(m..n),
do: Burette.Number.number(m..n)
defp maybe_random_number(v) when is_integer(v),
do: v
@spec param_bubble_transform(Keyword.t, atom, term, ((term) -> term)) :: Keyword.t
defp param_bubble_transform(keywords, key, default, fun) do
keywords
|> Keyword.put_new(key, default)
|> pop_update(key, fun)
end
@spec pop_update(Keyword.t, atom, term, ((term) -> term)) :: Keyword.t
defp pop_update(keywords, key, fun, acc \\ [])
defp pop_update([{k, v}| t], k, fun, acc),
do: [{k, fun.(v)}| :lists.reverse(acc)] ++ t
defp pop_update([h| t], k, fun, acc),
do: pop_update(t, k, fun, [h| acc])
defp pop_update([], _, _, acc),
do: :lists.reverse(acc)
end
|
lib/burette/calendar.ex
| 0.775902 | 0.557905 |
calendar.ex
|
starcoder
|
defmodule DarkMatter.Decimals do
@moduledoc """
Decimal Utils
"""
@moduledoc since: "1.0.0"
alias DarkMatter.Decimals.Arithmetic
alias DarkMatter.Decimals.Comparison
alias DarkMatter.Decimals.Conversion
alias DarkMatter.Decimals.Variance
@type decimal_map() :: %{sign: -1 | 1, coef: non_neg_integer(), exp: non_neg_integer()}
@doc """
Casts an `x` of type `t:DarkMatter.numeric/0` into a `t:Decimal.t/0`.
## Examples
iex> cast_decimal(0.11)
%Decimal{coef: 11, exp: -2}
iex> cast_decimal(%{sign: -1, coef: 11, exp: -2})
%Decimal{sign: -1, coef: 11, exp: -2}
iex> cast_decimal(%Decimal{sign: -1, coef: 11, exp: -2})
%Decimal{sign: -1, coef: 11, exp: -2}
iex> cast_decimal(1_000, :normal)
%Decimal{coef: 1_000, exp: 0}
iex> cast_decimal(1_000, :reduced)
%Decimal{coef: 1, exp: 3}
"""
@spec cast_decimal(any()) :: :error | nil | Decimal.t()
defdelegate cast_decimal(x), to: Conversion
@spec cast_decimal(any(), Conversion.conversion_modes()) :: :error | nil | Decimal.t()
defdelegate cast_decimal(x, mode), to: Conversion
@doc """
Casts an `x` of type `t:DarkMatter.numeric/0` into a `t:Decimal.t/0`.
Raises `ArgumentError` if given a non-numeric.
## Examples
iex> cast_decimal!(0.11)
%Decimal{coef: 11, exp: -2}
iex> cast_decimal!(nil)
** (ArgumentError) invalid argument nil
iex> cast_decimal!(1_000, :normal)
%Decimal{coef: 1_000, exp: 0}
iex> cast_decimal!(1_000, :reduced)
%Decimal{coef: 1, exp: 3}
"""
@spec cast_decimal!(any()) :: Decimal.t()
defdelegate cast_decimal!(x), to: Conversion
@spec cast_decimal!(any(), Conversion.conversion_modes()) :: Decimal.t()
defdelegate cast_decimal!(x, mode), to: Conversion
@doc """
Casts an `x` of type `t:DarkMatter.numeric/0` into a `t:Decimal.t/0`.
Returns `{:ok, %Decimal{}}` or `:error`
## Examples
iex> cast_decimal_ok(0.11)
{:ok, %Decimal{coef: 11, exp: -2}}
iex> cast_decimal_ok(nil)
:error
iex> cast_decimal_ok(1_000, :normal)
{:ok, %Decimal{coef: 1_000, exp: 0}}
iex> cast_decimal_ok(1_000, :reduced)
{:ok, %Decimal{coef: 1, exp: 3}}
"""
@spec cast_decimal_ok(any()) :: {:ok, Decimal.t()} | :error
defdelegate cast_decimal_ok(x), to: Conversion
@spec cast_decimal_ok(any(), Conversion.conversion_modes()) :: {:ok, Decimal.t()} | :error
defdelegate cast_decimal_ok(x, mode), to: Conversion
@doc """
Adds `x` and `y` of type `t:DarkMatter.numeric/0`.
## Examples
iex> decimal_add(1, 2.5)
%Decimal{coef: 35, exp: -1}
"""
@spec decimal_add(DarkMatter.numeric(), DarkMatter.strict_numeric()) :: Decimal.t()
defdelegate decimal_add(x, y), to: Arithmetic
@doc """
Averages a `list` of type `t:DarkMatter.numeric/0`.
## Examples
iex> decimal_avg([8, 9, "10.5", 13.3, "$1.23", %Decimal{coef: 33}])
%Decimal{coef: 12505, exp: -3}
iex> decimal_avg([], 711)
%Decimal{coef: 711, exp: 0}
"""
@spec decimal_avg([DarkMatter.numeric()]) :: Decimal.t()
defdelegate decimal_avg(list), to: Arithmetic
@spec decimal_avg([DarkMatter.numeric()], DarkMatter.strict_numeric()) :: Decimal.t()
defdelegate decimal_avg(list, default), to: Arithmetic
@doc """
Divides `x` and `y` of type `t:DarkMatter.numeric/0`.
Returns `0` or `default` (if given) when dividing by `0`.
## Examples
iex> decimal_div(30, 2.5)
%Decimal{coef: 12, exp: 0}
iex> decimal_div(0, 0)
%Decimal{coef: 0, exp: 0}
iex> decimal_div(0, 0, 989)
%Decimal{coef: 989, exp: 0}
"""
@spec decimal_div(DarkMatter.numeric(), DarkMatter.strict_numeric()) :: Decimal.t()
defdelegate decimal_div(x, y), to: Arithmetic
@spec decimal_div(DarkMatter.numeric(), DarkMatter.numeric(), DarkMatter.strict_numeric()) ::
Decimal.t()
defdelegate decimal_div(x, y, default), to: Arithmetic
@doc """
Multiplies `x` and `y` of type `t:DarkMatter.numeric/0`.
## Examples
iex> decimal_mult(33, 21.523)
%Decimal{coef: 710259, exp: -3}
iex> decimal_mult(0, 0)
%Decimal{coef: 0, exp: 0}
iex> decimal_mult(1, 989)
%Decimal{coef: 989, exp: 0}
"""
@spec decimal_mult(DarkMatter.numeric(), DarkMatter.numeric()) :: Decimal.t()
defdelegate decimal_mult(x, y), to: Arithmetic
@doc """
Subtracts `x` from `y` of type `t:DarkMatter.numeric/0`.
## Examples
iex> decimal_sub(1, 2.5)
%Decimal{sign: -1, coef: 15, exp: -1}
"""
@spec decimal_sub(DarkMatter.numeric(), DarkMatter.numeric()) :: Decimal.t()
defdelegate decimal_sub(x, y), to: Arithmetic
@doc """
Sums a `list` of type `t:DarkMatter.numeric/0`.
## Examples
iex> decimal_sum([8, 9, "10.5", 13.3, "$1.23", %Decimal{coef: 33}])
%Decimal{coef: 7503, exp: -2}
iex> decimal_sum([], 711)
%Decimal{coef: 711, exp: 0}
"""
@spec decimal_sum([DarkMatter.numeric()]) :: Decimal.t()
defdelegate decimal_sum(list), to: Arithmetic
@spec decimal_sum([DarkMatter.numeric()], DarkMatter.strict_numeric()) :: Decimal.t()
defdelegate decimal_sum(list, default), to: Arithmetic
@doc """
Gives the percentage of `x` relative to `y` of type `t:DarkMatter.numeric/0`.
## Examples
iex> decimal_percentage(20, 100)
%Decimal{coef: 2, exp: 1}
"""
@spec decimal_percentage(DarkMatter.numeric(), DarkMatter.numeric()) :: Decimal.t()
defdelegate decimal_percentage(x, y), to: Arithmetic
@doc """
Gives the decimal representation of an`x` of type `t:DarkMatter.numeric/0`.
## Examples
iex> from_percentage(25)
%Decimal{coef: 25, exp: -2}
"""
@spec from_percentage(DarkMatter.numeric()) :: Decimal.t()
defdelegate from_percentage(x), to: Arithmetic
@doc """
Gives the percentage representation of an`x` of type `t:DarkMatter.numeric/0`.
## Examples
iex> to_percentage(0.25)
%Decimal{coef: 25, exp: 0}
"""
@spec to_percentage(DarkMatter.numeric()) :: Decimal.t()
defdelegate to_percentage(x), to: Arithmetic
@doc """
Compares `x` of type `t:DarkMatter.numeric/0` to `y` of type `t:DarkMatter.numeric/0`.
Returns `:eq` or `:gt` or `:lt`.
## Examples
iex> decimal_compare(1, 1)
:eq
iex> decimal_compare(3, 0)
:gt
iex> decimal_compare(1, 2)
:lt
"""
@spec decimal_compare(DarkMatter.numeric(), DarkMatter.numeric()) :: Comparison.comparison()
defdelegate decimal_compare(x, y), to: Comparison
@doc """
Determines if `x` of type `t:DarkMatter.numeric/0` is equivalent to `y` of type `t:DarkMatter.numeric/0`.
Returns `true` or `false`.
## Examples
iex> decimal_equal?(1, 1)
true
iex> decimal_equal?(3, 0)
false
iex> decimal_equal?(nil, 2)
** (FunctionClauseError) no function clause matching in DarkMatter.Decimals.Comparison.decimal_compare/2
"""
@spec decimal_equal?(DarkMatter.numeric(), DarkMatter.numeric()) :: boolean()
defdelegate decimal_equal?(x, y), to: Comparison
@doc """
Rounds an `x` of type `t:DarkMatter.numeric/0` based on the `opts`.
Returns `round_up * floor((x + round_up / 2) / round_up)` for the `:round_up` option.
## Examples
iex> decimal_round_ok(25.11, round_up: 50)
{:ok, %Decimal{coef: 5, exp: 1}}
iex> decimal_round_ok(50, round_up: 50)
{:ok, %Decimal{coef: 5, exp: 1}}
iex> decimal_round_ok(0, round_up: 50)
{:ok, %Decimal{coef: 0, exp: 0}}
"""
@spec decimal_round_ok(any(), Conversion.round_options()) :: {:ok, Decimal.t()} | :error
defdelegate decimal_round_ok(x, opts), to: Conversion
@doc """
Returns whether an `x` of type `t:DarkMatter.numeric/0` is already rounded according to `opts`.
## Examples
iex> rounded?(25.11, round_up: 50)
false
iex> rounded?(50, round_up: 50)
true
iex> rounded?(0, round_up: 50)
true
"""
@spec rounded?(any(), Conversion.round_options()) :: boolean()
defdelegate rounded?(x, opts), to: Conversion
@doc """
Converts an `x` of type `t:DarkMatter.maybe_numeric/0` into a number, or `nil` if it cannot be parsed.
## Examples
iex> to_number(0.11)
0.11
iex> to_number(%Decimal{coef: 124_225, exp: -3})
124.225
iex> to_number("$0")
0
iex> to_number(nil)
nil
iex> to_number("xyz")
nil
"""
@spec to_number(DarkMatter.maybe_numeric()) :: DarkMatter.maybe_number()
defdelegate to_number(x), to: Conversion
@doc """
Converts an `x` of type `nil` or `t:DarkMatter.numeric/0` into a `t:String.t/0`.
## Examples
iex> to_string(%Decimal{coef: 12, exp: -10})
"0.0000000012"
iex> to_string(%Decimal{coef: 124_225, exp: -3})
"124.225"
iex> to_string("$0")
"0"
iex> to_string(nil)
nil
iex> to_string("xyz")
** (Decimal.Error) number parsing syntax: "xyz"
"""
@spec to_string(Conversion.stringable()) :: String.t() | nil
defdelegate to_string(x), to: Conversion
@spec to_string(Conversion.stringable(), Conversion.to_string_formatter()) :: String.t() | nil
defdelegate to_string(x, mode), to: Conversion
@doc """
Determines the max variance percent of a `list` of type `t:DarkMatter.numeric/0`.
Defaults to `100` if given an empty `list`.
## Examples
iex> max_variance_percent([8, 9, "10.5", 13.3, "$1.23", %Decimal{coef: 33}])
%Decimal{coef: 2638944422231107556977209116, exp: -25}
iex> max_variance_percent([])
%Decimal{coef: 1, exp: 2}
iex> max_variance_percent([], {0, 100})
%Decimal{coef: 0, exp: 0}
iex> max_variance_percent([1], {0, 100})
%Decimal{coef: 1, exp: 2}
"""
@spec max_variance_percent([DarkMatter.numeric()]) :: Decimal.t()
defdelegate max_variance_percent(list), to: Variance
@spec max_variance_percent([DarkMatter.numeric()], Variance.minmax()) :: Decimal.t()
defdelegate max_variance_percent(list, default), to: Variance
@doc """
Determines the variance percent of a `list` of type `t:DarkMatter.numeric/0`.
Defaults to `0` if given an empty `list` or `100` if given a single item `list`.
## Examples
iex> variance_percent([8, 9, "10.5", 13.3, "$1.23", %Decimal{coef: 33}])
%Decimal{coef: 2831837255702387281334649757, exp: -25}
iex> variance_percent([])
%Decimal{coef: 0, exp: 0}
iex> variance_percent([1_000])
%Decimal{coef: 1, exp: 2}
iex> variance_percent([], {0, 100})
%Decimal{coef: 0, exp: 0}
iex> variance_percent([1], {0, 100})
%Decimal{coef: 1, exp: 2}
"""
@spec variance_percent([DarkMatter.numeric()]) :: Decimal.t()
defdelegate variance_percent(list), to: Variance
@spec variance_percent([DarkMatter.numeric()], Variance.minmax()) :: Decimal.t()
defdelegate variance_percent(list, default), to: Variance
@doc """
Determines the variance of a `list` of type `t:DarkMatter.numeric/0`.
Defaults to `0` if given an empty or single item `list`.
## Examples
iex> variance([8, 9, "10.5", 13.3, "$1.23", %Decimal{coef: 33}])
%Decimal{coef: 11688055, exp: -5}
iex> variance([])
%Decimal{coef: 0, exp: 0}
iex> variance([1_000])
%Decimal{coef: 0, exp: 0}
"""
@spec variance([DarkMatter.numeric()]) :: Decimal.t()
defdelegate variance(list), to: Variance
@doc """
Annualize a monthly amount
## Examples
iex> annualize(1)
%Decimal{coef: 12, exp: 0}
iex> annualize("$145.23")
%Decimal{coef: 174276, exp: -2}
iex> annualize(nil, 1)
%Decimal{coef: 1, exp: 0}
"""
@spec annualize(DarkMatter.numeric(), DarkMatter.strict_numeric()) :: Decimal.t()
def annualize(x, default \\ 0)
def annualize(nil, default), do: cast_decimal(default)
def annualize(x, _default), do: decimal_mult(x, 12)
end
|
lib/dark_matter/decimals.ex
| 0.952574 | 0.468 |
decimals.ex
|
starcoder
|
defmodule FusionAuth.JWT do
@moduledoc """
The `FusionAuth.JWT` module provides access functions to the [FusionAuth JWT API](https://fusionauth.io/docs/v1/tech/apis/jwt).
Most functions require a Tesla Client struct created with `FusionAuth.client(base_url, api_key, tenant_id)`.
Those that use JWT Authentication may require a different `api_key` structure.
See [JWT Authentication](https://fusionauth.io/docs/v1/tech/apis/authentication#jwt-authentication) for examples of how you can send the JWT to FusionAuth.
"""
alias FusionAuth.Utils
@type client :: FusionAuth.client()
@type result :: FusionAuth.result()
@jwt_issue_url "/api/jwt/issue"
@jwt_reconcile_url "/api/jwt/reconcile"
@jwt_public_key_url "/api/jwt/public-key"
@jwt_refresh_url "/api/jwt/refresh"
@jwt_validate_url "/api/jwt/validate"
@doc """
Issue an Access Token by Application ID
This API is used to issue a new access token (JWT) using an existing access token (JWT).
This API provides the single sign-on mechanism for access tokens. For example, you have an access token for application A and you need an access token for application B.
You may use this API to request an access token for application B using the authorized token for application A. The returned access token will have the same expiration as the one provided.
This API will use a JWT as authentication. See [JWT Authentication](https://fusionauth.io/docs/v1/tech/apis/authentication#jwt-authentication) for examples of how you can send the JWT to FusionAuth.
## Examples
iex> FusionAuth.JWT.issue_jwt_by_application_id(client, token, application_id, refresh_token)
{
:ok,
%{
"token" => "<KEY>"
},
%Tesla.Env{...}
}
iex>
For more information, visit the FusionAuth API Documentation for [Issue a JWT](https://fusionauth.io/docs/v1/tech/apis/jwt#issue-a-jwt).
"""
@spec issue_jwt_by_application_id(client(), String.t(), String.t(), String.t()) :: result()
def issue_jwt_by_application_id(client, token, application_id, refresh_token) do
client = jwt_client(client, "Bearer #{token}")
parameters = [
applicationId: application_id,
refreshToken: refresh_token
]
Tesla.get(
client,
@jwt_issue_url <> Utils.build_query_parameters(parameters)
)
|> FusionAuth.result()
end
@doc """
Reconcile a JWT
The Reconcile API is used to take a JWT issued by a third party identity provider as described by an [Identity Provider](https://fusionauth.io/docs/v1/tech/apis/identity-providers/) configuration and reconcile the User represented by the JWT to FusionAuth.
For more information, visit the FusionAuth API Documentation for [Reconcile a JWT](https://fusionauth.io/docs/v1/tech/apis/jwt#reconcile-a-jwt).
"""
@spec reconcile_jwt(client(), String.t(), map(), String.t()) :: result()
def reconcile_jwt(client, application_id, data, identity_provider_id) do
post_data = %{
applicationId: application_id,
data: data,
identityProviderId: identity_provider_id
}
Tesla.post(client, @jwt_reconcile_url, post_data)
|> FusionAuth.result()
end
@doc """
Retrieve all Public Keys
This API is used to retrieve Public Keys generated by FusionAuth, which are used to cryptographically verify JWT signatures signed with the corresponding RSA or ECDSA private key.
For more information, visit the FusionAuth API Documentation for [Retrieve Public Keys](https://fusionauth.io/docs/v1/tech/apis/jwt#retrieve-public-keys).
"""
@spec get_public_keys(client()) :: result()
def get_public_keys(client) do
Tesla.get(client, @jwt_public_key_url)
|> FusionAuth.result()
end
@doc """
Retrieve a single Public Key for a specific Application by Application Id
For more information, visit the FusionAuth API Documentation for [Retrieve Public Keys](https://fusionauth.io/docs/v1/tech/apis/jwt#retrieve-public-keys).
"""
@spec get_public_key_by_application_id(client(), String.t()) :: result()
def get_public_key_by_application_id(client, application_id) do
parameters = [applicationId: application_id]
Tesla.get(client, @jwt_public_key_url <> Utils.build_query_parameters(parameters))
|> FusionAuth.result()
end
@doc """
Retrieve a single Public Key by Key Identifier
For more information, visit the FusionAuth API Documentation for [Retrieve Public Keys](https://fusionauth.io/docs/v1/tech/apis/jwt#retrieve-public-keys).
"""
@spec get_public_key_by_key_id(client(), String.t()) :: result()
def get_public_key_by_key_id(client, public_key_id) do
parameters = [kid: public_key_id]
Tesla.get(client, @jwt_public_key_url <> Utils.build_query_parameters(parameters))
|> FusionAuth.result()
end
@doc """
Request a new Access Token by presenting a valid Refresh Token
The refresh token may be provided either in the HTTP request body or as a cookie. If both are provided, the cookie will take precedence.
## Examples
iex> FusionAuth.JWT.refresh_jwt(client, refresh_token, token)
{
:ok,
%{
"token" => "<KEY>"
},
%Tesla.Env{...}
}
For more information, visit the FusionAuth API Documentation for [Refresh a JWT](https://fusionauth.io/docs/v1/tech/apis/jwt#refresh-a-jwt).
"""
@spec refresh_jwt(client(), String.t(), String.t()) :: result()
def refresh_jwt(client, refresh_token, token) do
post_data = %{
refreshToken: refresh_token,
token: token
}
Tesla.post(client, @jwt_refresh_url, post_data)
|> FusionAuth.result()
end
@doc """
Retrieve Refresh Tokens issued to a User by User ID
## Examples
iex> FusionAuth.JWT.get_user_refresh_tokens_by_user_id(client, user_id)
{
:ok,
%{
"refreshTokens" => [...]
},
%Tesla.Env{...}
}
For more information, visit the FusionAuth API Documentation for [Retrieve Refresh Tokens](https://fusionauth.io/docs/v1/tech/apis/jwt#retrieve-refresh-tokens).
"""
@spec get_user_refresh_tokens_by_user_id(client(), String.t()) :: result()
def get_user_refresh_tokens_by_user_id(client, user_id) do
parameters = [userId: user_id]
Tesla.get(client, @jwt_refresh_url <> Utils.build_query_parameters(parameters))
|> FusionAuth.result()
end
@doc """
Retrieve Refresh Tokens issued to a User
This API will use a JWT as authentication. See [JWT Authentication](https://fusionauth.io/docs/v1/tech/apis/authentication#jwt-authentication) for examples of how you can send the JWT to FusionAuth.
## Examples
iex> FusionAuth.JWT.get_user_refresh_tokens(client, token)
{
:ok,
%{
"refreshTokens" => [...]
},
%Tesla.Env{...}
}
For more information, visit the FusionAuth API Documentation for [Retrieve Refresh Tokens](https://fusionauth.io/docs/v1/tech/apis/jwt#retrieve-refresh-tokens).
"""
@spec get_user_refresh_tokens(client(), String.t()) :: result()
def get_user_refresh_tokens(client, token) do
client = jwt_client(client, "Bearer #{token}")
Tesla.get(
client,
@jwt_refresh_url
)
|> FusionAuth.result()
end
@doc """
Revoke all Refresh Tokens for an entire Application
## Examples
iex> JWT.revoke_refresh_tokens_by_application_id(client, application_id)
{
:ok,
"",
%Tesla.Env{...}
}
For more information, visit the FusionAuth API Documentation for [Revoke Refresh Tokens](https://fusionauth.io/docs/v1/tech/apis/jwt#revoke-refresh-tokens).
"""
@spec revoke_refresh_tokens_by_application_id(client(), String.t()) :: result()
def revoke_refresh_tokens_by_application_id(client, application_id) do
parameters = [applicationId: application_id]
Tesla.delete(client, @jwt_refresh_url <> Utils.build_query_parameters(parameters))
|> FusionAuth.result()
end
@doc """
Revoke all Refresh Tokens issued to a User
## Examples
iex> FusionAuth.JWT.revoke_refresh_token(client, user_id)
{
:ok,
"",
%Tesla.Env{...}
}
For more information, visit the FusionAuth API Documentation for [Revoke Refresh Tokens](https://fusionauth.io/docs/v1/tech/apis/jwt#revoke-refresh-tokens).
"""
@spec revoke_refresh_tokens_by_user_id(client(), String.t()) :: result()
def revoke_refresh_tokens_by_user_id(client, user_id) do
parameters = [userId: user_id]
Tesla.delete(client, @jwt_refresh_url <> Utils.build_query_parameters(parameters))
|> FusionAuth.result()
end
@doc """
Revoke a single Refresh Token
This API may be authenticated using an Access Token. See Authentication for examples of authenticating using an Access Token. If one is provided, the refresh token's owner must match the identity in the access token for the request to succeed.
## Examples
iex> FusionAuth.JWT.revoke_refresh_token(client, token)
{
:ok,
"",
%Tesla.Env{...}
}
For more information, visit the FusionAuth API Documentation for [Revoke Refresh Tokens](https://fusionauth.io/docs/v1/tech/apis/jwt#revoke-refresh-tokens).
"""
@spec revoke_refresh_token(client(), String.t()) :: result()
def revoke_refresh_token(client, token) do
parameters = [token: token]
Tesla.delete(client, @jwt_refresh_url <> Utils.build_query_parameters(parameters))
|> FusionAuth.result()
end
@doc """
Validate Access Token
The access token can be provided to the API using an HTTP request header, or a cookie. The response body will contain the decoded JWT payload.
## Examples
iex> FusionAuth.JWT.validate_jwt(client, token)
{
:ok,
%{
"jwt" => %{
"authenticationType" => "PASSWORD",
"email" => "<EMAIL>",
"email_verified" => true,
"exp" => 1591815558,
"iat" => 1591811958,
"iss" => "acme.com",
"sub" => "fffc8648-bab2-4bdd-b2eb-a48e853d9217"
}
},
%Tesla.Env{...}
}
For more information, visit the FusionAuth API Documentation for [Validate a JWT](https://fusionauth.io/docs/v1/tech/apis/jwt#validate-a-jwt).
"""
@spec validate_jwt(client(), String.t()) :: result()
def validate_jwt(client, token) do
client = jwt_client(client, "JWT #{token}")
Tesla.get(
client,
@jwt_validate_url
)
|> FusionAuth.result()
end
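# Builds a client that authenticates with the given JWT instead of the API key:
# pops the last entry off the Tesla client's `:pre` middleware stack (the
# headers installed by `FusionAuth.client/3`) and prepends tenant-id and
# Authorization headers.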
defp jwt_client(client, authorization) do
tenant_id = Application.get_env(:fusion_auth, :tenant_id)
config = Map.get(client, :pre)
headers =
{Tesla.Middleware.Headers, :call,
[
[
{"X-FusionAuth-TenantId", tenant_id},
{"Authorization", authorization}
]
]}
{_, config} = List.pop_at(config, -1)
Map.put(client, :pre, [headers | config])
end
end
|
lib/fusion_auth/jwt.ex
| 0.881831 | 0.496277 |
jwt.ex
|
starcoder
|
defmodule Harald.HCI.SynchronousData do
@moduledoc """
Reference: version 5.2, vol 4, part E, 5.4.3.
"""
@enforce_keys [
:connection_handle,
:packet_status_flag,
:rfu,
:data_total_length,
:data
]
defstruct [
:connection_handle,
:packet_status_flag,
:rfu,
:data_total_length,
:data
]
def decode(<<
connection_handle::bits-size(12),
encoded_packet_status_flag::size(2),
rfu::size(2),
data_total_length,
data::binary-size(data_total_length)
>>) do
decoded = %__MODULE__{
connection_handle: connection_handle,
packet_status_flag: decode_packet_status_flag!(encoded_packet_status_flag),
rfu: rfu,
data_total_length: data_total_length,
data: data
}
{:ok, decoded}
end
def encode(%__MODULE__{
connection_handle: connection_handle,
packet_status_flag: decoded_packet_status_flag,
rfu: rfu,
data_total_length: data_total_length,
data: data
}) do
encoded_packet_status_flag = encode_packet_status_flag!(decoded_packet_status_flag)
encoded = <<
connection_handle::bits-size(12),
encoded_packet_status_flag::size(2),
rfu::size(2),
data_total_length,
data::binary
>>
{:ok, encoded}
end
def new(connection_handle, packet_status_flag, rfu, data) do
synchronous_data = %__MODULE__{
connection_handle: connection_handle,
packet_status_flag: packet_status_flag,
rfu: rfu,
data_total_length: byte_size(data),
data: data
}
{:ok, synchronous_data}
end
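# Round-trip sketch (hypothetical values): the connection handle is a 12-bit
# bitstring and the packet status flag is a map whose :value is 0b00..0b11.
#
#     {:ok, sd} = Harald.HCI.SynchronousData.new(<<1::size(12)>>, %{value: 0b00}, 0, <<1, 2, 3>>)
#     {:ok, bin} = Harald.HCI.SynchronousData.encode(sd)
#     {:ok, _decoded} = Harald.HCI.SynchronousData.decode(bin)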
defp decode_packet_status_flag!(0b00 = bc_flag) do
%{
description:
"Correctly received data. The payload data belongs to received eSCO or SCO packets that the baseband marked as \"good data\".",
value: bc_flag
}
end
defp decode_packet_status_flag!(0b01 = bc_flag) do
%{
description:
"Possibly invalid data. At least one eSCO packet has been marked by the baseband as \"data with possible errors\" and all others have been marked as \"good data\" in the eSCO interval(s) corresponding to the HCI Synchronous Data packet.",
value: bc_flag
}
end
defp decode_packet_status_flag!(0b10 = bc_flag) do
%{
description:
"No data received. All data from the baseband received during the (e)SCO interval(s) corresponding to the HCI Synchronous Data packet have been marked as \"lost data\" by the baseband. The Payload data octets shall be set to 0.",
value: bc_flag
}
end
defp decode_packet_status_flag!(0b11 = bc_flag) do
%{
description:
"Data partially lost. Not all, but at least one (e)SCO packet has been marked as \"lost data\" by the baseband in the (e)SCO intervals corresponding to the HCI Synchronous Data packet. The payload data octets corresponding to the missing (e)SCO packets shall be set to 0.",
value: bc_flag
}
end
defp encode_packet_status_flag!(%{value: encoded_bc_flag})
when encoded_bc_flag in [0b00, 0b01, 0b10, 0b11] do
encoded_bc_flag
end
end
|
lib/harald/hci/synchronous_data.ex
| 0.668664 | 0.426172 |
synchronous_data.ex
|
starcoder
|
defmodule Juice do
@moduledoc """
Reduce in-memory data structures using a lightweight query language.
"""
alias Juice.Expression
def squeeze(source, query) when is_bitstring(query) do
expression = Expression.parse(query)
squeeze(source, expression)
end
def squeeze(source, expression) when is_list(expression) do
expression
|> Enum.reduce(
empty_acc(source),
&eval(source, &1, &2)
)
end
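# A parsed expression is a list of `{op, key_chain}` tuples, so `squeeze/2`
# can be driven directly with one, e.g.
#
#     Juice.squeeze(%{"a" => 1, "b" => %{"c" => 2, "d" => 3}}, [{:+, ["b", "c"]}])
#     #=> %{"b" => %{"c" => 2}}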
defp eval(source, {:+, ["*"]}, _), do: source
defp eval(source, {:-, ["*"]}, _), do: empty_acc(source)
defp eval(source, {:+, key_chain}, acc) do
collect(key_chain, source, acc)
end
defp eval(_, {:-, key_chain}, acc) do
reject(key_chain, acc)
end
defp collect([key | []], source, acc) when is_list(source) and is_list(acc) do
cond do
Enum.member?(source, key) ->
collect_intersection(key, source, acc)
Enum.member?(source, key |> String.to_atom()) ->
collect_intersection(key |> String.to_atom(), source, acc)
true ->
acc
end
end
defp collect([key | []], source, acc) when is_map(source) and is_map(acc) do
key
|> match(source)
|> case do
{:ok, {matched_key, matched_value}} ->
Map.put(acc, matched_key, matched_value)
{:error, :not_found} ->
acc
end
end
defp collect([key | tail], source, acc) when is_map(source) and is_map(acc) do
key
|> match(source)
|> case do
{:ok, {matched_key, matched_value}} ->
default_acc = empty_acc(matched_value)
sub_acc = Map.get(acc, matched_key, default_acc)
collected = collect(tail, matched_value, sub_acc)
Map.put(acc, matched_key, collected)
{:error, :not_found} ->
acc
end
end
defp collect_intersection(key, source, acc) do
source_set = MapSet.new(source)
acc_set = MapSet.new([key | acc])
source_set
|> MapSet.intersection(acc_set)
|> MapSet.to_list()
end
defp match(key, source) do
atom_key = String.to_atom(key)
cond do
Map.has_key?(source, atom_key) ->
{:ok, {atom_key, Map.get(source, atom_key)}}
Map.has_key?(source, key) ->
{:ok, {key, Map.get(source, key)}}
true ->
{:error, :not_found}
end
end
defp reject([key | []], acc) when is_list(acc) do
acc
|> List.delete(key)
|> List.delete(key |> String.to_atom())
end
defp reject([key | []], acc) when is_map(acc) do
acc
|> Map.delete(key)
|> Map.delete(key |> String.to_atom())
end
defp reject([key | tail], acc) when is_map(acc) do
key
|> match(acc)
|> case do
{:ok, {matched_key, matched_value}} ->
rejected = reject(tail, matched_value)
Map.put(acc, matched_key, rejected)
{:error, :not_found} ->
acc
end
end
defp empty_acc(source) when is_map(source), do: %{}
defp empty_acc(source) when is_list(source), do: []
end
|
lib/juice.ex
| 0.794783 | 0.589155 |
juice.ex
|
starcoder
|
defprotocol Brook.Serializer.Protocol do
@moduledoc """
The protocol for standard serialization of Elixir structs to
an in-transit encoding format before sending on the Brook event stream.
Brook drivers are expected to implement a default serializer for
converting to the given encoding, leaving the client the option to
implement a custom serializer for specific struct types.
"""
@fallback_to_any true
@doc """
Convert the supplied Elixir term to an encoded term.
"""
@spec serialize(term()) :: {:ok, term()} | {:error, term()}
def serialize(data)
end
defimpl Brook.Serializer.Protocol, for: Any do
@moduledoc """
Provide a default implementation for the `Brook.Event.Serializer`
protocol that will encode the supplied term to json.
"""
require Logger
import Brook.Serializer.Util
def serialize(%struct{} = data) do
data
|> Map.from_struct()
|> safe_transform(fn {key, value} ->
Brook.Serializer.Protocol.serialize(value)
|> safe_map(fn new_value -> {key, new_value} end)
end)
|> safe_map(&Map.new/1)
|> safe_map(&Map.put(&1, Brook.Serializer.struct_key(), struct))
end
def serialize(data) do
ok(data)
end
defp ok(value), do: {:ok, value}
end
defimpl Brook.Serializer.Protocol, for: List do
import Brook.Serializer.Util
def serialize(list) do
if Keyword.keyword?(list) do
{:ok, safe_list} =
list
|> Enum.map(fn {key, val} -> [key, val] end)
|> safe_transform(&Brook.Serializer.Protocol.serialize/1)
{:ok, %{"keyword" => true, "list" => safe_list}}
else
safe_transform(list, &Brook.Serializer.Protocol.serialize/1)
end
end
end
defimpl Brook.Serializer.Protocol, for: Map do
import Brook.Serializer.Util
def serialize(data) do
data
|> safe_transform(fn {key, value} ->
Brook.Serializer.Protocol.serialize(value)
|> safe_map(fn new_value -> {key, new_value} end)
end)
|> safe_map(&Map.new/1)
end
end
defimpl Brook.Serializer.Protocol, for: MapSet do
def serialize(map_set) do
{:ok,
%{
Brook.Serializer.struct_key() => MapSet,
"values" => MapSet.to_list(map_set)
}}
end
end
defimpl Brook.Serializer.Protocol, for: DateTime do
def serialize(date_time) do
{:ok,
%{
Brook.Serializer.struct_key() => DateTime,
"value" => DateTime.to_iso8601(date_time)
}}
end
end
defimpl Brook.Serializer.Protocol, for: NaiveDateTime do
def serialize(naive_date_time) do
{:ok,
%{
Brook.Serializer.struct_key() => NaiveDateTime,
"value" => NaiveDateTime.to_iso8601(naive_date_time)
}}
end
end
defimpl Brook.Serializer.Protocol, for: Date do
def serialize(date) do
{:ok,
%{
Brook.Serializer.struct_key() => Date,
"value" => Date.to_iso8601(date)
}}
end
end
defimpl Brook.Serializer.Protocol, for: Time do
def serialize(time) do
{:ok,
%{
Brook.Serializer.struct_key() => Time,
"value" => Time.to_iso8601(time)
}}
end
end
defmodule Brook.Serializer do
def struct_key(), do: "__brook_struct__"
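  # Example (sketch): serializing a struct tags the JSON with the struct key so
  # a deserializer can rebuild it, e.g.
  #
  #     Brook.Serializer.serialize(~D[2020-01-01])
  #     #=> {:ok, ~s({"__brook_struct__":"Elixir.Date","value":"2020-01-01"})}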
@spec serialize(term()) :: {:ok, term()} | {:error, term()}
def serialize(data) do
case Brook.Serializer.Protocol.serialize(data) do
{:ok, serialized_data} -> Jason.encode(serialized_data)
error_result -> error_result
end
end
end
|
lib/brook/serializer.ex
| 0.82151 | 0.430536 |
serializer.ex
|
starcoder
|
defmodule WordSearch.State do
def new(alphabet, words, size, directions) do
%{
alphabet: alphabet,
words: words,
placed_words: [],
grid: %{}, # 1d "array"
size: size, # size of one side
available_positions: generate_positions(size),
directions: WordSearch.Directions.convert(directions)
}
end
def to_list(state = %{size: size}) do
new_grid = build_list(0, size * size, state[:grid], [])
state
|> Map.put(:grid, Enum.chunk_every(new_grid, size))
end
# Work from 0 to max_size - 1. When the num is equal to max_size, we are at the end
defp build_list(max_size, max_size, _grid, list), do: list
defp build_list(num, max_size, grid, list) do
case Map.fetch(grid, num) do
{:ok, val} -> build_list(num + 1, max_size, grid, list ++ [val])
:error -> build_list(num + 1, max_size, grid, list)
end
end
def display_grid(state = %{grid: grid, size: size}) do
grid_size = size * size
Enum.map(Enum.to_list(0..(grid_size - 1)), fn num ->
if rem(num, size) == 0 do
IO.puts ""
end
case Map.fetch(grid, num) do
{:ok, letter} -> IO.write "#{letter} "
:error -> :ok
end
end)
state
end
def spot_available?(%{size: size}, x, _y, _letter) when x >= size, do: false
def spot_available?(%{size: size}, _x, y, _letter) when y >= size, do: false
def spot_available?(state, x, y, letter) do
case Map.fetch(state[:grid], x + (y * state[:size])) do
{:ok, val} ->
if val == letter do
true
else
false
end
:error -> true
end
end
def set_letter(state, x, y, letter) do
put_in(state, [:grid, x + (y * state[:size])], List.to_string([letter]))
end
# Generates a list of possible positions to place
defp generate_positions(side_size) do
grid_size = side_size * side_size
Enum.map(Enum.to_list(0..(grid_size - 1)), fn num ->
{rem(num, side_size), div(num, side_size)}
end)
end
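  # e.g. generate_positions(2) yields [{0, 0}, {1, 0}, {0, 1}, {1, 1}]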
end
|
lib/word_search/state.ex
| 0.506836 | 0.544801 |
state.ex
|
starcoder
|
defmodule Logger.Utils do
@moduledoc false
@doc """
Truncates a char data into n bytes.
There is a chance we truncate in the middle of a grapheme
cluster but we never truncate in the middle of a binary
codepoint. For this reason, truncation is not exact.
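## Examples

    iex> Logger.Utils.truncate("abcdefghij", 5)
    ["abcde", " (truncated)"]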
"""
@spec truncate(IO.chardata, non_neg_integer) :: IO.chardata
def truncate(chardata, n) when n >= 0 do
{chardata, n} = truncate_n(chardata, n)
if n >= 0, do: chardata, else: [chardata, " (truncated)"]
end
defp truncate_n(_, n) when n < 0 do
{"", n}
end
defp truncate_n(binary, n) when is_binary(binary) do
remaining = n - byte_size(binary)
if remaining < 0 do
# There is a chance we are cutting at the wrong
# place so we need to fix the binary.
{fix_binary(binary_part(binary, 0, n)), remaining}
else
{binary, remaining}
end
end
defp truncate_n(int, n) when int in 0..127, do: {int, n-1}
defp truncate_n(int, n) when int in 128..0x07FF, do: {int, n-2}
defp truncate_n(int, n) when int in 0x800..0xFFFF, do: {int, n-3}
defp truncate_n(int, n) when int >= 0x10000 and is_integer(int), do: {int, n-4}
defp truncate_n(list, n) when is_list(list) do
truncate_n_list(list, n, [])
end
defp truncate_n_list(_, n, acc) when n < 0 do
{:lists.reverse(acc), n}
end
defp truncate_n_list([h|t], n, acc) do
{h, n} = truncate_n(h, n)
truncate_n_list(t, n, [h|acc])
end
defp truncate_n_list([], n, acc) do
{:lists.reverse(acc), n}
end
defp truncate_n_list(t, n, acc) do
{t, n} = truncate_n(t, n)
{:lists.reverse(acc, t), n}
end
defp fix_binary(binary) do
# Use a thirteen-byte offset to look back in the binary.
# This should allow at least two codepoints of 6 bytes.
suffix_size = min(byte_size(binary), 13)
prefix_size = byte_size(binary) - suffix_size
<<prefix :: binary-size(prefix_size), suffix :: binary-size(suffix_size)>> = binary
prefix <> fix_binary(suffix, "")
end
defp fix_binary(<<h::utf8, t::binary>>, acc) do
acc <> <<h::utf8>> <> fix_binary(t, "")
end
defp fix_binary(<<h, t::binary>>, acc) do
fix_binary(t, <<h, acc::binary>>)
end
defp fix_binary(<<>>, _acc) do
<<>>
end
@doc """
Receives a format string and arguments and replace `~p`,
`~P`, `~w` and `~W` by its inspected variants.
"""
def inspect(format, args, truncate, opts \\ %Inspect.Opts{})
def inspect(format, args, truncate, opts) when is_atom(format) do
do_inspect(Atom.to_char_list(format), args, truncate, opts)
end
def inspect(format, args, truncate, opts) when is_binary(format) do
do_inspect(:binary.bin_to_list(format), args, truncate, opts)
end
def inspect(format, args, truncate, opts) when is_list(format) do
do_inspect(format, args, truncate, opts)
end
defp do_inspect(format, [], _truncate, _opts), do: {format, []}
defp do_inspect(format, args, truncate, opts) do
# A pre-pass that removes binaries from
# arguments according to the truncate limit.
{args, _} = Enum.map_reduce(args, truncate, fn arg, acc ->
if is_binary(arg) do
truncate_n(arg, acc)
else
{arg, acc}
end
end)
do_inspect(format, args, [], [], opts)
end
defp do_inspect([?~|t], args, used_format, used_args, opts) do
{t, args, cc_format, cc_args} = collect_cc(:width, t, args, [?~], [], opts)
do_inspect(t, args, cc_format ++ used_format, cc_args ++ used_args, opts)
end
defp do_inspect([h|t], args, used_format, used_args, opts),
do: do_inspect(t, args, [h|used_format], used_args, opts)
defp do_inspect([], [], used_format, used_args, _opts),
do: {:lists.reverse(used_format), :lists.reverse(used_args)}
## width
defp collect_cc(:width, [?-|t], args, used_format, used_args, opts),
do: collect_value(:width, t, args, [?-|used_format], used_args, opts, :precision)
defp collect_cc(:width, t, args, used_format, used_args, opts),
do: collect_value(:width, t, args, used_format, used_args, opts, :precision)
## precision
defp collect_cc(:precision, [?.|t], args, used_format, used_args, opts),
do: collect_value(:precision, t, args, [?.|used_format], used_args, opts, :pad_char)
defp collect_cc(:precision, t, args, used_format, used_args, opts),
do: collect_cc(:pad_char, t, args, used_format, used_args, opts)
## pad char
defp collect_cc(:pad_char, [?.,?*|t], [arg|args], used_format, used_args, opts),
do: collect_cc(:encoding, t, args, [?*,?.|used_format], [arg|used_args], opts)
defp collect_cc(:pad_char, [?.,p|t], args, used_format, used_args, opts),
do: collect_cc(:encoding, t, args, [p,?.|used_format], used_args, opts)
defp collect_cc(:pad_char, t, args, used_format, used_args, opts),
do: collect_cc(:encoding, t, args, used_format, used_args, opts)
## encoding
defp collect_cc(:encoding, [?l|t], args, used_format, used_args, opts),
do: collect_cc(:done, t, args, [?l|used_format], used_args, %{opts | char_lists: false})
defp collect_cc(:encoding, [?t|t], args, used_format, used_args, opts),
do: collect_cc(:done, t, args, [?t|used_format], used_args, opts)
defp collect_cc(:encoding, t, args, used_format, used_args, opts),
do: collect_cc(:done, t, args, used_format, used_args, opts)
## done
defp collect_cc(:done, [?W|t], [data, limit|args], _used_format, _used_args, opts),
do: collect_inspect(t, args, data, %{opts | limit: limit, width: :infinity})
defp collect_cc(:done, [?w|t], [data|args], _used_format, _used_args, opts),
do: collect_inspect(t, args, data, %{opts | width: :infinity})
defp collect_cc(:done, [?P|t], [data, limit|args], _used_format, _used_args, opts),
do: collect_inspect(t, args, data, %{opts | limit: limit})
defp collect_cc(:done, [?p|t], [data|args], _used_format, _used_args, opts),
do: collect_inspect(t, args, data, opts)
defp collect_cc(:done, [h|t], args, used_format, used_args, _opts) do
{args, used_args} = collect_cc(h, args, used_args)
{t, args, [h|used_format], used_args}
end
defp collect_cc(?x, [a,prefix|args], used), do: {args, [prefix, a|used]}
defp collect_cc(?X, [a,prefix|args], used), do: {args, [prefix, a|used]}
defp collect_cc(?s, [a|args], used), do: {args, [a|used]}
defp collect_cc(?e, [a|args], used), do: {args, [a|used]}
defp collect_cc(?f, [a|args], used), do: {args, [a|used]}
defp collect_cc(?g, [a|args], used), do: {args, [a|used]}
defp collect_cc(?b, [a|args], used), do: {args, [a|used]}
defp collect_cc(?B, [a|args], used), do: {args, [a|used]}
defp collect_cc(?+, [a|args], used), do: {args, [a|used]}
defp collect_cc(?#, [a|args], used), do: {args, [a|used]}
defp collect_cc(?c, [a|args], used), do: {args, [a|used]}
defp collect_cc(?i, [a|args], used), do: {args, [a|used]}
defp collect_cc(?~, args, used), do: {args, used}
defp collect_cc(?n, args, used), do: {args, used}
defp collect_inspect(t, args, data, opts) do
data =
data
|> Inspect.Algebra.to_doc(opts)
|> Inspect.Algebra.format(opts.width)
{t, args, 'st~', [data]}
end
defp collect_value(current, [?*|t], [arg|args], used_format, used_args, opts, next)
when is_integer(arg) do
collect_cc(next, t, args, [?*|used_format], [arg|used_args],
put_value(opts, current, arg))
end
defp collect_value(current, [c|t], args, used_format, used_args, opts, next)
when is_integer(c) and c >= ?0 and c <= ?9 do
{t, c} = collect_value([c|t], [])
collect_cc(next, t, args, c ++ used_format, used_args,
put_value(opts, current, c |> :lists.reverse |> List.to_integer))
end
defp collect_value(_current, t, args, used_format, used_args, opts, next),
do: collect_cc(next, t, args, used_format, used_args, opts)
defp collect_value([c|t], buffer)
when is_integer(c) and c >= ?0 and c <= ?9,
do: collect_value(t, [c|buffer])
defp collect_value(other, buffer),
do: {other, buffer}
defp put_value(opts, key, value) do
if Map.has_key?(opts, key) do
Map.put(opts, key, value)
else
opts
end
end
@doc """
Returns a timestamp that includes milliseconds.
"""
def timestamp(utc_log?) do
{_, _, micro} = now = :os.timestamp()
{date, {hours, minutes, seconds}} =
case utc_log? do
true -> :calendar.now_to_universal_time(now)
false -> :calendar.now_to_local_time(now)
end
{date, {hours, minutes, seconds, div(micro, 1000)}}
end
@doc """
Formats time to an iodata.
"""
def format_time({hh, mi, ss, ms}) do
[pad2(hh), ?:, pad2(mi), ?:, pad2(ss), ?., pad3(ms)]
end
@doc """
Formats date to an iodata.
"""
def format_date({yy, mm, dd}) do
[Integer.to_string(yy), ?-, pad2(mm), ?-, pad2(dd)]
end
defp pad3(int) when int < 10, do: [?0, ?0, Integer.to_string(int)]
defp pad3(int) when int < 100, do: [?0, Integer.to_string(int)]
defp pad3(int), do: Integer.to_string(int)
defp pad2(int) when int < 10, do: [?0, Integer.to_string(int)]
defp pad2(int), do: Integer.to_string(int)
end
|
lib/logger/lib/logger/utils.ex
| 0.774541 | 0.539469 |
utils.ex
|
starcoder
|
defmodule Shipstation.RequestLimit do
@moduledoc ~s"""
This module is designed to record and handle the [backpressure
obligations](https://www.shipstation.com/developer-api/#/introduction/shipstation-api-requirements/api-rate-limits)
the API client has against the API.
When a request is made, the response headers contain information about how
many requests are allowed to be done within a given timeframe. This
information is then stored in our Agent and used to determine if we should
wait or not. Should the API Client go over the limit, the client will backoff,
blocking the request until the elapsed time has been reached.
"""
use Timex
require Logger
@default_duration 40
@default_requests_allowed 40
def start_link do
Logger.info "Booting up RequestLimit Agent"
Agent.start_link(fn ->
{@default_requests_allowed, @default_requests_allowed, seconds_from_now(@default_duration)}
end, name: __MODULE__)
end
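  # Typical call site (sketch; `make_request/0` stands in for your HTTP call):
  #
  #     unless Shipstation.RequestLimit.should_request?(), do: Shipstation.RequestLimit.backoff()
  #     resp = make_request()
  #     Shipstation.RequestLimit.set_api_rate(resp)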
@doc ~s"""
This function allows us to anticipate whether the API will reject our request
"""
@spec should_request?() :: boolean
def should_request? do
{_limit, remaining, reset} = Agent.get(__MODULE__, & &1)
!(remaining == 0 && Timex.before?(Timex.now(), reset))
end
@doc ~s"""
Return the current state
"""
@spec state() :: {integer, integer, %DateTime{}}
def state do
Agent.get(__MODULE__, & &1)
end
@doc ~s"""
This function lets us set the rate information we're getting back from the API
"""
@spec set_api_rate({atom, HTTPoison.Response.t}) :: any
def set_api_rate({:error, _}) do
nil
end
def set_api_rate({_, %HTTPoison.Response{headers: headers}}) do
headers = Enum.into(headers, %{})
{limit, _} = Integer.parse(Map.get(headers, "X-Rate-Limit-Limit", "40"))
{remaining, _} = Integer.parse(Map.get(headers, "X-Rate-Limit-Remaining", "40"))
{reset, _} = Integer.parse(Map.get(headers, "X-Rate-Limit-Reset", "40"))
state = {limit, remaining, seconds_from_now(reset)}
Agent.update(__MODULE__, fn _ -> state end)
end
@doc ~s"""
Wait a specified amount of time, so that the API has room to breathe
"""
@spec backoff() :: any
def backoff() do
{_limit, _remaining, reset} = Agent.get(__MODULE__, & &1)
period = calculate_backoff_period(reset)
Logger.warn("Backing off Shipstation API for #{period}ms...")
:timer.sleep(period)
end
@spec calculate_backoff_period(future_time :: %DateTime{}) :: non_neg_integer
def calculate_backoff_period(future_time) do
future_time
|> Timex.diff(Timex.now, :milliseconds)
end
@doc false
@spec seconds_from_now(integer) :: %DateTime{}
def seconds_from_now(distance) do
Timex.add(Timex.now, Timex.Duration.from_seconds(distance))
end
end
|
lib/request_limit.ex
| 0.791982 | 0.406833 |
request_limit.ex
|
starcoder
|
defmodule Queue do
@moduledoc File.read!("README.md")
defstruct front: [], rear: []
@type t :: %Queue{front: list, rear: list}
@doc "Returns a new empty queue"
@spec new :: t
def new do
%Queue{}
end
@doc "Puts the given value at the end the queue"
@spec put(t, term) :: t
def put(%Queue{front: [], rear: rear = [_]}, item) do
%Queue{front: rear, rear: [item]}
end
def put(%Queue{rear: rear} = queue, item) do
%Queue{queue|rear: [item | rear]}
end
@doc """
Puts the given value at the front of the queue
This means that it will be the first item in the queue to pop, peek, or drop.
"""
@spec put_front(t, term) :: t
def put_front(%Queue{front: front = [_], rear: []}, item) do
%Queue{front: [item], rear: front}
end
def put_front(%Queue{front: front} = queue, item) do
%Queue{queue|front: [item | front]}
end
@doc """
Pop the first value from the front of the queue
Returns the value as well as the rest of the queue or `:empty` if the queue has
no items.
"""
@spec pop(t) :: { term, t } | :empty
def pop(%Queue{front: [], rear: []}) do
:empty
end
def pop(%Queue{front: [], rear: [item]}) do
{ item, %Queue{front: [], rear: []} }
end
def pop(%Queue{front: [], rear: [last | rest]}) do
[item | front] = :lists.reverse(rest, [])
{ item, %Queue{front: front, rear: [last]} }
end
def pop(%Queue{front: [item], rear: rear}) do
{ item, r2f(rear) }
end
def pop(%Queue{front: [item | rest]} = queue) do
{ item, %Queue{queue|front: rest} }
end
@doc """
Pop the last value from the rear of the queue
Returns the value as well as the rest of the queue or `:empty` if the queue has
no items.
"""
@spec pop_rear(t) :: { term, t } | :empty
def pop_rear(%Queue{front: [], rear: []}) do
:empty
end
def pop_rear(%Queue{front: [item], rear: []}) do
{ item, %Queue{front: [], rear: []} }
end
def pop_rear(%Queue{front: [first | rest], rear: []}) do
[item | rear] = :lists.reverse(rest, [])
{ item, %Queue{front: [first], rear: rear} }
end
def pop_rear(%Queue{front: front, rear: [item]}) do
{ item, f2r(front) }
end
def pop_rear(%Queue{rear: [item | rest]} = queue) do
{ item, %Queue{queue|rear: rest} }
end
@doc """
Remove the first value from the front of the queue
Returns the rest of the queue or `:empty` if the queue has no items.
"""
@spec drop(t) :: t | :empty
def drop(%Queue{front: [], rear: []}) do
:empty
end
def drop(%Queue{front: [], rear: [_item]}) do
%Queue{front: [], rear: []}
end
def drop(%Queue{front: [], rear: [last | rest]}) do
[_item | front] = :lists.reverse(rest, [])
%Queue{front: front, rear: [last]}
end
def drop(%Queue{front: [_item], rear: rear}) do
r2f(rear)
end
def drop(%Queue{front: [_item | rest]} = queue) do
%Queue{queue|front: rest}
end
@doc """
Remove the last value from the rear of the queue
Returns the rest of the queue or `:empty` if the queue has no items.
"""
@spec drop_rear(t) :: t | :empty
def drop_rear(%Queue{front: [], rear: []}) do
:empty
end
def drop_rear(%Queue{front: [_item], rear: []}) do
%Queue{front: [], rear: []}
end
def drop_rear(%Queue{front: [first | rest], rear: []}) do
[_item | rear] = :lists.reverse(rest, [])
%Queue{front: [first], rear: rear}
end
def drop_rear(%Queue{front: front, rear: [_item]}) do
f2r(front)
end
def drop_rear(%Queue{rear: [_item | rest]} = queue) do
%Queue{queue|rear: rest}
end
@doc """
Get the first value from the front of the queue without removing it
Returns `{:ok, value}` or `:empty` if the queue has no items.
"""
@spec peek(t) :: { :ok, term } | :empty
def peek(%Queue{front: [], rear: []}) do
:empty
end
def peek(%Queue{front: [item | _]}) do
{ :ok, item }
end
def peek(%Queue{front: [], rear: [item]}) do
{ :ok, item }
end
@doc """
Get the last value from the rear of the queue without removing it
Returns `{:ok, value}` or `:empty` if the queue has no items.
"""
@spec peek_rear(t) :: { :ok, term } | :empty
def peek_rear(%Queue{front: [], rear: []}) do
:empty
end
def peek_rear(%Queue{rear: [item | _]}) do
{ :ok, item }
end
def peek_rear(%Queue{front: [item], rear: []}) do
{ :ok, item }
end
@doc """
Join two queues
It effectively appends the second queue to the first queue.
"""
@spec join(t, t) :: t
def join(%Queue{} = q, %Queue{front: [], rear: []}) do
q
end
def join(%Queue{front: [], rear: []}, %Queue{} = q) do
q
end
def join(%Queue{front: f1, rear: r1}, %Queue{front: f2, rear: r2}) do
%Queue{front: f1 ++ :lists.reverse(r1, f2), rear: r2}
end
@doc """
Converts a queue to a list
The front item of the queue will be the first element in the list.
"""
@spec to_list(t) :: list
def to_list(%Queue{front: front, rear: rear}) do
front ++ :lists.reverse(rear, [])
end
@doc """
Converts a list to a queue
The first element in the list will be the front item of the queue.
"""
@spec from_list(list) :: t
def from_list(items) do
f2r(items)
end
@doc "Converts a queue to Erlang's queue data type"
@spec to_erl(t) :: { list, list }
def to_erl(%Queue{front: front, rear: rear}) do
{ rear, front }
end
@doc "Converts Erlang's queue data type to a queue"
@spec from_erl({ list, list }) :: t
def from_erl({ rear, front }) when is_list(rear) and is_list(front) do
%Queue{front: front, rear: rear}
end
@doc "Returns the number of items in the queue"
@spec size(t) :: non_neg_integer
def size(%Queue{front: front, rear: rear}) do
length(front) + length(rear)
end
@doc "Returns true if the given value exists in the queue"
@spec member?(t, term) :: boolean
def member?(%Queue{front: front, rear: rear}, item) do
:lists.member(item, rear) or :lists.member(item, front)
end
# Move half of elements from rear to front, if there are at least three
defp r2f([]), do: %Queue{}
defp r2f([_] = rear), do: %Queue{front: [], rear: rear}
defp r2f([x, y]), do: %Queue{front: [y], rear: [x]}
defp r2f(list) do
{ rear, front } = :lists.split(div(length(list), 2) + 1, list)
%Queue{front: :lists.reverse(front, []), rear: rear}
end
# Move half of elements from front to rear, if there are enough
defp f2r([]), do: %Queue{}
defp f2r([_] = front), do: %Queue{front: [], rear: front}
defp f2r([x, y]), do: %Queue{front: [x], rear: [y]}
defp f2r(list) do
{ front, rear } = :lists.split(div(length(list), 2) + 1, list)
%Queue{front: front, rear: :lists.reverse(rear, [])}
end
end
defimpl Enumerable, for: Queue do
def count(queue), do: { :ok, Queue.size(queue) }
def member?(queue, x), do: { :ok, Queue.member?(queue, x) }
def reduce(%Queue{front: front, rear: rear}, acc, fun) do
rear_acc = do_reduce(front, acc, fun)
case do_reduce(:lists.reverse(rear, []), rear_acc, fun) do
{ :cont, acc } ->
{ :done, acc }
{ :halt, acc } ->
{ :halted, acc }
suspended ->
suspended
end
end
defp do_reduce([h | t], { :cont, acc }, fun) do
do_reduce(t, fun.(h, acc), fun)
end
defp do_reduce([], { :cont, acc }, _fun) do
{ :cont, acc }
end
defp do_reduce(_queue, { :halt, acc }, _fun) do
{ :halt, acc }
end
defp do_reduce(queue, { :suspend, acc }, fun) do
{ :suspended, acc, &do_reduce(queue, &1, fun) }
end
defp do_reduce(queue, { :suspended, acc, continuation }, fun) do
{ :suspended, acc, fn acc ->
rear_acc = continuation.(acc)
do_reduce(queue, rear_acc, fun)
end }
end
end
defimpl Collectable, for: Queue do
def into(original) do
{ original, fn
queue, { :cont, item } -> Queue.put(queue, item)
queue, :done -> queue
_, :halt -> :ok
end }
end
end
defimpl Inspect, for: Queue do
import Inspect.Algebra
def inspect(%Queue{} = queue, opts) do
concat ["#Queue<", to_doc(Queue.to_list(queue), opts), ">"]
end
end
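# A short usage example (added for illustration; behaviour follows from the
# functions and protocol implementations above):
#
#     iex> q = Queue.new() |> Queue.put(1) |> Queue.put(2) |> Queue.put_front(0)
#     iex> Queue.to_list(q)
#     [0, 1, 2]
#     iex> {value, q} = Queue.pop(q)
#     iex> value
#     0
#     iex> Enum.sum(q)
#     3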
lib/queue.ex
defmodule Ueberauth.Strategy.FreeAgent do
@moduledoc """
FreeAgent OAuth2 strategy for Überauth.
## Configuration
Add `freeagent` to your Überauth configuration:
```elixir
config :ueberauth, Ueberauth,
providers: [
freeagent: {Ueberauth.Strategy.FreeAgent, []}
]
```
Update your provider configuration, setting your `client_id` and `client_secret`:
```elixir
config :ueberauth, Ueberauth.Strategy.FreeAgent.OAuth,
client_id: System.get_env("FREEAGENT_CLIENT_ID"),
client_secret: System.get_env("FREEAGENT_CLIENT_SECRET")
```
**IMPORTANT**: To use the FreeAgent sandbox API, set `sandbox` to `true` for the `:ueberauth_freeagent` application:
```elixir
config :ueberauth_freeagent,
sandbox: true
```
This will automatically configure the correct URLs.
## OAuth2 Flow
For information on how to configure Phoenix to use this strategy, see the [README](./extra-readme.html)
"""
use Ueberauth.Strategy, uid_field: :email,
oauth2_module: Ueberauth.Strategy.FreeAgent.OAuth
alias Ueberauth.Auth.Info
alias Ueberauth.Auth.Credentials
alias Ueberauth.Auth.Extra
@doc """
Handles the initial redirect to the FreeAgent authentication page.
You can include a `state` param that FreeAgent will return to you.
"""
def handle_request!(conn) do
opts = [redirect_uri: callback_url(conn), response_type: "code"]
opts =
if conn.params["state"], do: Keyword.put(opts, :state, conn.params["state"]), else: opts
module = option(conn, :oauth2_module)
redirect!(conn, apply(module, :authorize_url!, [opts]))
end
@doc """
Handles the callback from FreeAgent. When there is a failure from FreeAgent the failure is included in the
`ueberauth_failure` struct. Otherwise the information returned from FreeAgent is returned in the `Ueberauth.Auth` struct.
"""
def handle_callback!(%Plug.Conn{params: %{"code" => code}} = conn) do
module = option(conn, :oauth2_module)
token = apply(module, :get_token!, [[code: code, redirect_uri: callback_url(conn)]])
if token.access_token == nil do
set_errors!(conn, [error(token.other_params["error"], token.other_params["error_description"])])
else
fetch_user(conn, token)
end
end
@doc false
def handle_callback!(conn) do
set_errors!(conn, [error("missing_code", "No code received")])
end
@doc """
Cleans up the private area of the connection used for passing the raw freeagent response around during the callback.
"""
def handle_cleanup!(conn) do
conn
|> put_private(:freeagent_user, nil)
|> put_private(:freeagent_token, nil)
end
@doc """
Fetches the uid field from the FreeAgent response.
This defaults to the option `uid_field` which in turn defaults to `email`
"""
def uid(conn) do
user =
conn
|> option(:uid_field)
|> to_string
conn.private.freeagent_user[user]
end
@doc """
Includes the credentials from the FreeAgent response.
"""
def credentials(conn) do
token = conn.private.freeagent_token
%Credentials{
token: token.access_token,
refresh_token: token.refresh_token,
expires_at: token.expires_at,
token_type: token.token_type,
expires: !!token.expires_at
}
end
@doc """
Fetches the fields to populate the info section of the `Ueberauth.Auth` struct.
"""
def info(conn) do
user = conn.private.freeagent_user
%Info{
name: user["first_name"] <> " " <> user["last_name"],
first_name: user["first_name"],
last_name: user["last_name"],
email: user["email"],
description: user["role"],
urls: %{
url: user["url"]
}
}
end
@doc """
Stores the raw information (including the token) obtained from the freeagent callback.
"""
def extra(conn) do
%Extra {
raw_info: %{
token: conn.private.freeagent_token,
user: conn.private.freeagent_user
}
}
end
@spec fetch_user(conn :: Plug.Conn.t, token :: OAuth2.AccessToken.t) :: Plug.Conn.t
defp fetch_user(conn, token) do
conn = put_private(conn, :freeagent_token, token)
case profile(token) do
{:ok, %OAuth2.Response{status_code: 401, body: _body}} ->
set_errors!(conn, [error("token", "unauthorized")])
{:ok, %OAuth2.Response{status_code: status_code, body: payload}} when status_code in 200..399 ->
case payload do
%{"user" => user} ->
put_private(conn, :freeagent_user, user)
_ ->
set_errors!(conn, [error("OAuth2", "could not find profile")])
end
{:error, %OAuth2.Error{reason: reason}} ->
set_errors!(conn, [error("OAuth2", reason)])
end
end
# Attempt to retrieve the user profile
@spec profile(token :: OAuth2.AccessToken.t) :: {:ok, OAuth2.Response.t} | {:error, OAuth2.Error.t}
defp profile(token) do
Ueberauth.Strategy.FreeAgent.OAuth.client(token: token)
|> OAuth2.Client.get("/users/me")
end
# Extract an option from the connection
@spec option(conn :: Plug.Conn.t, key :: atom) :: any
defp option(conn, key) do
Keyword.get(options(conn), key, Keyword.get(default_options(), key))
end
end
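# A minimal Phoenix wiring sketch (added for illustration; the module, route,
# and controller names are assumptions — only `plug Ueberauth` and the
# `:ueberauth_auth`/`:ueberauth_failure` assigns come from Überauth itself):
#
# In the router:
#
#     scope "/auth", MyAppWeb do
#       pipe_through :browser
#
#       get "/:provider", AuthController, :request
#       get "/:provider/callback", AuthController, :callback
#     end
#
defmodule MyAppWeb.AuthController do
  use MyAppWeb, :controller

  plug Ueberauth

  # On success, handle_callback!/1 above populates conn.assigns.ueberauth_auth
  def callback(%{assigns: %{ueberauth_auth: auth}} = conn, _params) do
    json(conn, %{email: auth.info.email, name: auth.info.name})
  end

  def callback(%{assigns: %{ueberauth_failure: _failure}} = conn, _params) do
    conn
    |> put_status(:unauthorized)
    |> json(%{error: "authentication failed"})
  end
end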
lib/ueberauth/strategy/freeagent.ex
defmodule ExUnit.CaptureLog do
@moduledoc ~S"""
Functionality to capture logs for testing.
## Examples
defmodule AssertionTest do
use ExUnit.Case
import ExUnit.CaptureLog
test "example" do
assert capture_log(fn ->
Logger.error "log msg"
end) =~ "log msg"
end
test "check multiple captures concurrently" do
fun = fn ->
for msg <- ["hello", "hi"] do
assert capture_log(fn -> Logger.error msg end) =~ msg
end
Logger.debug "testing"
end
assert capture_log(fun) =~ "hello"
assert capture_log(fun) =~ "testing"
end
end
"""
alias Logger.Backends.Console
@doc """
Captures Logger messages generated when evaluating `fun`.
Returns the binary which is the captured output.
This function mutes the `:console` backend
and captures any log messages sent to Logger.
Note that when `async` is set to `true`,
messages from another test might be captured.
It is possible to configure the level to capture with `:level`,
which will set the capturing level for the duration of the
capture. For instance, if the log level is set to `:error`,
any message with a lower level will be ignored.
The default level is `nil`, which will capture all messages.
The behaviour is undetermined if async tests change the Logger level.
The format, metadata and colors can be configured with `:format`,
`:metadata` and `:colors` respectively. These three options
default to the `:console` backend configuration parameters.
"""
@spec capture_log(Keyword.t, (() -> any)) :: String.t
def capture_log(opts \\ [], fun) do
opts = Keyword.put_new(opts, :level, nil)
{:ok, string_io} = StringIO.open("")
try do
:ok = add_capture(string_io, opts)
ref = ExUnit.Server.log_capture_on(self())
try do
fun.()
after
:ok = Logger.flush()
:ok = ExUnit.Server.log_capture_off(ref)
:ok = remove_capture(string_io)
end
:ok
catch
kind, reason ->
stack = System.stacktrace()
_ = StringIO.close(string_io)
:erlang.raise(kind, reason, stack)
else
:ok ->
{:ok, content} = StringIO.close(string_io)
elem(content, 1)
end
end
defp add_capture(pid, opts) do
GenEvent.add_mon_handler(Logger, {Console, pid}, {pid, opts})
end
defp remove_capture(pid) do
case GenEvent.remove_handler(Logger, {Console, pid}, nil) do
:ok ->
receive do
{:gen_event_EXIT, {Console, ^pid}, _reason} -> :ok
end
{:error, :not_found} = error ->
mfa = {ExUnit.CaptureLog, :remove_capture, [pid]}
receive do
{:gen_event_EXIT, {Console, ^pid}, reason} -> exit({reason, mfa})
after
# In case someone accidentally flushed the message,
# let's raise not found.
0 -> exit({error, mfa})
end
end
end
end
lib/ex_unit/lib/ex_unit/capture_log.ex
defmodule ExKdl.Parser do
@moduledoc false
alias ExKdl.DecodeError
alias ExKdl.Node
alias ExKdl.Token
alias ExKdl.Value
import ExKdl.Token, only: [is_type: 2]
import ExKdl.Parser.Utils
defguardp is_whitespace(token)
when is_type(token, :whitespace) or
is_type(token, :multiline_comment) or
is_type(token, :bom)
defguardp is_linespace(token)
when is_whitespace(token) or
is_type(token, :newline) or
is_type(token, :line_comment)
defguardp is_keyword(token)
when is_type(token, :null) or is_type(token, :boolean)
defguardp is_value(token)
when is_type(token, :string) or is_type(token, :number) or is_keyword(token)
defguardp is_identifier(token)
when is_type(token, :bare_identifier) or is_type(token, :string)
@spec parse([tuple]) :: {:ok, [Node.t()]} | {:error, DecodeError.t()}
def parse(tokens) do
case parse_nodes(tokens) do
{:match, [], nodes} ->
{:ok, nodes}
{:match, [token], nodes} when is_type(token, :eof) ->
{:ok, nodes}
{:match, _, _} ->
{:error, %DecodeError{message: "failed to parse KDL document"}}
end
end
defp parse_nodes(tokens) do
# is_linespace/1 is a guard (macro), so it cannot be captured with `&`
tokens = discard_while(tokens, fn token -> is_linespace(token) end)
case parse_node(tokens) do
{:match, tokens, node} ->
{:match, tokens, nodes} = parse_nodes(tokens)
nodes =
if is_nil(node) do
nodes
else
[node | nodes]
end
{:match, tokens, nodes}
:nomatch ->
tokens = discard_while(tokens, fn token -> is_linespace(token) end)
{:match, tokens, []}
end
end
defp parse_node(tokens) do
with {:match, tokens, is_commented} <- tokens |> zero_or_one(&node_comment/1),
{:match, tokens, _} <- tokens |> zero_or_more(&whitespace/1),
{:match, tokens, type} <- tokens |> zero_or_one(&type_annotation/1),
{:match, tokens, name} <- tokens |> one(&identifier/1),
{:match, tokens, props_and_vals} <- tokens |> zero_or_more(&node_props_and_vals/1),
{:match, tokens, children} <- tokens |> zero_or_more(&node_children/1),
{:match, tokens, _} <- tokens |> zero_or_more(&node_space/1),
{:match, tokens, _} <- tokens |> one(&node_terminator/1) do
if is_commented do
{:match, tokens, nil}
else
{properties, values} = process_props_and_vals(props_and_vals)
kdl_node = %Node{
name: name,
type: type,
values: values,
properties: properties,
children: List.flatten(children)
}
{:match, tokens, kdl_node}
end
end
end
defp node_children(tokens) do
with {:match, tokens, _} <- tokens |> zero_or_more(&node_space/1),
{:match, tokens, is_commented} <- tokens |> zero_or_one(&node_comment/1),
{:match, tokens, _} <- tokens |> zero_or_more(&node_space/1),
{:match, tokens, _} <- tokens |> one(&left_brace/1),
{:match, tokens, nodes} <- tokens |> one(&parse_nodes/1),
{:match, tokens, _} <- tokens |> one(&right_brace/1) do
if is_commented do
{:match, tokens, []}
else
{:match, tokens, nodes}
end
end
end
defp node_props_and_vals(tokens) do
with {:match, tokens, _} <- tokens |> one_or_more(&node_space/1),
{:match, tokens, is_commented} <- tokens |> zero_or_one(&node_comment/1),
{:match, tokens, _} <- tokens |> zero_or_more(&node_space/1),
{:match, tokens, prop_or_val} <- tokens |> one(&node_property/1, or: &node_value/1) do
if is_commented do
{:match, tokens, nil}
else
{:match, tokens, prop_or_val}
end
end
end
defp node_property(tokens) do
with {:match, tokens, key} <- tokens |> one(&identifier/1),
{:match, tokens, _} <- tokens |> one(&equals/1),
{:match, tokens, value} <- tokens |> one(&node_value/1) do
{:match, tokens, {key, value}}
end
end
defp node_value(tokens) do
with {:match, tokens, type} <- tokens |> zero_or_one(&type_annotation/1),
{:match, tokens, val} <- tokens |> one(&value/1) do
{:match, tokens, Value.new(val, type)}
end
end
defp node_terminator(tokens) do
one(
tokens,
&line_comment/1,
or: &newline/1,
or: &semicolon/1,
or: &eof/1
)
end
defp node_space(tokens) do
one(
tokens,
&escape_line/1,
or: fn tokens -> tokens |> one_or_more(&whitespace/1) end
)
end
defp escape_line(tokens) do
with {:match, tokens, _} <- tokens |> zero_or_more(&whitespace/1),
{:match, tokens, _} <- tokens |> one(&continuation/1),
{:match, tokens, _} <- tokens |> zero_or_more(&whitespace/1),
{:match, tokens, _} <- tokens |> zero_or_one(&line_comment/1),
{:match, _, _} = match <- tokens |> one(&newline/1) do
match
end
end
terminals = %{
eof: nil,
semicolon: nil,
left_brace: nil,
right_brace: nil,
equals: nil,
continuation: nil,
newline: nil,
node_comment: true,
line_comment: nil
}
for {terminal, value} <- terminals do
defp unquote(terminal)([token | tokens]) when is_type(token, unquote(terminal)) do
{:match, tokens, unquote(value)}
end
defp unquote(terminal)(_tokens) do
:nomatch
end
end
productions = ~w(
whitespace
identifier
value
)a
for production <- productions do
defp unquote(production)([token | tokens])
when unquote(String.to_atom("is_#{production}"))(token) do
{:match, tokens, Token.value(token)}
end
defp unquote(production)(_tokens) do
:nomatch
end
end
defp type_annotation([t1, t2, t3 | tokens])
when is_type(t1, :left_paren) and is_identifier(t2) and is_type(t3, :right_paren) do
{:match, tokens, Token.value(t2)}
end
defp type_annotation(_tokens) do
:nomatch
end
defp process_props_and_vals(props_and_vals) do
process_props_and_vals(props_and_vals, %{}, [])
end
defp process_props_and_vals([property | rest], props, vals) when is_tuple(property) do
case property do
{key, %Value{} = value} ->
process_props_and_vals(rest, Map.put(props, key, value), vals)
# This happens when a property was commented out with a slashdash.
# In that case, we need to ignore the property.
{_key, nil} ->
process_props_and_vals(rest, props, vals)
end
end
defp process_props_and_vals([value | rest], props, vals) do
case value do
%Value{} = value ->
process_props_and_vals(rest, props, [value | vals])
# This happens when a value was commented out with a slashdash.
# In that case, we need to ignore the value.
nil ->
process_props_and_vals(rest, props, vals)
end
end
defp process_props_and_vals([], props, vals) do
{props, Enum.reverse(vals)}
end
end
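# The combinators imported from ExKdl.Parser.Utils are not shown in this file.
# Below is a minimal sketch of what they might look like, inferred from the
# call sites above (names and shapes are assumptions, not the library's actual
# implementation):
defmodule ExKdl.Parser.UtilsSketch do
  @moduledoc false

  # Drop leading tokens while `pred` holds.
  def discard_while([token | rest] = tokens, pred) do
    if pred.(token), do: discard_while(rest, pred), else: tokens
  end

  def discard_while([], _pred), do: []

  # Try `parser`; on failure, succeed with nil and consume nothing.
  def zero_or_one(tokens, parser) do
    case parser.(tokens) do
      {:match, _, _} = match -> match
      :nomatch -> {:match, tokens, nil}
    end
  end

  # Apply `parser` repeatedly, collecting results until it fails.
  def zero_or_more(tokens, parser) do
    case parser.(tokens) do
      {:match, rest, value} ->
        {:match, rest, values} = zero_or_more(rest, parser)
        {:match, rest, [value | values]}

      :nomatch ->
        {:match, tokens, []}
    end
  end

  # Like zero_or_more/2, but the first match is required.
  def one_or_more(tokens, parser) do
    with {:match, rest, value} <- parser.(tokens) do
      {:match, rest, values} = zero_or_more(rest, parser)
      {:match, rest, [value | values]}
    end
  end

  # Require exactly one match, falling back through any `or:` alternatives.
  def one(tokens, parser, opts \\ []) do
    case parser.(tokens) do
      {:match, _, _} = match ->
        match

      :nomatch ->
        case Keyword.pop_first(opts, :or) do
          {nil, _} -> :nomatch
          {fallback, rest} -> one(tokens, fallback, rest)
        end
    end
  end
end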
lib/ex_kdl/parser.ex
defmodule Curvy do
@moduledoc """


Signatures and Bitcoin flavoured crypto written in pure Elixir. Curvy is an
implementation of `secp256k1`, an elliptic curve that can be used in signature
schemes, asymmetric encryption and ECDH shared secrets.
## Highlights
* Pure Elixir implementation of `secp256k1` - no external dependencies
* Fast ECDSA cryptography using Jacobian Point mathematics
* Supports deterministic ECDSA signatures as per [RFC 6979](https://tools.ietf.org/html/rfc6979)
* Securely generate random ECDSA keypairs
* Compute ECDH shared secrets
## Installation
The package can be installed by adding `curvy` to your list of dependencies in
`mix.exs`.
def deps do
[
{:curvy, "~> #{ Mix.Project.config[:version] }"}
]
end
## Usage
### 1. Key generation
Create random ECDSA keypairs.
iex> key = Curvy.generate_key()
%Curvy.Key{
crv: :secp256k1,
point: %Curvy.Point{},
privkey: <<>>
}
[`ECDSA Keypairs`](`t:Curvy.Key.t`) can be converted to public and private key
binaries.
iex> Curvy.Key.to_privkey(key)
<<privkey::binary-size(32)>>
iex> Curvy.Key.to_pubkey(key)
<<pubkey::binary-size(33)>>
iex> Curvy.Key.to_pubkey(key, compressed: false)
<<pubkey::binary-size(65)>>
### 2. Sign messages
Sign arbitrary messages with a private key. Signatures are deterministic as
per [RFC 6979](https://tools.ietf.org/html/rfc6979).
iex> sig = Curvy.sign("hello", key)
<<sig::binary-size(71)>>
iex> sig = Curvy.sign("hello", compact: true)
<<sig::binary-size(65)>>
iex> sig = Curvy.sign("hello", compact: true, encoding: :base64)
"IEnXUDXZ3aghwXaq1zu9ax2zJj7N+O4gGREmWBmrldwrIb9B7QuicjwPrrv3ocPpxYO7uCxcw+DR/FcHR9b/YjM="
### 3. Verify signatures
Verify a signature against the message and a public key.
iex> Curvy.verify(sig, "hello", key)
true
iex> Curvy.verify(sig, "hello", wrongkey)
false
# Returns :error if the signature cannot be decoded
iex> Curvy.verify("notasig", "hello", key)
:error
### 4. Recover the public key from a signature
It's possible to recover the public key from a compact signature when given
with the signed message.
iex> sig = Curvy.sign("hello", key, compact: true)
iex> recovered = Curvy.recover_key(sig, "hello")
iex> recovered.point == key.point
true
The same can be done with DER encoded signatures if the recovery ID is known.
iex> {sig, recovery_id} = Curvy.sign("hello", key, recovery: true)
iex> recovered = Curvy.recover_key(sig, "hello", recovery_id: recovery_id)
iex> recovered.point == key.point
true
### 5. ECDH shared secrets
ECDH shared secrets are computed by multiplying a public key with a private
key. The operation yields the same result in both directions.
iex> s1 = Curvy.get_shared_secret(key1, key2)
iex> s2 = Curvy.get_shared_secret(key2, key1)
iex> s1 == s2
true
"""
use Bitwise, only_operators: true
alias Curvy.{Curve, Key, Point, Signature}
import Curvy.Util, only: [encode: 2, decode: 2, inv: 2, mod: 2]
@crv Curve.secp256k1
@doc """
Creates a new random ECDSA keypair.
"""
@spec generate_key() :: Key.t
def generate_key(), do: Key.generate()
@doc """
Computes an ECDH shared secret from the first given key's private key and
the second's public key.
Returns a 32 byte binary.
## Accepted options
* `:encoding` - Optionally encode the returned secret as `:base64` or `:hex`.
"""
@spec get_shared_secret(Key.t | binary, Key.t | binary) :: binary
def get_shared_secret(privkey, pubkey, opts \\ [])
def get_shared_secret(privkey, pubkey, opts) when is_binary(privkey),
do: get_shared_secret(Key.from_privkey(privkey), pubkey, opts)
def get_shared_secret(privkey, pubkey, opts) when is_binary(pubkey),
do: get_shared_secret(privkey, Key.from_pubkey(pubkey), opts)
def get_shared_secret(%Key{privkey: <<d::big-size(256)>>}, %Key{point: point}, opts) do
encoding = Keyword.get(opts, :encoding)
x = point
|> Point.mul(d)
|> Map.get(:x)
encode(<<x::big-size(256)>>, encoding)
end
@doc """
Recovers the public key from the signature and signed message.
Returns an [`ECDSA Keypair`](`t:t`) struct, without the privkey value.
If recovering from a DER encoded signature, the [`Recovery ID`](`Signature.recovery_id`)
returned from `Curvy.sign(msg, key, recovery: true)` must be passed as an
option. If recovering from a compact signature the recovery ID is already
encoded in the signature.
## Accepted options
* `:encoding` - Optionally decode the given signature as `:base64` or `:hex`.
* `:hash` - Digest algorithm to hash the message with. Default is `:sha256`.
* `:recovery_id` - The signature [`Recovery ID`](`Signature.recovery_id`).
"""
@spec recover_key(Signature.t | binary, binary, keyword) :: Key.t | :error
def recover_key(sig, message, opts \\ [])
def recover_key(data, message, opts) when is_binary(data) do
encoding = Keyword.get(opts, :encoding)
with {:ok, data} <- decode(data, encoding),
%Signature{} = sig <- Signature.parse(data)
do
opts = case data do
<<prefix, _sig::binary-size(64)>> when (prefix - 27 - 4) < 0 ->
Keyword.put(opts, :compressed, false)
_ ->
opts
end
recover_key(sig, message, opts)
end
end
def recover_key(%Signature{recid: recid} = sig, message, opts) do
with recid when recid in 0..3 <- Keyword.get(opts, :recovery_id, recid) do
digest = Keyword.get(opts, :hash, :sha256)
e = message
|> hash_message(digest)
|> :binary.decode_unsigned()
sig
|> Signature.normalize()
|> Point.from_signature(e, recid)
|> Key.from_point(Keyword.take(opts, [:compressed]))
else
_ ->
raise "Recovery ID not in range 0..3"
end
end
@doc """
Signs the message with the given private key.
Returns a DER encoded or compact signature binary.
## Accepted options
* `:hash` - Digest algorithm to hash the message with. Default is `:sha256`.
* `:normalize` - Normalize the signature by enforcing low-S. Default is `true`.
* `:compact` - Return a compact 65 byte signature. Default is `false`.
* `:encoding` - Optionally encode the returned signature as `:base64` or `:hex`.
* `:recovery` - Return the signature in a tuple paired with a recovery ID. Default is `false`.
* `:k` - Optionally provide a signing secret `K` value, as a 256 bit integer or binary.
"""
@spec sign(binary, Key.t | binary, keyword) :: binary
def sign(message, privkey, opts \\ [])
def sign(message, %Key{privkey: privkey, compressed: compressed}, opts)
when is_binary(privkey)
do
opts = Keyword.put_new(opts, :compressed, compressed)
sign(message, privkey, opts)
end
def sign(message, <<d::big-size(256)>>, opts) do
digest = Keyword.get(opts, :hash, :sha256)
encoding = Keyword.get(opts, :encoding)
hash = hash_message(message, digest)
e = :binary.decode_unsigned(hash)
{q, r, s} = case Keyword.get(opts, :k) do
k when is_integer(k) and 0 < k and k < @crv.n ->
get_qrs(e, d, k)
<<k::big-size(256)>> ->
get_qrs(e, d, k)
nil ->
deterministic_k(hash, d)
end
recid = get_recovery_id(q, r)
sig = %Signature{r: r, s: s, recid: recid}
|> maybe_normalize(opts)
sig
|> maybe_compact(opts)
|> encode(encoding)
|> maybe_recovery(sig, opts)
end
@doc """
Verifies the signature against the given message and public key.
Returns a boolean.
## Accepted options
* `:encoding` - Optionally decode the given signature as `:base64` or `:hex`.
* `:hash` - Digest algorithm to hash the message with. Default is `:sha256`.
"""
@spec verify(Signature.t | binary, binary, Key.t | binary, keyword) :: boolean | :error
def verify(sig, message, pubkey, opts \\ [])
def verify(sig, message, pubkey, opts) when is_binary(pubkey),
do: verify(sig, message, Key.from_pubkey(pubkey), opts)
def verify(sig, message, %Key{} = pubkey, opts) when is_binary(sig) do
encoding = Keyword.get(opts, :encoding)
with {:ok, sig} <- decode(sig, encoding),
%Signature{} = sig <- Signature.parse(sig)
do
verify(sig, message, pubkey, opts)
end
end
def verify(%Signature{r: r, s: s}, message, %Key{point: point}, opts) do
digest = Keyword.get(opts, :hash, :sha256)
e = message
|> hash_message(digest)
|> :binary.decode_unsigned()
i = inv(s, @crv.n)
p = Point.mul(@crv[:G], mod(e * i, @crv.n))
q = Point.mul(point, mod(r * i, @crv.n))
Point.add(p, q)
|> Map.get(:x)
|> Kernel.==(r)
end
# Calculates the QRS values
defp get_qrs(e, d, k) do
q = Point.mul(@crv[:G], k)
r = mod(q.x, @crv.n)
s = (inv(k, @crv.n) * (e + r * d)) |> mod(@crv.n)
{q, r, s}
end
# Hashes the message with the given digest algorithm
defp hash_message(message, digest) when digest in [:sha256, :sha384, :sha512],
do: :crypto.hash(digest, message)
defp hash_message(message, _digest), do: message
# Implements RFC 6979 and returns QRS values from deterministically generated K
defp deterministic_k(hash, d) do
e = :binary.decode_unsigned(hash)
v = :binary.copy(<<1>>, 32)
k = :binary.copy(<<0>>, 32)
k = :crypto.mac(:hmac, :sha256, k, <<v::binary, 0, d::big-size(256), hash::binary>>)
v = :crypto.mac(:hmac, :sha256, k, v)
k = :crypto.mac(:hmac, :sha256, k, <<v::binary, 1, d::big-size(256), hash::binary>>)
v = :crypto.mac(:hmac, :sha256, k, v)
Enum.reduce_while 0..1000, {k, v}, fn i, {k, v} ->
if i == 1000, do: throw "Tried 1000 k values, all were invalid"
v = :crypto.mac(:hmac, :sha256, k, v)
case v do
<<t::big-size(256)>> when 0 < t and t < @crv.n ->
case get_qrs(e, d, t) do
{_, r, s} when r == 0 or s == 0 ->
{:cont, {k, v}}
{q, r, s} ->
{:halt, {q, r, s}}
end
_ ->
k = :crypto.mac(:hmac, :sha256, k, <<v::binary, 0>>)
v = :crypto.mac(:hmac, :sha256, k, v)
{:cont, {k, v}}
end
end
end
# Get the recovery ID from the point and R value
defp get_recovery_id(%{x: x, y: y}, r) when x == r, do: 0 ||| (y &&& 1)
defp get_recovery_id(%{x: _x, y: y}, _r), do: 2 ||| (y &&& 1)
# Normalizes the given signature if opted for
defp maybe_normalize(%Signature{} = sig, opts) do
case Keyword.get(opts, :normalize, true) do
opt when opt in [false, nil] ->
sig
_ ->
Signature.normalize(sig)
end
end
# Returns compact or der encoded signature
defp maybe_compact(%Signature{} = sig, opts) do
case Keyword.get(opts, :compact, false) do
opt when opt in [false, nil] ->
Signature.to_der(sig)
_ ->
Signature.to_compact(sig, Keyword.take(opts, [:compressed]))
end
end
# Returns the signature paired with its recovery id, if opted for
defp maybe_recovery(encoded_sig, %Signature{recid: recid}, opts)
when is_integer(recid)
do
case Keyword.get(opts, :recovery) do
true -> {encoded_sig, recid}
_ -> encoded_sig
end
end
defp maybe_recovery(encoded_sig, _sig, _opts), do: encoded_sig
end
lib/curvy.ex
defmodule BitwiseNif do
@moduledoc """
BitwiseNif: NIF example module showing different NIF scheduling issues
This is an Elixir and Rust port of bitwise. The original C and Erlang code
was written by <NAME> at https://github.com/vinoski/bitwise.
This code was originally presented at Chicago Erlang on 22 Sep
2014. Please see the PDF file in this repository for the presentation.
The exor function variants here all take a binary and a byte value as
arguments and return a binary and either the number of times the
scheduler thread was yielded (if known) or the number of chunks of the
binary that were processed. The returned binary is the same size as the
binary argument, and its value is that of the binary argument with the
byte argument xor'd with each byte of the binary. The idea is that if
you pass in a large enough binary, you can get bad or good NIF behavior
with respect to Erlang scheduler threads depending on which function
variant you call, and different calls take different approaches to
trying to avoid scheduler collapse and other scheduling problems.
This code requires Erlang 17.3 or newer, built with dirty schedulers
enabled.
"""
@on_load :init
# @TODO: There should be an attribute or something that has the module name.
@module BitwiseNif
@doc """
With a large bin argument, `exor/2` and `exor_bad/2` take far too
long for a NIF
"""
def exor(bin, byte) when is_binary(bin) and byte >= 0 and byte < 256 do
:erlang.nif_error({:nif_not_loaded, @module})
end
@doc """
With a large bin argument, `exor/2` and `exor_bad/2` take far too
long for a NIF
"""
def exor_bad(bin, byte) when is_binary(bin) and byte >= 0 and byte < 256 do
:erlang.nif_error({:nif_not_loaded, @module})
end
@doc """
`exor_yield/2` processes bin in chunks and uses `enif_schedule_nif`
to yield the scheduler thread between chunks.
"""
def exor_yield(bin, byte) when is_binary(bin) and byte >= 0 and byte < 256 do
:erlang.nif_error({:nif_not_loaded, @module})
end
@doc """
exor_dirty processes bin on a dirty scheduler.
"""
def exor_dirty(bin, byte) when is_binary(bin) and byte >= 0 and byte < 256 do
:erlang.nif_error({:nif_not_loaded, @module})
end
@doc """
Similar to `exor_yield/2` but do the chunking in Elixir.
"""
def exor_chunks(bin, byte) when is_binary(bin) and byte >= 0 and byte < 256 do
exor_chunks(bin, byte, 4194304, 0, <<>>)
end
def exor_chunks(bin, byte, chunk_size, yields, acc) do
case byte_size(bin) do
size when size > chunk_size ->
<<chunk :: binary-size(chunk_size), rest :: binary>> = bin
{res, _} = exor_bad(chunk, byte)
exor_chunks(rest, byte, chunk_size,
yields + 1, <<acc :: binary, res :: binary>>)
_ ->
{res, _} = exor_bad(bin, byte)
{<<acc :: binary, res :: binary>>, yields}
end
end
@doc """
Count reductions and number of scheduler yields for `fun`. `fun` is
assumed to be one of the above exor variants.
"""
def reds(bin, byte, fun) when is_binary(bin) and byte >= 0 and byte < 256 do
parent = self()
pid = spawn(fn() ->
self = self()
start = :os.timestamp
r0 = :erlang.process_info(self, :reductions)
{_, yields} = fun.(bin, byte)
r1 = :erlang.process_info(self, :reductions)
# Use new time API
t = :timer.now_diff(:os.timestamp, start)
send(parent, {self, {t, yields, r0, r1}})
end)
receive do
{^pid, result} ->
result
end
end
def init() do
# so_name = :filename.join(case :code.priv_dir(@module) do
# {:error, :bad_name} ->
# dir = :code.which(@module)
# :filename.join([:filename.dirname(dir),
# '..', 'priv'])
# dir ->
# dir
# end, :erlang.atom_to_list(@module) ++ '_nif'),
so_name = 'target/release/libbitwise_nif'
:erlang.load_nif(so_name, 0)
end
end
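# For reference, what the NIF computes can be written in (slow) pure Elixir.
# This module is an illustrative addition, not part of the original file:
defmodule BitwiseReference do
  import Bitwise

  @doc "XORs every byte of `bin` with `byte`; same result as the NIF variants."
  def exor(bin, byte) when is_binary(bin) and byte in 0..255 do
    for <<b <- bin>>, into: <<>>, do: <<bxor(b, byte)>>
  end
end

# BitwiseReference.exor(<<1, 2, 3>>, 0xFF) #=> <<254, 253, 252>>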
bitwise.ex
defmodule Money.Currency do
@moduledoc """
Provides currency support to `Money`
Some useful helper functions include:
- `get/1`
- `get!/1`
- `exists?/1`
- `to_atom/1`
- `name/1`
- `name!/1`
- `symbol/1`
- `symbol!/1`
- `all/0`
A helper function exists for each currency using the lowercase three-character currency code
## Example:
iex> Money.Currency.usd(100)
%Money{amount: 100, currency: :USD}
"""
@currencies %{
AED: %{name: "UAE Dirham", symbol: "د.إ", exponent: 2},
AFN: %{name: "Afghani", symbol: "؋", exponent: 2},
ALL: %{name: "Lek", symbol: "Lek", exponent: 2},
AMD: %{name: "Armenian Dram", symbol: "AMD", exponent: 2},
ANG: %{name: "Netherlands Antillian Guilder", symbol: "ƒ", exponent: 2},
AOA: %{name: "Kwanza", symbol: "Kz", exponent: 2},
ARS: %{name: "Argentine Peso", symbol: "$", exponent: 2},
AUD: %{name: "Australian Dollar", symbol: "$", exponent: 2},
AWG: %{name: "Aruban Guilder", symbol: "ƒ", exponent: 2},
AZN: %{name: "Azerbaijanian Manat", symbol: "ман", exponent: 2},
BAM: %{name: "Convertible Marks", symbol: "KM", exponent: 2},
BBD: %{name: "Barbados Dollar", symbol: "$", exponent: 2},
BDT: %{name: "Taka", symbol: "৳", exponent: 2},
BGN: %{name: "Bulgarian Lev", symbol: "лв", exponent: 2},
BHD: %{name: "<NAME>", symbol: ".د.ب", exponent: 3},
BIF: %{name: "Burundi Franc", symbol: "FBu", exponent: 0},
BMD: %{name: "Bermudian Dollar (customarily known as Bermuda Dollar)", symbol: "$", exponent: 2},
BND: %{name: "Brunei Dollar", symbol: "$", exponent: 2},
BOB: %{name: "B<NAME>", symbol: "$b", exponent: 2},
BOV: %{name: "Bol<NAME>", symbol: "$b", exponent: 2},
BRL: %{name: "Brazilian Real", symbol: "R$", exponent: 2},
BSD: %{name: "Bahamian Dollar", symbol: "$", exponent: 2},
BTN: %{name: "Indian Rupee Ngultrum", symbol: "Nu.", exponent: 2},
BWP: %{name: "Pula", symbol: "P", exponent: 2},
BYN: %{name: "Belarusian Ruble", symbol: "p.", exponent: 2},
BYR: %{name: "Belarusian Ruble", symbol: "p.", exponent: 0},
BZD: %{name: "Belize Dollar", symbol: "BZ$", exponent: 2},
CAD: %{name: "Canadian Dollar", symbol: "$", exponent: 2},
CDF: %{name: "Congolese Franc", symbol: "CF", exponent: 2},
CHF: %{name: "Swiss Franc", symbol: "CHF", exponent: 2},
CLF: %{name: "Chilean Peso Unidades de fomento", symbol: "$", exponent: 4},
CLP: %{name: "Chilean Peso Unidades de fomento", symbol: "$", exponent: 0},
CNY: %{name: "<NAME>", symbol: "¥", exponent: 2},
COP: %{name: "Colombian Peso", symbol: "$", exponent: 2},
COU: %{name: "Colombian Peso Unidad de Valor Real", symbol: "$", exponent: 2},
CRC: %{name: "<NAME>", symbol: "₡", exponent: 2},
CUC: %{name: "Cuban Peso Peso Convertible", symbol: "₱", exponent: 2},
CUP: %{name: "Cuban Peso Peso Convertible", symbol: "₱", exponent: 2},
CVE: %{name: "Cape Verde Escudo", symbol: "$", exponent: 0},
CZK: %{name: "Czech Koruna", symbol: "Kč", exponent: 2},
DJF: %{name: "Djibouti Franc", symbol: "Fdj", exponent: 0},
DKK: %{name: "Danish Krone", symbol: "kr.", exponent: 2},
DOP: %{name: "Dominican Peso", symbol: "RD$", exponent: 2},
DZD: %{name: "Algerian Dinar", symbol: "دج", exponent: 2},
EEK: %{name: "Kroon", symbol: "KR", exponent: 2},
EGP: %{name: "Egyptian Pound", symbol: "£", exponent: 2},
ERN: %{name: "Nakfa", symbol: "Nfk", exponent: 2},
ETB: %{name: "Ethiopian Birr", symbol: "Br", exponent: 2},
EUR: %{name: "Euro", symbol: "€", exponent: 2},
FJD: %{name: "Fiji Dollar", symbol: "$", exponent: 2},
FKP: %{name: "Falkland Islands Pound", symbol: "£", exponent: 2},
GBP: %{name: "Pound Sterling", symbol: "£", exponent: 2},
GEL: %{name: "Lari", symbol: "₾", exponent: 2},
GHS: %{name: "Cedi", symbol: "GH₵", exponent: 2},
GIP: %{name: "Gibraltar Pound", symbol: "£", exponent: 2},
GMD: %{name: "Dalasi", symbol: "D", exponent: 2},
GNF: %{name: "Guinea Franc", symbol: "FG", exponent: 0},
GTQ: %{name: "Quetzal", symbol: "Q", exponent: 2},
GYD: %{name: "Guyana Dollar", symbol: "$", exponent: 2},
HKD: %{name: "Hong Kong Dollar", symbol: "$", exponent: 2},
HNL: %{name: "Lempira", symbol: "L", exponent: 2},
HRK: %{name: "Croatian Kuna", symbol: "kn", exponent: 2},
HTG: %{name: "Gourde US Dollar", symbol: " ", exponent: 2},
HUF: %{name: "Forint", symbol: "Ft", exponent: 2},
IDR: %{name: "Rupiah", symbol: "Rp", exponent: 2},
ILS: %{name: "New Israeli Sheqel", symbol: "₪", exponent: 2},
INR: %{name: "Indian Rupee", symbol: "₹", exponent: 2},
IQD: %{name: "Iraqi Dinar", symbol: "ع.د", exponent: 3},
IRR: %{name: "Iranian Rial", symbol: "﷼", exponent: 2},
ISK: %{name: "Iceland Krona", symbol: "kr", exponent: 0},
JMD: %{name: "Jamaican Dollar", symbol: "J$", exponent: 2},
JOD: %{name: "Jordanian Dinar", symbol: "JOD", exponent: 3},
JPY: %{name: "Yen", symbol: "¥", exponent: 0},
KES: %{name: "<NAME>", symbol: "KSh", exponent: 2},
KGS: %{name: "Som", symbol: "лв", exponent: 2},
KHR: %{name: "Riel", symbol: "៛", exponent: 2},
KMF: %{name: "Comoro Franc", symbol: "CF", exponent: 0},
KPW: %{name: "North Korean Won", symbol: "₩", exponent: 2},
KRW: %{name: "Won", symbol: "₩", exponent: 0},
KWD: %{name: "<NAME>", symbol: "د.ك", exponent: 3},
KYD: %{name: "Cayman Islands Dollar", symbol: "$", exponent: 2},
KZT: %{name: "Tenge", symbol: "лв", exponent: 2},
LAK: %{name: "Kip", symbol: "₭", exponent: 2},
LBP: %{name: "Lebanese Pound", symbol: "£", exponent: 2},
LKR: %{name: "Sri Lanka Rupee", symbol: "₨", exponent: 2},
LRD: %{name: "Liberian Dollar", symbol: "$", exponent: 2},
LSL: %{name: "<NAME>", symbol: " ", exponent: 2},
LTL: %{name: "Lithuanian Litas", symbol: "Lt", exponent: 2},
LVL: %{name: "Latvian Lats", symbol: "Ls", exponent: 2},
LYD: %{name: "<NAME>", symbol: "ل.د", exponent: 3},
MAD: %{name: "<NAME>", symbol: "د.م.", exponent: 2},
MDL: %{name: "<NAME>", symbol: "MDL", exponent: 2},
MGA: %{name: "<NAME>", symbol: "Ar", exponent: 2},
MKD: %{name: "Denar", symbol: "ден", exponent: 2},
MMK: %{name: "Kyat", symbol: "K", exponent: 2},
MNT: %{name: "Tugrik", symbol: "₮", exponent: 2},
MOP: %{name: "Pataca", symbol: "MOP$", exponent: 2},
MRO: %{name: "Ouguiya", symbol: "UM", exponent: 2},
MUR: %{name: "<NAME>", symbol: "₨", exponent: 2},
MVR: %{name: "Rufiyaa", symbol: "Rf", exponent: 2},
MWK: %{name: "Kwacha", symbol: "MK", exponent: 2},
MXN: %{name: "Mexican Peso", symbol: "$", exponent: 2},
MXV: %{name: "Mexican Peso Mexican Unidad de Inversion (UDI)", symbol: "UDI", exponent: 2},
MYR: %{name: "Mal<NAME>", symbol: "RM", exponent: 2},
MZN: %{name: "Metical", symbol: "MT", exponent: 2},
NAD: %{name: "Rand <NAME>", symbol: "$", exponent: 2},
NGN: %{name: "Naira", symbol: "₦", exponent: 2},
NIO: %{name: "C<NAME>", symbol: "C$", exponent: 2},
NOK: %{name: "Norwegian Krone", symbol: "kr", exponent: 2},
NPR: %{name: "Nepalese Rupee", symbol: "₨", exponent: 2},
NZD: %{name: "New Zealand Dollar", symbol: "$", exponent: 2},
OMR: %{name: "<NAME>", symbol: "﷼", exponent: 3},
PAB: %{name: "Balboa US Dollar", symbol: "B/.", exponent: 2},
PEN: %{name: "Nuevo Sol", symbol: "S/.", exponent: 2},
PGK: %{name: "Kina", symbol: "K", exponent: 2},
PHP: %{name: "Philippine Peso", symbol: "Php", exponent: 2},
PKR: %{name: "Pakistan Rupee", symbol: "₨", exponent: 2},
PLN: %{name: "Zloty", symbol: "zł", exponent: 2},
PYG: %{name: "Guarani", symbol: "₲", exponent: 0},
QAR: %{name: "Qatari Rial", symbol: "﷼", exponent: 2},
RON: %{name: "New Leu", symbol: "lei", exponent: 2},
RSD: %{name: "Serbian Dinar", symbol: "Дин.", exponent: 2},
RUB: %{name: "Russian Ruble", symbol: "₽", exponent: 2},
RWF: %{name: "Rwanda Franc", symbol: " ", exponent: 0},
SAR: %{name: "Saudi Riyal", symbol: "﷼", exponent: 2},
SBD: %{name: "Solomon Islands Dollar", symbol: "$", exponent: 2},
SCR: %{name: "Seychelles Rupee", symbol: "₨", exponent: 2},
SDG: %{name: "Sudanese Pound", symbol: "SDG", exponent: 2},
SEK: %{name: "Swedish Krona", symbol: "kr", exponent: 2},
SGD: %{name: "Singapore Dollar", symbol: "$", exponent: 2},
SHP: %{name: "Saint Hel<NAME>", symbol: "£", exponent: 2},
SLL: %{name: "Leone", symbol: "Le", exponent: 2},
SOS: %{name: "<NAME>", symbol: "S", exponent: 2},
SRD: %{name: "<NAME>", symbol: "$", exponent: 2},
STD: %{name: "Dobra", symbol: "Db", exponent: 2},
SVC: %{name: "El Salvador Colon US Dollar", symbol: "$", exponent: 2},
SYP: %{name: "<NAME>", symbol: "£", exponent: 2},
SZL: %{name: "Lilangeni", symbol: "E", exponent: 2},
THB: %{name: "Baht", symbol: "฿", exponent: 2},
TJS: %{name: "Somoni", symbol: " ", exponent: 2},
TMT: %{name: "Manat", symbol: "₼", exponent: 2},
TND: %{name: "Tunisian Dinar", symbol: "د.ت", exponent: 2},
TOP: %{name: "Pa'anga", symbol: "T$", exponent: 2},
TRY: %{name: "Turkish Lira", symbol: "TL", exponent: 2},
TTD: %{name: "Trinidad and Tobago Dollar", symbol: "TT$", exponent: 2},
TWD: %{name: "New Taiwan Dollar", symbol: "NT$", exponent: 2},
TZS: %{name: "<NAME>", symbol: "Tsh", exponent: 2},
UAH: %{name: "Hryvnia", symbol: "₴", exponent: 2},
UGX: %{name: "<NAME>", symbol: "Ush", exponent: 0},
USD: %{name: "US Dollar", symbol: "$", exponent: 2},
UYI: %{name: "Peso Uruguayo Uruguay Peso en Unidades Indexadas", symbol: "$U", exponent: 0},
UYU: %{name: "Peso Uruguayo Uruguay Peso en Unidades Indexadas", symbol: "$U", exponent: 2},
UZS: %{name: "<NAME>", symbol: "лв", exponent: 2},
VEF: %{name: "<NAME>", symbol: "Bs", exponent: 2},
VND: %{name: "Dong", symbol: "₫", exponent: 0},
VUV: %{name: "Vatu", symbol: "VT", exponent: 0},
WST: %{name: "Tala", symbol: "WS$", exponent: 2},
XAF: %{name: "CFA Franc BEAC", symbol: "FCFA", exponent: 0},
XAG: %{name: "Silver", symbol: " ", exponent: 2},
XAU: %{name: "Gold", symbol: " ", exponent: 2},
XBA: %{name: "Bond Markets Units European Composite Unit (EURCO)", symbol: " ", exponent: 2},
XBB: %{name: "European Monetary Unit (E.M.U.-6)", symbol: " ", exponent: 2},
XBC: %{name: "European Unit of Account 9(E.U.A.-9)", symbol: " ", exponent: 2},
XBD: %{name: "European Unit of Account 17(E.U.A.-17)", symbol: " ", exponent: 2},
XCD: %{name: "East Caribbean Dollar", symbol: "$", exponent: 2},
XDR: %{name: "SDR", symbol: " ", exponent: 2},
XFU: %{name: "UIC-Franc", symbol: " ", exponent: 2},
XOF: %{name: "CFA Franc BCEAO", symbol: " ", exponent: 0},
XPD: %{name: "Palladium", symbol: " ", exponent: 2},
XPF: %{name: "CF<NAME>", symbol: " ", exponent: 0},
XPT: %{name: "Platinum", symbol: " ", exponent: 2},
XTS: %{name: "Codes specifically reserved for testing purposes", symbol: " ", exponent: 2},
YER: %{name: "<NAME>", symbol: "﷼", exponent: 2},
ZAR: %{name: "Rand", symbol: "R", exponent: 2},
ZMK: %{name: "<NAME>", symbol: "ZK", exponent: 2},
ZWL: %{name: "Zim<NAME>", symbol: "$", exponent: 2}
}
@currencies |> Enum.each(fn ({cur, detail}) ->
currency = to_string(cur) |> String.downcase
@doc """
Convenience method to create a `Money` object for the #{detail.name} (#{cur}) currency.
## Example:
iex> Money.Currency.#{currency}(100)
%Money{amount: 100, currency: :#{cur}}
"""
def unquote(:"#{currency}")(amount) do
Money.new(amount, unquote(cur))
end
end)
@spec all() :: map
@doc ~S"""
Returns all the currencies
## Example:
iex> Money.Currency.all |> Map.fetch!(:GBP)
%{name: "Pound Sterling", symbol: "£", exponent: 2}
"""
def all, do: @currencies
@spec exists?(Money.t | String.t | atom) :: boolean
@doc ~S"""
Returns true if a currency is defined
## Example:
iex> Money.Currency.exists?(:USD)
true
iex> Money.Currency.exists?("USD")
true
iex> Money.Currency.exists?(:WRONG)
false
"""
def exists?(%Money{currency: currency}),
do: exists?(currency)
def exists?(currency),
do: Map.has_key?(@currencies, convert_currency(currency))
@spec get(Money.t | String.t | atom) :: map | nil
@doc ~S"""
Returns a map with the name and symbol of the currency or nil if it doesn’t exist.
## Example:
iex> Money.Currency.get(:USD)
%{name: "US Dollar", symbol: "$", exponent: 2}
iex> Money.Currency.get(:WRONG)
nil
"""
def get(%Money{currency: currency}),
do: get(currency)
def get(currency),
do: @currencies[convert_currency(currency)]
@spec get!(Money.t | String.t | atom) :: map
@doc ~S"""
Returns a map with the name and symbol of the currency.
An ArgumentError is raised if the currency doesn’t exist.
## Example:
iex> Money.Currency.get!(:USD)
%{name: "US Dollar", symbol: "$", exponent: 2}
iex> Money.Currency.get!(:WRONG)
** (ArgumentError) currency WRONG doesn’t exist
"""
def get!(currency),
do: get(currency) || currency_doesnt_exist_error(currency)
@spec to_atom(Money.t | String.t | atom) :: atom
@doc ~S"""
Returns the atom representation of the currency key
An ArgumentError is raised if the currency doesn’t exist.
## Example:
iex> Money.Currency.to_atom("usd")
:USD
iex> Money.Currency.to_atom(:WRONG)
** (ArgumentError) currency WRONG doesn’t exist
"""
def to_atom(%Money{currency: currency}),
do: to_atom(currency)
def to_atom(currency) do
currency = convert_currency(currency)
get!(currency)
currency
end
@spec name(Money.t | String.t | atom) :: String.t
@doc ~S"""
Returns the name of the currency or nil if it doesn’t exist.
## Example:
iex> Money.Currency.name(:USD)
"US Dollar"
iex> Money.Currency.name(:WRONG)
nil
"""
def name(%Money{currency: currency}),
do: name(currency)
def name(currency),
do: get(currency)[:name]
@spec name!(Money.t | String.t | atom) :: String.t
@doc ~S"""
Returns the name of the currency.
An ArgumentError is raised if the currency doesn’t exist.
## Example:
iex> Money.Currency.name!(:USD)
"US Dollar"
iex> Money.Currency.name!(:WRONG)
** (ArgumentError) currency WRONG doesn’t exist
"""
def name!(currency),
do: name(currency) || currency_doesnt_exist_error(currency)
@spec symbol(Money.t | String.t | atom) :: String.t
@doc ~S"""
Returns the symbol of the currency or nil if it doesn’t exist.
## Example:
iex> Money.Currency.symbol(:USD)
"$"
iex> Money.Currency.symbol(:WRONG)
nil
"""
def symbol(%Money{currency: currency}),
do: symbol(currency)
def symbol(currency),
do: get(currency)[:symbol]
@spec symbol!(Money.t | String.t | atom) :: String.t
@doc ~S"""
Returns the symbol of the currency.
An ArgumentError is raised if the currency doesn’t exist.
## Example:
iex> Money.Currency.symbol!(:USD)
"$"
iex> Money.Currency.symbol!(:WRONG)
** (ArgumentError) currency WRONG doesn’t exist
"""
def symbol!(currency),
do: symbol(currency) || currency_doesnt_exist_error(currency)
@spec exponent(Money.t | String.t | atom) :: integer
@doc ~S"""
Returns the exponent of the currency or nil if it doesn’t exist.
## Example:
iex> Money.Currency.exponent(:USD)
2
iex> Money.Currency.exponent(:WRONG)
nil
"""
def exponent(%Money{currency: currency}),
do: exponent(currency)
def exponent(currency),
do: get(currency)[:exponent]
@spec exponent!(Money.t | String.t | atom) :: integer
@doc ~S"""
Returns the exponent of the currency.
An ArgumentError is raised if the currency doesn’t exist.
## Example:
iex> Money.Currency.exponent!(:USD)
2
iex> Money.Currency.exponent!(:WRONG)
** (ArgumentError) currency WRONG doesn’t exist
"""
def exponent!(currency),
do: exponent(currency) || currency_doesnt_exist_error(currency)
@spec sub_units_count!(Money.t | String.t | atom) :: integer
@doc ~S"""
Returns the sub_units_count of the currency.
An ArgumentError is raised if the currency doesn’t exist.
## Example:
iex> Money.Currency.sub_units_count!(:USD)
100
iex> Money.Currency.sub_units_count!(:JPY)
1
iex> Money.Currency.sub_units_count!(:WRONG)
** (ArgumentError) currency WRONG doesn’t exist
"""
def sub_units_count!(currency) do
exponent = exponent!(currency)
round(:math.pow(10, exponent))
end
defp convert_currency(currency) when is_binary(currency) do
try do
currency |> String.upcase |> String.to_existing_atom |> convert_currency
rescue
_ -> nil
end
end
defp convert_currency(currency), do: currency
defp currency_doesnt_exist_error(currency),
do: raise ArgumentError, "currency #{currency} doesn’t exist"
end
lib/money/currency.ex
defmodule Exzeitable.Database do
@moduledoc "Database interactions"
import Ecto.Query
@doc "Get the data using query"
@spec get_records(map) :: [map]
def get_records(%{query: query} = assigns) do
query
|> order_query(assigns)
|> search_query(assigns)
|> paginate_query(assigns)
|> get_query(assigns)
end
@spec order_query(Ecto.Query.t(), map) :: Ecto.Query.t()
defp order_query(query, %{order: nil}), do: query
defp order_query(query, %{order: order}) do
from(q in exclude(query, :order_by), order_by: ^order)
end
@spec search_query(Ecto.Query.t(), map) :: Ecto.Query.t()
defp search_query(query, %{search: ""}), do: query
defp search_query(query, %{search: search, module: module}) do
apply(module, :do_search, [query, search])
end
@spec remove_order(Ecto.Query.t()) :: Ecto.Query.t()
defp remove_order(query), do: exclude(query, :order_by)
@spec paginate_query(Ecto.Query.t(), map) :: Ecto.Query.t()
defp paginate_query(query, %{per_page: per_page, page: page}) do
offset = if page == 1, do: 0, else: (page - 1) * per_page
from(q in query, limit: ^per_page, offset: ^offset)
end
# Filter out the previous selects and preloads, because we only need the ids to get a count
@spec select_ids(Ecto.Query.t()) :: Ecto.Query.t()
defp select_ids(query) do
query =
query
|> exclude(:select)
|> exclude(:preload)
from(q in query, select: count(q.id))
end
# Repo.all
@spec get_query(Ecto.Query.t(), map) :: [map]
defp get_query(query, %{repo: repo}), do: apply(repo, :all, [query])
@doc "I want to just do a select: count(c.id)"
@spec get_record_count(map) :: integer
def get_record_count(%{query: query} = assigns) do
query
|> select_ids()
|> search_query(assigns)
|> remove_order()
|> get_query(assigns)
|> List.first()
end
@doc "We only want letters to avoid SQL injection attacks"
@spec prefix_search(String.t()) :: String.t()
def prefix_search(terms) do
terms
|> String.trim()
|> String.replace(~r/[^\w\s]|_/u, "")
|> String.replace(~r/\s+/u, ":* & ")
|> Kernel.<>(":*")
end
@doc """
Generates the magic SQL fragment that performs search dynamically.
Created outside macro to bypass ecto restrictions
"""
@spec tsvector_string([keyword]) :: String.t()
def tsvector_string(fields) do
search_columns =
fields
|> Enum.filter(fn {_k, field} -> Keyword.fetch!(field, :search) end)
|> Enum.map(fn {key, _v} -> "coalesce(#{Atom.to_string(key)}, ' ')" end)
|> Enum.join(" || ' ' || ")
"to_tsvector('english', #{search_columns}) @@ to_tsquery(?)"
end
end
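# Two quick examples of the helpers above (added for illustration):
#
#     iex> Exzeitable.Database.prefix_search("hello, world!")
#     "hello:* & world:*"
#
#     iex> Exzeitable.Database.tsvector_string(name: [search: true], age: [search: false])
#     "to_tsvector('english', coalesce(name, ' ')) @@ to_tsquery(?)"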
lib/exzeitable/database.ex
defmodule Grizzly.ZIPGateway.Config do
@moduledoc false
# This module is for making the `zipgateway.cfg` file
require Logger
alias Grizzly.Supervisor
@type t :: %__MODULE__{
ca_cert: Path.t(),
cert: Path.t(),
priv_key: Path.t(),
eeprom_file: Path.t() | nil,
tun_script: Path.t(),
pvs_storage_file: Path.t(),
provisioning_config_file: Path.t(),
pan_ip: :inet.ip_address(),
lan_ip: :inet.ip_address(),
lan_gw6: String.t(),
psk: String.t(),
manufacturer_id: non_neg_integer() | nil,
hardware_version: non_neg_integer() | nil,
product_id: non_neg_integer() | nil,
product_type: non_neg_integer() | nil,
serial_log: String.t() | nil,
extra_classes: [byte()],
unsolicited_destination: {:inet.ip_address(), :inet.port_number()},
database_file: Path.t() | nil,
identify_script: Path.t() | nil,
rf_region: Supervisor.rf_region() | nil,
power_level: {Supervisor.tx_power(), Supervisor.measured_power()} | nil
}
defstruct ca_cert: "./Portal.ca_x509.pem",
cert: "./ZIPR.x509_1024.pem",
priv_key: "./ZIPR.key_1024.pem",
eeprom_file: nil,
tun_script: "./zipgateway.tun",
pvs_storage_file: "/root/provisioning_list_store.dat",
provisioning_config_file: "/data/zipgateway_provisioning_list.cfg",
pan_ip: {0xFD00, 0xBBBB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
lan_ip: {0xFD00, 0xAAAA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
lan_gw6: "::1",
psk: "123456789012345678901234567890AA",
serial_log: nil,
product_id: nil,
product_type: nil,
hardware_version: nil,
manufacturer_id: nil,
extra_classes: [0x85, 0x59, 0x5A, 0x8E, 0x6C, 0x8F],
unsolicited_destination: {{0xFD00, 0xAAAA, 0, 0, 0, 0, 0, 0x0002}, 41230},
database_file: nil,
identify_script: nil,
rf_region: nil,
power_level: nil
@doc """
Make a new `ZipgatewayCfg.t()` from the supplied options
"""
@spec new(map()) :: t()
def new(opts \\ %{}) do
opts =
Map.take(opts, [
:manufacturer_id,
:hardware_version,
:product_id,
:product_type,
:serial_log,
:tun_script,
:lan_ip,
:pan_ip,
:database_file,
:eeprom_file,
:rf_region,
:power_level
])
struct(__MODULE__, opts)
end
@doc """
Write the contents of the `ZipgatewayCfg.t()` to the file system
"""
@spec write(t(), Path.t()) :: :ok | {:error, File.posix()}
def write(cfg, path) do
contents = __MODULE__.to_string(cfg)
File.write(path, contents)
end
@doc """
Turn the `ZipgatewayCfg.t()` into a string
"""
@spec to_string(t()) :: String.t()
def to_string(cfg) do
"""
ZipCaCert=#{cfg.ca_cert}
ZipCert=#{cfg.cert}
ZipPrivKey=#{cfg.priv_key}
TunScript=#{cfg.tun_script}
PVSStorageFile=#{cfg.pvs_storage_file}
ProvisioningConfigFile=#{cfg.provisioning_config_file}
ZipLanGw6=#{cfg.lan_gw6}
ZipPSK=#{cfg.psk}
"""
|> maybe_put_config_item(cfg, :serial_log, "SerialLog")
|> maybe_put_config_item(cfg, :product_id, "ZipProductID")
|> maybe_put_config_item(cfg, :manufacturer_id, "ZipManufacturerID")
|> maybe_put_config_item(cfg, :hardware_version, "ZipHardwareVersion")
|> maybe_put_config_item(cfg, :product_type, "ZipProductType")
|> maybe_put_config_item(cfg, :extra_classes, "ExtraClasses")
|> maybe_put_config_item(cfg, :pan_ip, "ZipPanIp6")
|> maybe_put_config_item(cfg, :lan_ip, "ZipLanIp6")
|> maybe_put_config_item(cfg, :unsolicited_destination, nil)
|> maybe_put_config_item(cfg, :database_file, "ZipGwDatabase")
|> maybe_put_config_item(cfg, :eeprom_file, "Eepromfile")
|> maybe_put_config_item(cfg, :identify_script, "ZipNodeIdentifyScript")
|> maybe_put_config_item(cfg, :rf_region, "ZWRFRegion")
|> maybe_put_config_item(cfg, :power_level, "")
end
@doc """
Ensure required files are on disk and contain the correct contents
This is useful to ensure other tools provided by `zipgateway` can work.
"""
@spec ensure_files(t()) :: t()
def ensure_files(config) do
:ok = ensure_provisioning_list_config(config.provisioning_config_file)
config
end
defp ensure_provisioning_list_config(provisioning_list_config_path) do
if File.exists?(provisioning_list_config_path) do
:ok
else
contents = """
# Provisioning list for Z/IP Gateway Smart Start devices.
ZIPGateway Smart Start Provisioning List Configuration, version = 1.0.
"""
case File.write(provisioning_list_config_path, contents) do
:ok ->
:ok
{:error, reason} ->
Logger.warn("Failed to write provision list file: #{inspect(reason)}")
end
end
end
defp maybe_put_config_item(config_string, cfg, :extra_classes = field, cfg_name) do
case Map.get(cfg, field) do
nil ->
config_string
extra_command_classes ->
extra_command_classes_string = Enum.join(extra_command_classes, " ")
config_string <> "#{cfg_name}= #{extra_command_classes_string}\n"
end
end
defp maybe_put_config_item(config_string, cfg, :unsolicited_destination, _) do
{ip, port} = cfg.unsolicited_destination
ip_string =
ip
|> :inet.ntoa()
|> Kernel.to_string()
config_string <>
"ZipUnsolicitedDestinationIp6=#{ip_string}\n" <>
"ZipUnsolicitedDestinationPort=#{port}\n"
end
defp maybe_put_config_item(config_string, cfg, :identify_script = field, cfg_name) do
case Map.get(cfg, field) do
nil ->
script_path = Application.app_dir(:grizzly, ["priv", "indicator.sh"])
config_string <> "#{cfg_name}=#{script_path}\n"
script_path ->
config_string <> "#{cfg_name}=#{script_path}\n"
end
end
defp maybe_put_config_item(config_string, cfg, :power_level = field, _cfg_name) do
case Map.get(cfg, field) do
nil ->
config_string
{tx_powerlevel, measured_dbm} ->
config_string <>
"NormalTxPowerLevel=#{tx_powerlevel}\nMeasured0dBmPower=#{measured_dbm}\n"
end
end
defp maybe_put_config_item(config_string, cfg, field, cfg_name)
when field in [:pan_ip, :lan_ip] do
ip =
cfg
|> Map.get(field)
|> :inet.ntoa()
|> List.to_string()
config_string <> "#{cfg_name}=#{ip}\n"
end
defp maybe_put_config_item(config_string, cfg, :rf_region, cfg_name) do
case Map.get(cfg, :rf_region) do
nil ->
config_string
region ->
config_string <> "#{cfg_name}=#{rf_region(region)}\n"
end
end
defp maybe_put_config_item(config_string, cfg, field, cfg_name) do
cfg_item = Map.get(cfg, field)
if cfg_item != nil do
config_string <> "#{cfg_name}=#{cfg_item}\n"
else
config_string
end
end
defp rf_region(:eu), do: 0x00
defp rf_region(:us), do: 0x01
defp rf_region(:anz), do: 0x02
defp rf_region(:hk), do: 0x03
defp rf_region(:id), do: 0x05
defp rf_region(:il), do: 0x06
defp rf_region(:ru), do: 0x07
defp rf_region(:cn), do: 0x08
defp rf_region(:us_lr), do: 0x09
defp rf_region(:jp), do: 0x20
defp rf_region(:kr), do: 0x21
end
|
lib/grizzly/zipgateway/config.ex
| 0.820721 | 0.420481 |
config.ex
|
starcoder
|
defmodule Mix.Tasks.Release do
@moduledoc """
Build a release for the current mix application.
## Command line options
* `--name` - selects a specific release to build
* `--env` - selects a specific release environment to build with
* `--profile` - selects both a release and environment, syntax for profiles is `name:env`
Releases and environments are defined in `rel/config.exs`, created via
`release.init`. When determining the name and environment to use, refer to the
definitions in that file if you are not sure what options are available.
* `--erl` - provide extra flags to `erl` when running the release, expects a string
* `--dev` - build the release in "dev mode", which symlinks build artifacts into
the release rather than copying them; this significantly speeds up release builds
and makes it possible to recompile the project and have the release pick up the
changes without rebuilding the release.
* `--silent` - mutes all logging output
* `--quiet` - reduces logging output to essentials
* `--verbose` - produces detailed output about release assembly
* `--no-tar` - skips packaging the release in a tarball after assembly
* `--warnings-as-errors` - treats any release-time warnings as errors which fail the build
* `--no-warn-missing` - ignores warnings about missing applications
### Upgrades
You can tell Distillery to build an upgrade with `--upgrade`.
Upgrades require a source version and a target version (the current version).
Distillery will automatically determine a source version by looking at previously
built releases in the output directory, and selecting the most recent. If none
are available, building the upgrade will fail. You can specify a specific version
to upgrade from with `--upfrom`, which expects a version string. If the selected
version cannot be found, the upgrade build will fail.
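
For example (the version shown is illustrative):

    # Build an upgrade from the most recently built release
    MIX_ENV=prod mix release --upgrade

    # Build an upgrade from a specific source version
    MIX_ENV=prod mix release --upgrade --upfrom=1.0.0
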
### Executables
Distillery can build pseudo-executable files as an artifact, rather than plain
tarballs. These executables are not true executables, but rather self-extracting
TAR archives, which handle extraction and passing any command-line arguments to
the appropriate shell scripts in the release. The following flags are used for
these executables:
* `--executable` - tells Distillery to produce a self-extracting archive
* `--transient` - tells Distillery to produce a self-extracting archive which
will remove the extracted contents from disk after execution
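
For example:

    # Produce a self-extracting archive that removes its extracted
    # contents from disk after execution
    MIX_ENV=prod mix release --executable --transient
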
## Usage
It is generally recommended to configure Distillery via `rel/config.exs` and
simply run `mix release` with `MIX_ENV` set to the Mix environment you are targeting.
The following are some usage examples:
# Builds a release with MIX_ENV=dev (the default)
mix release
# Builds a release with MIX_ENV=prod
MIX_ENV=prod mix release
# Builds a release for a specific release environment
MIX_ENV=prod mix release --env=dev
The default configuration produced by `release.init` will result in `mix release`
selecting the first release in the config file (`rel/config.exs`), and the
environment which matches the current Mix environment (i.e. the value of `MIX_ENV`).
"""
@shortdoc "Build a release for the current mix application"
use Mix.Task
alias Mix.Releases.Config
alias Mix.Releases.Release
alias Mix.Releases.Shell
alias Mix.Releases.Assembler
alias Mix.Releases.Archiver
alias Mix.Releases.Errors
@spec run(OptionParser.argv()) :: no_return
def run(args) do
# Parse options
opts = parse_args(args)
verbosity = Keyword.get(opts, :verbosity)
Shell.configure(verbosity)
# make sure we've compiled latest
Mix.Task.run("compile", [])
# make sure loadpaths are updated
Mix.Task.run("loadpaths", [])
# load release configuration
Shell.debug("Loading configuration..")
case Config.get(opts) do
{:error, {:config, :not_found}} ->
Shell.error("You are missing a release config file. Run the release.init task first")
System.halt(1)
{:error, {:config, reason}} ->
Shell.error("Failed to load config:\n #{reason}")
System.halt(1)
{:ok, config} ->
archive? = not Keyword.get(opts, :no_tar, false)
Shell.info("Assembling release..")
do_release(config, archive?: archive?)
end
end
defp do_release(config, archive?: false) do
case Assembler.assemble(config) do
{:ok, %Release{name: name} = release} ->
print_success(release, name)
{:error, _} = err ->
Shell.error(Errors.format_error(err))
System.halt(1)
end
rescue
e ->
Shell.error(
"Release failed: #{Exception.message(e)}\n" <>
Exception.format_stacktrace(System.stacktrace())
)
System.halt(1)
end
defp do_release(config, archive?: true) do
case Assembler.assemble(config) do
{:ok, %Release{name: name} = release} ->
if release.profile.dev_mode and not Release.executable?(release) do
Shell.warn("You have set dev_mode to true, skipping archival phase")
print_success(release, name)
else
Shell.info("Packaging release..")
case Archiver.archive(release) do
{:ok, _archive_path} ->
print_success(release, name)
{:error, _} = err ->
Shell.error(Errors.format_error(err))
System.halt(1)
end
end
{:error, _} = err ->
Shell.error(Errors.format_error(err))
System.halt(1)
end
rescue
e ->
Shell.error(
"Release failed: #{Exception.message(e)}\n" <>
Exception.format_stacktrace(System.stacktrace())
)
System.halt(1)
end
@spec print_success(Release.t(), atom) :: :ok
defp print_success(%{profile: %{output_dir: output_dir}} = release, app) do
relative_output_dir = Path.relative_to_cwd(output_dir)
app =
cond do
Release.executable?(release) ->
"#{app}.run"
:else ->
case :os.type() do
{:win32, _} -> "#{app}.bat"
{:unix, _} -> "#{app}"
end
end
bin = Path.join([relative_output_dir, "bin", app])
unless Shell.verbosity() in [:silent, :quiet] do
Shell.writef("Release successfully built!\n", :green)
Shell.writef(
"To start the release you have built, you can use one of the following tasks:\n\n",
:green
)
Shell.writef(" # start a shell, like 'iex -S mix'\n", :normal)
Shell.writef(" > #{bin} #{Shell.colorf("console", :white)}", :cyan)
Shell.write("\n\n")
Shell.writef(" # start in the foreground, like 'mix run --no-halt'\n", :normal)
Shell.writef(" > #{bin} #{Shell.colorf("foreground", :white)}", :cyan)
Shell.write("\n\n")
Shell.writef(
" # start in the background, must be stopped with the 'stop' command\n",
:normal
)
Shell.writef(" > #{bin} #{Shell.colorf("start", :white)}", :cyan)
Shell.write("\n\n")
Shell.writef("If you started a release elsewhere, and wish to connect to it:\n\n", :green)
Shell.writef(" # connects a local shell to the running node\n", :normal)
Shell.writef(" > #{bin} #{Shell.colorf("remote_console", :white)}", :cyan)
Shell.write("\n\n")
Shell.writef(" # connects directly to the running node's console\n", :normal)
Shell.writef(" > #{bin} #{Shell.colorf("attach", :white)}", :cyan)
Shell.write("\n\n")
Shell.writef("For a complete listing of commands and their use:\n\n", :green)
Shell.writef(" > #{bin} #{Shell.colorf("help", :white)}", :cyan)
Shell.write("\n")
end
end
@doc false
@spec parse_args(OptionParser.argv()) :: Keyword.t() | no_return
@spec parse_args(OptionParser.argv(), Keyword.t()) :: Keyword.t() | no_return
def parse_args(argv, opts \\ []) do
switches = [
silent: :boolean,
quiet: :boolean,
verbose: :boolean,
executable: :boolean,
transient: :boolean,
dev: :boolean,
erl: :string,
run_erl_env: :string,
no_tar: :boolean,
upgrade: :boolean,
upfrom: :string,
name: :string,
profile: :string,
env: :string,
no_warn_missing: :boolean,
warnings_as_errors: :boolean
]
flags =
if Keyword.get(opts, :strict, true) do
{flags, _} = OptionParser.parse!(argv, strict: switches)
flags
else
{flags, _, _} = OptionParser.parse(argv, strict: switches)
flags
end
defaults = %{
verbosity: :normal,
selected_release: :default,
selected_environment: :default,
executable: [enabled: false, transient: false],
is_upgrade: false,
no_tar: false,
upgrade_from: :latest
}
do_parse_args(flags, defaults)
end
defp do_parse_args([], acc), do: Map.to_list(acc)
defp do_parse_args([{:verbose, _} | rest], acc) do
do_parse_args(rest, Map.put(acc, :verbosity, :verbose))
end
defp do_parse_args([{:quiet, _} | rest], acc) do
do_parse_args(rest, Map.put(acc, :verbosity, :quiet))
end
defp do_parse_args([{:silent, _} | rest], acc) do
do_parse_args(rest, Map.put(acc, :verbosity, :silent))
end
defp do_parse_args([{:profile, profile} | rest], acc) do
case String.split(profile, ":", trim: true, parts: 2) do
[rel, env] ->
new_acc =
acc
|> Map.put(:selected_release, rel)
|> Map.put(:selected_environment, env)
do_parse_args(rest, new_acc)
other ->
Shell.fail!("invalid profile name `#{other}`, must be `name:env`")
end
end
defp do_parse_args([{:name, name} | rest], acc) do
do_parse_args(rest, Map.put(acc, :selected_release, String.to_atom(name)))
end
defp do_parse_args([{:env, name} | rest], acc) do
do_parse_args(rest, Map.put(acc, :selected_environment, String.to_atom(name)))
end
defp do_parse_args([{:no_warn_missing, true} | rest], acc) do
Application.put_env(:distillery, :no_warn_missing, true)
do_parse_args(rest, acc)
end
defp do_parse_args([{:no_warn_missing, apps} | rest], acc) when is_list(apps) do
Application.put_env(:distillery, :no_warn_missing, apps)
do_parse_args(rest, acc)
end
defp do_parse_args([{:no_tar, _} | rest], acc) do
do_parse_args(rest, Map.put(acc, :no_tar, true))
end
defp do_parse_args([{:executable, _} | _rest], %{is_upgrade: true}) do
Shell.fail!("You cannot combine --executable with --upgrade")
end
defp do_parse_args([{:executable, val} | rest], acc) do
case :os.type() do
{:win32, _} when val == true ->
Shell.fail!("--executable is not supported on Windows")
_ ->
case Map.get(acc, :executable) do
nil ->
do_parse_args(rest, Map.put(acc, :executable, enabled: val, transient: false))
opts when is_list(opts) ->
do_parse_args(rest, Map.put(acc, :executable, Keyword.put(opts, :enabled, val)))
end
end
end
# :executable is stored as a keyword list, so check its :enabled flag
defp do_parse_args([{:upgrade, _} | _rest], %{executable: [enabled: true, transient: _]}) do
Shell.fail!("You cannot combine --executable with --upgrade")
end
defp do_parse_args([{:upgrade, _} | rest], acc) do
do_parse_args(rest, Map.put(acc, :is_upgrade, true))
end
defp do_parse_args([{:warnings_as_errors, _} | rest], acc) do
Application.put_env(:distillery, :warnings_as_errors, true)
do_parse_args(rest, acc)
end
defp do_parse_args([{:transient, val} | rest], acc) do
executable =
case Map.get(acc, :executable) do
e when e in [nil, false] ->
[enabled: false, transient: val]
e when is_list(e) ->
Keyword.put(e, :transient, val)
end
do_parse_args(rest, Map.put(acc, :executable, executable))
end
defp do_parse_args([{:upfrom, version} | rest], acc) do
do_parse_args(rest, Map.put(acc, :upgrade_from, version))
end
end
|
lib/distillery/tasks/release.ex
| 0.82828 | 0.53692 |
release.ex
|
starcoder
|
defmodule Nx.Defn.Evaluator do
@moduledoc """
The default implementation of a `Nx.Defn.Compiler`
that evaluates the expression tree against the
tensor backend.
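
A minimal sketch (the module and function are illustrative; this compiler
is used by default when no other compiler is configured):

    defmodule MyMath do
      import Nx.Defn

      defn add(a, b), do: a + b
    end

    MyMath.add(Nx.tensor(1), Nx.tensor(2))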
"""
@behaviour Nx.Defn.Compiler
alias Nx.Defn.{Expr, Tree}
@creation_ops [:tensor, :eye, :iota, :random_normal, :random_uniform, :from_binary]
@impl true
def __async__(key, vars, fun, opts) do
Nx.Defn.Async.async(fn -> __jit__(key, vars, fun, opts) end)
end
@impl true
def __jit__(_key, vars, fun, _opts) do
fun.(vars)
|> Tree.composite(%{}, &eval(&1, vars, &2))
|> elem(0)
end
defp eval(%Nx.Tensor{data: %Expr{op: :fun, args: [_, _, fun]}}, _vars, cache) do
{fun, cache}
end
defp eval(%Nx.Tensor{data: %Expr{op: :parameter, args: [i]}}, vars, cache) do
{Enum.fetch!(vars, i), cache}
end
defp eval(%Nx.Tensor{data: %Expr{op: :tensor, args: [t]}}, _vars, cache) do
{t, cache}
end
defp eval(%Nx.Tensor{data: %Expr{op: :cond, args: [clauses, last]}}, vars, cache) do
{res, cache} = find_clause(clauses, last, vars, cache)
Tree.composite(res, cache, &eval(&1, vars, &2))
end
defp eval(%Nx.Tensor{data: %Expr{op: :elem, args: args}}, vars, cache) do
[tuple, i, _size] = args
{tuple, cache} = Tree.composite(tuple, cache, &eval(&1, vars, &2))
{elem(tuple, i), cache}
end
defp eval(%Nx.Tensor{data: %Expr{op: :metadata, args: [expr, _meta]}}, vars, cache) do
eval(expr, vars, cache)
end
defp eval(%Nx.Tensor{data: %Expr{op: op, id: id} = expr, type: type} = ans, vars, cache) do
case cache do
%{^id => res} ->
{res, cache}
%{} when op in @creation_ops ->
{backend, _} = Nx.default_backend()
res = apply(backend, op, eval_args(type, ans, expr.args))
{res, Map.put(cache, id, res)}
%{} ->
{args, cache} = Tree.traverse_args(ans, cache, &eval(&1, vars, &2))
res = apply(Nx.Shared.find_impl!(args), op, eval_args(type, ans, args))
{res, Map.put(cache, id, res)}
end
end
defp eval(other, _vars, cache) do
{other, cache}
end
defp eval_args({:tuple, _}, _, args), do: args
defp eval_args(_, ans, args), do: [ans | args]
defp find_clause([{pred, clause} | clauses], last, vars, cache) do
{pred, cache} = eval(pred, vars, cache)
if Nx.to_scalar(pred) != 0, do: {clause, cache}, else: find_clause(clauses, last, vars, cache)
end
defp find_clause([], last, _vars, cache) do
{last, cache}
end
end
|
lib/nx/defn/evaluator.ex
| 0.808483 | 0.497864 |
evaluator.ex
|
starcoder
|
defmodule DebounceAndThrottle.Debounce do
defstruct([:timer_ref, :scheduled_at, :debounced_count, :extra_data])
alias DebounceAndThrottle.Debounce
@type t :: %Debounce{
timer_ref: reference(),
scheduled_at: DateTime.t(),
debounced_count: non_neg_integer(),
extra_data: map()
}
@moduledoc """
This module implements the Debounce API.
"""
@server DebounceAndThrottle.Server
@doc """
Sends a `message` to a given `pid`, but only after `period` has passed without any more calls to this function with the same `key`.
Returns `{:ok, %Debounce{}}`.
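
For example (message, key, and period are illustrative):

    Debounce.send(self(), :refresh, "refresh-ui", 5_000)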
"""
@spec send(pid() | atom(), term(), String.t(), non_neg_integer()) :: {:ok, Debounce.t()}
def send(pid, message, key, period) do
result = GenServer.call(@server, {:send_debounced, {pid, message, key, period}})
{:ok, result}
end
@doc """
Calls a `fun`, but only after `period` has passed without any more calls to this function with the same `key`.
Returns `{:ok, %Debounce{}}`.
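
For example (the key matches the `state/0` sample below):

    Debounce.call(fn -> IO.puts("hey") end, "say_hey", 1_000)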
"""
@spec call(fun(), String.t(), non_neg_integer()) :: {:ok, Debounce.t()}
def call(fun, key, period) when is_function(fun) do
result = GenServer.call(@server, {:call_debounced, {fun, key, period}})
{:ok, result}
end
@doc """
Applies `module`, `fun`, and `args` (as `apply/3` would), but only after `period` has passed without any more calls to this function with the same `key`.
Returns `{:ok, %Debounce{}}`.
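
For example (arguments are illustrative):

    Debounce.apply(IO, :puts, ["hey"], "say_hey", 1_000)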
"""
@spec apply(module, fun :: atom(), [any], String.t(), non_neg_integer()) :: {:ok, Debounce.t()}
def apply(module, fun, args, key, period) do
result = GenServer.call(@server, {:apply_debounced, {module, fun, args, key, period}})
{:ok, result}
end
@doc """
Returns the state - the current list of debounced functions. Useful for debugging.
Returns something like:
%{
apply: %{},
call: %{
"say_hey" => %DebounceAndThrottle.Debounce{
debounced_count: 1,
extra_data: %{fun: #Function<45.65746770/0 in :erl_eval.expr/5>},
scheduled_at: ~U[2022-03-12 22:50:01.190171Z],
timer_ref: #Reference<0.418177534.3850108929.259344>
}
},
send: %{}
}
"""
@spec state() :: map()
def state(), do: GenServer.call(@server, {:state, :debounced})
end
|
lib/debounce_and_throttle/debounce.ex
| 0.788705 | 0.417865 |
debounce.ex
|
starcoder
|
defmodule Radixir.Crypto.PublicKey.RSAPrivateKey do
@moduledoc false
defstruct version: nil,
public_modulus: nil,
public_exponent: nil,
private_exponent: nil,
prime_one: nil,
prime_two: nil,
exponent_one: nil,
exponent_two: nil,
ctr_coefficient: nil,
other_prime_infos: nil
@type t :: %Radixir.Crypto.PublicKey.RSAPrivateKey{
version: atom,
public_modulus: integer,
public_exponent: integer,
private_exponent: integer,
prime_one: integer,
prime_two: integer,
exponent_one: integer,
exponent_two: integer,
ctr_coefficient: integer,
other_prime_infos: atom
}
def from_sequence(rsa_key_seq) do
%Radixir.Crypto.PublicKey.RSAPrivateKey{}
|> struct(
version: maybe_convert_version_to_atom(elem(rsa_key_seq, 1)),
public_modulus: elem(rsa_key_seq, 2),
public_exponent: elem(rsa_key_seq, 3),
private_exponent: elem(rsa_key_seq, 4),
prime_one: elem(rsa_key_seq, 5),
prime_two: elem(rsa_key_seq, 6),
exponent_one: elem(rsa_key_seq, 7),
exponent_two: elem(rsa_key_seq, 8),
ctr_coefficient: elem(rsa_key_seq, 9),
other_prime_infos: elem(rsa_key_seq, 10)
)
end
def as_sequence(rsa_private_key) do
case rsa_private_key do
%__MODULE__{} ->
{:ok,
{
:RSAPrivateKey,
Map.get(rsa_private_key, :version),
Map.get(rsa_private_key, :public_modulus),
Map.get(rsa_private_key, :public_exponent),
Map.get(rsa_private_key, :private_exponent),
Map.get(rsa_private_key, :prime_one),
Map.get(rsa_private_key, :prime_two),
Map.get(rsa_private_key, :exponent_one),
Map.get(rsa_private_key, :exponent_two),
Map.get(rsa_private_key, :ctr_coefficient),
Map.get(rsa_private_key, :other_prime_infos)
}}
_ ->
{:error, "invalid Radixir.Crypto.PublicKey.RSAPrivateKey: #{inspect(rsa_private_key)}"}
end
end
def decode_der(der_encoded) do
key_sequence = :public_key.der_decode(:RSAPrivateKey, der_encoded)
rsa_private_key = from_sequence(key_sequence)
{:ok, rsa_private_key}
end
def encode_der(rsa_private_key = %__MODULE__{}) do
with {:ok, key_sequence} <- as_sequence(rsa_private_key) do
der_encoded = :public_key.der_encode(:RSAPrivateKey, key_sequence)
{:ok, der_encoded}
end
end
def get_public(rsa_private_key = %__MODULE__{}) do
%Radixir.Crypto.RSAPublicKey{
public_modulus: rsa_private_key.public_modulus,
public_exponent: rsa_private_key.public_exponent
}
end
def get_fingerprint(rsa_private_key = %__MODULE__{}, opts \\ []) do
get_public(rsa_private_key)
|> Radixir.Crypto.RSAPublicKey.get_fingerprint(opts)
end
# Protocols
defimpl Inspect do
import Inspect.Algebra
@doc """
Formats the RSAPrivateKey without exposing any private information.
example:
```
#Radixir.Crypto.PublicKey.RSAPrivateKey<
fingerprint_sha256=7a:40:1c:b9:4b:b8:a5:bb:6b:98:b6:1b:8b:7a:24:8d:45:9b:e5:54:...:39:14:7b:b2>
```
"""
def inspect(data, _opts) do
fp_opts = [format: :sha256, colons: true]
fp_sha256_parts_doc =
Radixir.Crypto.PublicKey.RSAPrivateKey.get_fingerprint(data, fp_opts)
|> String.split(":")
|> fold_doc(fn doc, acc -> glue(doc, ":", acc) end)
fp_sha256_doc =
glue("fingerprint_sha256=", "", fp_sha256_parts_doc)
|> group()
|> nest(2)
glue("#Radixir.Crypto.PublicKey.RSAPrivateKey<", "", fp_sha256_doc)
|> concat(">")
|> nest(2)
end
end
# Helpers
# Generating a RSA key on OTP 20.0 results in a RSAPrivateKey with version 0, which is the internal number that matches to :"two-prime".
# Parsing this structure to PEM and then converting it back will yield a version not of 0, but of :"two-prime".
# This conversion ensures it is always the symbol.
defp maybe_convert_version_to_atom(0), do: :"two-prime"
defp maybe_convert_version_to_atom(version), do: version
end
|
lib/radixir/crypto/rsa_private_key.ex
| 0.7413 | 0.528351 |
rsa_private_key.ex
|
starcoder
|
defmodule Mix.Tasks.Surface.Init.Patches do
@moduledoc false
alias Mix.Tasks.Surface.Init.Patchers
# Common patches
def add_surface_live_reload_pattern_to_endpoint_config(context_app, web_module, web_path) do
%{
name: "Update patterns in :reload_patterns",
patch:
&Patchers.Phoenix.replace_live_reload_pattern_in_endpoint_config(
&1,
~s[~r"#{web_path}/(live|views)/.*(ex)$"],
~s[~r"#{web_path}/(live|views|components)/.*(ex|sface|js)$"],
"sface",
context_app,
web_module
),
instructions: """
Update the :reload_patterns entry to include surface-related files.
# Example
```
config :my_app, MyAppWeb.Endpoint,
live_reload: [
patterns: [
~r"lib/my_app_web/(live|views|components)/.*(ex|sface|js)$",
...
]
]
```
"""
}
end
def add_import_surface_to_view_macro(web_module) do
%{
name: "Add `import Surface` to view config",
patch: &Patchers.Phoenix.add_import_to_view_macro(&1, Surface, web_module),
instructions: """
In order to have `~F` available for any Phoenix view, you can import `Surface`.
# Example
```elixir
def view do
quote do
...
import Surface
end
end
```
"""
}
end
# Formatter patches
def add_surface_inputs_to_formatter_config() do
%{
name: "Add file extensions to :surface_inputs",
patch: &Patchers.Formatter.add_config(&1, :surface_inputs, ~S(["{lib,test}/**/*.{ex,exs,sface}"])),
instructions: """
If you use `mix format`, make sure you add the required file patterns
to your `.formatter.exs` file.
# Example
```
[
surface_inputs: ["{lib,test}/**/*.{ex,exs,sface}"],
...
]
```
"""
}
end
def add_surface_to_import_deps_in_formatter_config() do
%{
name: "Add :surface to :import_deps",
patch: &Patchers.Formatter.add_import_dep(&1, ":surface"),
instructions: """
If you use `mix format`, make sure you add `:surface` to the `import_deps`
configuration in your `.formatter.exs` file.
# Example
```
[
import_deps: [:ecto, :phoenix, :surface],
...
]
```
"""
}
end
# Catalogue patches
def add_surface_catalogue_to_mix_deps() do
%{
name: "Add `surface_catalogue` dependency",
update_deps: [:surface_catalogue],
patch:
&Patchers.MixExs.add_dep(
&1,
":surface_catalogue",
~S(github: "surface-ui/surface_catalogue")
),
instructions: """
Add `surface_catalogue` to the list of dependencies in `mix.exs`.
# Example
```
def deps do
[
{:surface_catalogue, "~> 0.2.0"}
]
end
```
"""
}
end
def configure_catalogue_in_mix_exs() do
%{
name: "Configure `elixirc_paths` for the catalogue",
patch: [
&Patchers.MixExs.add_elixirc_paths_entry(&1, ":dev", ~S|["lib"] ++ catalogues()|, "catalogues()"),
&Patchers.MixExs.append_def(&1, "catalogues", """
[
"priv/catalogue"
]\
""")
],
instructions: """
If you want to access examples and playgrounds for components, edit your `mix.exs` file,
adding a new entry for `elixirc_paths` along with a `catalogues` function listing the
catalogues you want to be loaded.
# Example
```
defp elixirc_paths(:dev), do: ["lib"] ++ catalogues()
...
def catalogues do
[
"priv/catalogue"
]
end
```
"""
}
end
def configure_catalogue_route(web_module) do
%{
name: "Configure catalogue route",
patch: [
&Patchers.Phoenix.add_import_to_router(&1, Surface.Catalogue.Router, web_module),
&Patchers.Phoenix.append_route(&1, "/catalogue", web_module, """
if Mix.env() == :dev do
scope "/" do
pipe_through :browser
surface_catalogue "/catalogue"
end
end\
""")
],
instructions: """
Update your `router.ex` configuration so the catalogue can be available at `/catalogue`.
# Example
```
import Surface.Catalogue.Router
...
if Mix.env() == :dev do
scope "/" do
pipe_through :browser
surface_catalogue "/catalogue"
end
end
```
"""
}
end
def configure_demo_route(web_module) do
%{
name: "Configure demo route",
patch: &Patchers.Phoenix.append_route_to_main_scope(&1, ~S("/demo"), web_module, ~S(live "/demo", Demo)),
instructions: """
Update your `router.ex` configuration so the demo can be available at `/demo`.
# Example
```
scope "/", MyAppWeb do
pipe_through :browser
live "/demo", Demo
end
```
"""
}
end
def add_catalogue_live_reload_pattern_to_endpoint_config(context_app, web_module) do
%{
name: "Update patterns in :reload_patterns to reload catalogue files",
patch:
&Patchers.Phoenix.add_live_reload_pattern_to_endpoint_config(
&1,
~S|~r"priv/catalogue/.*(ex)$"|,
"catalogue",
context_app,
web_module
),
instructions: """
Update the :reload_patterns entry to include catalogue files.
# Example
```
config :my_app, MyAppWeb.Endpoint,
live_reload: [
patterns: [
~r"priv/catalogue/.*(ex)$",
...
]
]
```
"""
}
end
# ErrorTag patches
def config_error_tag(web_module) do
name = "Configure the ErrorTag component to use Gettext"
instructions = """
Set the `default_translator` option to the project's `ErrorHelpers.translate_error/1` function,
which should be using Gettext for translations.
# Example
```
config :surface, :components, [
...
{Surface.Components.Form.ErrorTag, default_translator: {MyAppWeb.ErrorHelpers, :translate_error}}
]
```
"""
patch =
&Patchers.Component.add_config(
&1,
"Surface.Components.Form.ErrorTag",
"default_translator: {#{inspect(web_module)}.ErrorHelpers, :translate_error}"
)
%{name: name, instructions: instructions, patch: patch}
end
# JS hooks patches
def add_surface_to_mix_compilers() do
%{
name: "Add :surface to compilers",
patch: &Patchers.MixExs.add_compiler(&1, ":surface"),
instructions: """
Append `:surface` to the list of compilers.
# Example
```
def project do
[
...
compilers: [:gettext] ++ Mix.compilers() ++ [:surface],
...
]
end
```
"""
}
end
def add_surface_to_reloadable_compilers_in_endpoint_config(context_app, web_module) do
%{
name: "Add :surface to :reloadable_compilers",
patch: &Patchers.Phoenix.add_reloadable_compiler_to_endpoint_config(&1, :surface, context_app, web_module),
instructions: """
Add :surface to the list of reloadable compilers.
# Example
```
config :my_app, MyAppWeb.Endpoint,
reloadable_compilers: [:phoenix, :elixir, :surface],
...
```
"""
}
end
def js_hooks() do
%{
name: "Configure components' JS hooks",
instructions: """
Import Surface components' hooks and pass them to `new LiveSocket(...)`.
# Example
```JS
import Hooks from "./_hooks"
let liveSocket = new LiveSocket("/live", Socket, { hooks: Hooks, ... })
```
""",
patch: [
&Patchers.JS.add_import(&1, ~S[import Hooks from "./_hooks"]),
&Patchers.JS.replace_line_text(
&1,
~S[let liveSocket = new LiveSocket("/live", Socket, {params: {_csrf_token: csrfToken}})],
~S[let liveSocket = new LiveSocket("/live", Socket, {params: {_csrf_token: csrfToken}, hooks: Hooks})]
)
]
}
end
def add_ignore_js_hooks_to_gitignore() do
%{
name: "Ignore generated JS hook files for components",
instructions: "",
patch:
&Patchers.Text.append_line(
&1,
"""
# Ignore generated js hook files for components
assets/js/_hooks/
""",
"assets/js/_hooks/"
)
}
end
end
|
lib/mix/tasks/surface/surface.init/patches.ex
| 0.860061 | 0.634119 |
patches.ex
|
starcoder
|
defmodule ExRabbitMQ do
@version Mix.Project.config()[:version]
|> Version.parse()
|> elem(1)
|> Map.take([:major, :minor])
|> (fn %{major: major, minor: minor} -> "#{major}.#{minor}" end).()
@moduledoc """
A project providing the following abstractions:
1. Connection lifecycle handling
2. Channel lifecycle handling
3. A consumer behaviour for consuming from a RabbitMQ queue
4. A producer behaviour for publishing messages to a RabbitMQ queue
The goals of the project are:
1. Make it unnecessary for the programmer to directly handle connections and channels
2. Reduce the boilerplate when creating new projects that interact with RabbitMQ
As such, hooks are provided to enable the programmer to handle message delivery,
cancellation, acknowledgement, and rejection, as well as publishing.
For more information on implementing a consumer, see the documentation of the `ExRabbitMQ.Consumer` behaviour.
For more information on implementing a producer, see the documentation of the `ExRabbitMQ.Producer` behaviour.
## Installation
1. Add `{:exrabbitmq, "~> #{@version}"}` ([https://hex.pm/packages/exrabbitmq](https://hex.pm/packages/exrabbitmq))
in your project's `deps` function in `mix.exs`
2. Run `mix deps.get` and `mix compile` in your project's root directory to download and compile the package
## Documentation
1. Run `mix deps.get`, `mix compile` and `mix docs` in `:exrabbitmq`'s root directory
2. Serve the `doc` folder in `:exrabbitmq`'s root directory with a web server
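
## Runtime switches

For example, logging and accounting can be toggled at runtime through the
delegated helpers defined below:

    ExRabbitMQ.enable_logging()
    ExRabbitMQ.logging_set?()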
"""
alias ExRabbitMQ.Config.Utils, as: XRMQConfigUtils
# logging
defdelegate logging_set?(), to: XRMQConfigUtils
defdelegate enable_logging(), to: XRMQConfigUtils
defdelegate disable_logging(), to: XRMQConfigUtils
# accounting
defdelegate accounting_set?(), to: XRMQConfigUtils
defdelegate enable_accounting(), to: XRMQConfigUtils
defdelegate disable_accounting(), to: XRMQConfigUtils
# message buffering
defdelegate message_buffering_set?(), to: XRMQConfigUtils
defdelegate enable_message_buffering(), to: XRMQConfigUtils
defdelegate disable_message_buffering(), to: XRMQConfigUtils
# try_init interval
defdelegate get_try_init_interval(), to: XRMQConfigUtils
defdelegate set_try_init_interval(interval), to: XRMQConfigUtils
# KBs of messages seen so far threshold
defdelegate get_kb_of_messages_seen_so_far_threshold(), to: XRMQConfigUtils
defdelegate set_kb_of_messages_seen_so_far_threshold(threshold), to: XRMQConfigUtils
# continue_tuple_try_init
defdelegate continue_tuple_try_init(
connection_config,
session_config,
auto_consume,
continuation
),
to: XRMQConfigUtils
defdelegate continue_tuple_try_init(connection_config, session_config, continuation),
to: XRMQConfigUtils
end
|
lib/ex_rabbit_m_q.ex
| 0.808937 | 0.551996 |
ex_rabbit_m_q.ex
|
starcoder
|
defmodule TripPlan.Leg do
@moduledoc """
A single-mode part of an Itinerary.
An Itinerary can take multiple modes of transportation (walk, bus,
train, &c.). A Leg represents a single mode of travel during a journey.
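
A minimal sketch (assumes a `PersonalDetail` struct can be built from its
defaults):

    leg = %TripPlan.Leg{mode: %TripPlan.PersonalDetail{}}
    TripPlan.Leg.transit?(leg)
    # => false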
"""
alias TripPlan.{PersonalDetail, TransitDetail, NamedPosition}
defstruct start: DateTime.from_unix!(-1),
stop: DateTime.from_unix!(0),
mode: nil,
from: nil,
to: nil,
name: nil,
long_name: nil,
type: nil,
description: nil,
url: nil,
polyline: ""
@type mode :: PersonalDetail.t() | TransitDetail.t()
@type t :: %__MODULE__{
start: DateTime.t(),
stop: DateTime.t(),
mode: mode,
from: NamedPosition.t() | nil,
to: NamedPosition.t(),
name: String.t(),
long_name: String.t(),
type: String.t(),
description: String.t(),
url: String.t(),
polyline: String.t()
}
@doc "Returns the route ID for the leg, if present"
@spec route_id(t) :: {:ok, Routes.Route.id_t()} | :error
def route_id(%__MODULE__{mode: %TransitDetail{route_id: route_id}}), do: {:ok, route_id}
def route_id(%__MODULE__{}), do: :error
@doc "Returns the trip ID for the leg, if present"
@spec trip_id(t) :: {:ok, Schedules.Trip.id_t()} | :error
def trip_id(%__MODULE__{mode: %TransitDetail{trip_id: trip_id}}), do: {:ok, trip_id}
def trip_id(%__MODULE__{}), do: :error
@spec route_trip_ids(t) :: {:ok, {Routes.Route.id_t(), Schedules.Trip.id_t()}} | :error
def route_trip_ids(%__MODULE__{mode: %TransitDetail{} = mode}) do
{:ok, {mode.route_id, mode.trip_id}}
end
def route_trip_ids(%__MODULE__{}) do
:error
end
@doc "Determines if this leg uses public transit"
@spec transit?(t) :: boolean
def transit?(%__MODULE__{mode: %PersonalDetail{}}), do: false
def transit?(%__MODULE__{mode: %TransitDetail{}}), do: true
@spec walking_distance(t) :: float
def walking_distance(%__MODULE__{mode: %PersonalDetail{distance: distance}}), do: distance
def walking_distance(%__MODULE__{mode: %TransitDetail{}}), do: 0.0
@doc "Returns the stop IDs for the leg"
@spec stop_ids(t) :: [Stops.Stop.id_t()]
def stop_ids(%__MODULE__{from: from, to: to}) do
for %NamedPosition{stop_id: stop_id} <- [from, to],
stop_id do
stop_id
end
end
@doc "Determines if two legs have the same to and from fields"
@spec same_leg?(t, t) :: boolean
def same_leg?(%__MODULE__{from: from, to: to}, %__MODULE__{from: from, to: to}), do: true
def same_leg?(_leg_1, _leg_2), do: false
@spec stop_is_silver_line_airport?([t], atom) :: boolean()
def stop_is_silver_line_airport?([], _), do: false
def stop_is_silver_line_airport?([leg], key) when not is_nil(leg) do
route_id = leg.mode.route_id
stop_id =
leg
|> Kernel.get_in([Access.key(key), Access.key(:stop_id)])
Fares.silver_line_airport_stop?(route_id, stop_id)
end
def stop_is_silver_line_airport?(_, _), do: false
# Fare calculation is not possible if the route is a commuter rail route and
# either from/to stop is missing zone information.
@spec is_fare_complete_transit_leg?(t) :: boolean
def is_fare_complete_transit_leg?(leg), do: transit?(leg) and not leg_missing_zone?(leg)
# Cannot compute fare for commuter rail route
# between stops where we don't know the zones
@spec leg_missing_zone?(t) :: boolean
defp leg_missing_zone?(%__MODULE__{
mode: %TransitDetail{route_id: route_id},
from: %NamedPosition{stop_id: origin_id},
to: %NamedPosition{stop_id: destination_id}
}) do
route = Routes.Repo.get(route_id)
Routes.Route.type_atom(route) == :commuter_rail and
not Enum.all?([origin_id, destination_id], &Stops.Stop.has_zone?(&1))
end
defp leg_missing_zone?(_), do: false
end
|
apps/trip_plan/lib/trip_plan/leg.ex
| 0.877929 | 0.466116 |
leg.ex
|
starcoder
|
defmodule NashvilleZoneLookup.Zoning.LandUse do
@moduledoc ~S"""
An arrangement, activity, or input that might be undertaken on a property.
For example, a Land Use might be a class of business ("Bed and breakfast
inn"), an agricultural activity ("Domestic hens"), or an institution
("Correctional facility").
A Land Use has a `:name` that is unique. Land Uses also
have a `:category`, such as "Residential" or "Industrial", that can be used
to group similar Land Uses.
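
A minimal changeset sketch (attribute values are illustrative):

    alias NashvilleZoneLookup.Zoning.LandUse

    LandUse.changeset(%LandUse{}, %{name: "Domestic hens", category: "Residential"})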
"""
use Ecto.Schema
import Ecto.Changeset
alias NashvilleZoneLookup.Zoning.LandUse
# Private Constants
# These categories were manually copied from rows ending in "Uses" in
# https://docs.google.com/spreadsheets/d/1O0Qc8nErSbstCiWpbpRQ0tPMS0NukCmcov2-s_u8Umg/edit#gid=1126820804
@category_residential "Residential"
@category_institutional "Institutional"
@category_educational "Educational"
@category_office "Office"
@category_medical "Medical"
@category_commercial "Commercial"
@category_communication "Communication"
@category_industrial "Industrial"
@category_transportation "Transportation"
@category_utility "Utility"
@category_waste_management "Waste Management"
@category_recreation_and_entertainment "Recreation and Entertainment"
@category_other "Other"
@categories [
@category_residential,
@category_institutional,
@category_educational,
@category_office,
@category_medical,
@category_commercial,
@category_communication,
@category_industrial,
@category_transportation,
@category_utility,
@category_waste_management,
@category_recreation_and_entertainment,
@category_other
]
schema "land_uses" do
field(:category, :string)
field(:name, :string)
timestamps()
end
@doc false
def changeset(%LandUse{} = land_use, attrs) do
land_use
|> cast(attrs, [:category, :name])
|> validate_required([:category, :name])
|> validate_inclusion(:category, @categories)
|> unique_constraint(:name)
end
# Public Constants
def categories, do: @categories
def category_residential, do: @category_residential
def category_institutional, do: @category_institutional
def category_educational, do: @category_educational
def category_office, do: @category_office
def category_medical, do: @category_medical
def category_commercial, do: @category_commercial
def category_communication, do: @category_communication
def category_industrial, do: @category_industrial
def category_transportation, do: @category_transportation
def category_utility, do: @category_utility
def category_waste_management, do: @category_waste_management
def category_recreation_and_entertainment, do: @category_recreation_and_entertainment
def category_other, do: @category_other
end
|
lib/nashville_zone_lookup/zoning/land_use.ex
| 0.666497 | 0.46041 |
land_use.ex
|
starcoder
|
defmodule Duration.Parser do
@moduledoc false
@doc """
Parses the given `binary` as parse.
Returns `{:ok, [token], rest, context, position, byte_offset}` or
`{:error, reason, rest, context, line, byte_offset}` where `position`
describes the location of the parse (start position) as `{line, column_on_line}`.
## Options
* `:line` - the initial line, defaults to 1
* `:byte_offset` - the initial byte offset, defaults to 0
* `:context` - the initial context value. It will be converted
to a map
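
## Example

A minimal sketch; the input is an illustrative ISO 8601 duration and the
exact token list depends on the generated date/time combinators:

    Duration.Parser.parse("P1DT1H")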
"""
@spec parse(binary, keyword) ::
{:ok, [term], rest, context, line, byte_offset}
| {:error, reason, rest, context, line, byte_offset}
when line: {pos_integer, byte_offset},
byte_offset: pos_integer,
rest: binary,
reason: String.t(),
context: map()
def parse(binary, opts \\ []) when is_binary(binary) do
line = Keyword.get(opts, :line, 1)
offset = Keyword.get(opts, :byte_offset, 0)
context = Map.new(Keyword.get(opts, :context, []))
case(parse__0(binary, [], [], context, {line, offset}, offset)) do
{:ok, acc, rest, context, line, offset} ->
{:ok, :lists.reverse(acc), rest, context, line, offset}
{:error, _, _, _, _, _} = error ->
error
end
end
defp parse__0(rest, acc, stack, context, line, offset) do
parse__8(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp parse__2(rest, acc, stack, context, line, offset) do
case(datetime_3__0(rest, acc, [], context, line, offset)) do
{:ok, acc, rest, context, line, offset} ->
parse__3(rest, acc, stack, context, line, offset)
{:error, _, _, _, _, _} = error ->
error
end
end
defp parse__3(rest, acc, [_, previous_acc | stack], context, line, offset) do
parse__1(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp parse__4(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
parse__2(rest, [], stack, context, line, offset)
end
defp parse__5(rest, acc, stack, context, line, offset) do
case(datetime_2__0(rest, acc, [], context, line, offset)) do
{:ok, acc, rest, context, line, offset} ->
parse__6(rest, acc, stack, context, line, offset)
{:error, _, _, _, _, _} ->
parse__4(rest, acc, stack, context, line, offset)
end
end
defp parse__6(rest, acc, [_, previous_acc | stack], context, line, offset) do
parse__1(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp parse__7(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
parse__5(rest, [], stack, context, line, offset)
end
defp parse__8(rest, acc, stack, context, line, offset) do
case(datetime_1__0(rest, acc, [], context, line, offset)) do
{:ok, acc, rest, context, line, offset} ->
parse__9(rest, acc, stack, context, line, offset)
{:error, _, _, _, _, _} ->
parse__7(rest, acc, stack, context, line, offset)
end
end
defp parse__9(rest, acc, [_, previous_acc | stack], context, line, offset) do
parse__1(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp parse__1(rest, acc, _stack, context, line, offset) do
{:ok, acc, rest, context, line, offset}
end
@doc """
Parses the given `binary` as datetime_3.
Returns `{:ok, [token], rest, context, position, byte_offset}` or
`{:error, reason, rest, context, line, byte_offset}` where `position`
describes the location of the datetime_3 (start position) as `{line, column_on_line}`.
## Options
* `:line` - the initial line, defaults to 1
* `:byte_offset` - the initial byte offset, defaults to 0
* `:context` - the initial context value. It will be converted
to a map
"""
@spec datetime_3(binary, keyword) ::
{:ok, [term], rest, context, line, byte_offset}
| {:error, reason, rest, context, line, byte_offset}
when line: {pos_integer, byte_offset},
byte_offset: pos_integer,
rest: binary,
reason: String.t(),
context: map()
def datetime_3(binary, opts \\ []) when is_binary(binary) do
line = Keyword.get(opts, :line, 1)
offset = Keyword.get(opts, :byte_offset, 0)
context = Map.new(Keyword.get(opts, :context, []))
case(datetime_3__0(binary, [], [], context, {line, offset}, offset)) do
{:ok, acc, rest, context, line, offset} ->
{:ok, :lists.reverse(acc), rest, context, line, offset}
{:error, _, _, _, _, _} = error ->
error
end
end
defp datetime_3__0(<<"P", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
datetime_3__1(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp datetime_3__0(rest, _acc, _stack, context, line, offset) do
{:error, "expected string \"P\"", rest, context, line, offset}
end
defp datetime_3__1(rest, acc, stack, context, line, offset) do
case(date_3__0(rest, acc, [], context, line, offset)) do
{:ok, acc, rest, context, line, offset} ->
datetime_3__2(rest, acc, stack, context, line, offset)
{:error, _, _, _, _, _} = error ->
error
end
end
defp datetime_3__2(rest, acc, stack, context, line, offset) do
datetime_3__6(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp datetime_3__4(rest, acc, [_, previous_acc | stack], context, line, offset) do
datetime_3__3(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp datetime_3__5(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
datetime_3__4(rest, [], stack, context, line, offset)
end
defp datetime_3__6(<<"T", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
datetime_3__7(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp datetime_3__6(rest, acc, stack, context, line, offset) do
datetime_3__5(rest, acc, stack, context, line, offset)
end
defp datetime_3__7(rest, acc, stack, context, line, offset) do
case(time_3__0(rest, acc, [], context, line, offset)) do
{:ok, acc, rest, context, line, offset} ->
datetime_3__8(rest, acc, stack, context, line, offset)
{:error, _, _, _, _, _} ->
datetime_3__5(rest, acc, stack, context, line, offset)
end
end
defp datetime_3__8(rest, acc, [_, previous_acc | stack], context, line, offset) do
datetime_3__3(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp datetime_3__3(<<""::binary>>, acc, stack, context, comb__line, comb__offset) do
datetime_3__9("", [] ++ acc, stack, context, comb__line, comb__offset)
end
defp datetime_3__3(rest, _acc, _stack, context, line, offset) do
{:error, "expected end of string", rest, context, line, offset}
end
defp datetime_3__9(rest, acc, _stack, context, line, offset) do
{:ok, acc, rest, context, line, offset}
end
@doc """
Parses the given `binary` as datetime_2.
Returns `{:ok, [token], rest, context, position, byte_offset}` or
`{:error, reason, rest, context, line, byte_offset}` where `position`
describes the location of the datetime_2 (start position) as `{line, column_on_line}`.
## Options
* `:line` - the initial line, defaults to 1
* `:byte_offset` - the initial byte offset, defaults to 0
* `:context` - the initial context value. It will be converted
to a map
"""
@spec datetime_2(binary, keyword) ::
{:ok, [term], rest, context, line, byte_offset}
| {:error, reason, rest, context, line, byte_offset}
when line: {pos_integer, byte_offset},
byte_offset: pos_integer,
rest: binary,
reason: String.t(),
context: map()
def datetime_2(binary, opts \\ []) when is_binary(binary) do
line = Keyword.get(opts, :line, 1)
offset = Keyword.get(opts, :byte_offset, 0)
context = Map.new(Keyword.get(opts, :context, []))
case(datetime_2__0(binary, [], [], context, {line, offset}, offset)) do
{:ok, acc, rest, context, line, offset} ->
{:ok, :lists.reverse(acc), rest, context, line, offset}
{:error, _, _, _, _, _} = error ->
error
end
end
defp datetime_2__0(<<"P", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
datetime_2__1(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp datetime_2__0(rest, _acc, _stack, context, line, offset) do
{:error, "expected string \"P\"", rest, context, line, offset}
end
defp datetime_2__1(rest, acc, stack, context, line, offset) do
case(date_2__0(rest, acc, [], context, line, offset)) do
{:ok, acc, rest, context, line, offset} ->
datetime_2__2(rest, acc, stack, context, line, offset)
{:error, _, _, _, _, _} = error ->
error
end
end
defp datetime_2__2(<<"T", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
datetime_2__3(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp datetime_2__2(rest, _acc, _stack, context, line, offset) do
{:error, "expected string \"T\"", rest, context, line, offset}
end
defp datetime_2__3(rest, acc, stack, context, line, offset) do
case(time_2__0(rest, acc, [], context, line, offset)) do
{:ok, acc, rest, context, line, offset} ->
datetime_2__4(rest, acc, stack, context, line, offset)
{:error, _, _, _, _, _} = error ->
error
end
end
defp datetime_2__4(<<""::binary>>, acc, stack, context, comb__line, comb__offset) do
datetime_2__5("", [] ++ acc, stack, context, comb__line, comb__offset)
end
defp datetime_2__4(rest, _acc, _stack, context, line, offset) do
{:error, "expected end of string", rest, context, line, offset}
end
defp datetime_2__5(rest, acc, _stack, context, line, offset) do
{:ok, acc, rest, context, line, offset}
end
@doc """
Parses the given `binary` as datetime_1.
Returns `{:ok, [token], rest, context, position, byte_offset}` or
`{:error, reason, rest, context, line, byte_offset}` where `position`
describes the location of the datetime_1 (start position) as `{line, column_on_line}`.
## Options
* `:line` - the initial line, defaults to 1
* `:byte_offset` - the initial byte offset, defaults to 0
* `:context` - the initial context value. It will be converted
to a map
"""
@spec datetime_1(binary, keyword) ::
{:ok, [term], rest, context, line, byte_offset}
| {:error, reason, rest, context, line, byte_offset}
when line: {pos_integer, byte_offset},
byte_offset: pos_integer,
rest: binary,
reason: String.t(),
context: map()
def datetime_1(binary, opts \\ []) when is_binary(binary) do
line = Keyword.get(opts, :line, 1)
offset = Keyword.get(opts, :byte_offset, 0)
context = Map.new(Keyword.get(opts, :context, []))
case(datetime_1__0(binary, [], [], context, {line, offset}, offset)) do
{:ok, acc, rest, context, line, offset} ->
{:ok, :lists.reverse(acc), rest, context, line, offset}
{:error, _, _, _, _, _} = error ->
error
end
end
defp datetime_1__0(<<"P", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
datetime_1__1(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp datetime_1__0(rest, _acc, _stack, context, line, offset) do
{:error, "expected string \"P\"", rest, context, line, offset}
end
defp datetime_1__1(rest, acc, stack, context, line, offset) do
case(date_1__0(rest, acc, [], context, line, offset)) do
{:ok, acc, rest, context, line, offset} ->
datetime_1__2(rest, acc, stack, context, line, offset)
{:error, _, _, _, _, _} = error ->
error
end
end
defp datetime_1__2(<<"T", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
datetime_1__3(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp datetime_1__2(rest, _acc, _stack, context, line, offset) do
{:error, "expected string \"T\"", rest, context, line, offset}
end
defp datetime_1__3(rest, acc, stack, context, line, offset) do
case(time_1__0(rest, acc, [], context, line, offset)) do
{:ok, acc, rest, context, line, offset} ->
datetime_1__4(rest, acc, stack, context, line, offset)
{:error, _, _, _, _, _} = error ->
error
end
end
defp datetime_1__4(<<""::binary>>, acc, stack, context, comb__line, comb__offset) do
datetime_1__5("", [] ++ acc, stack, context, comb__line, comb__offset)
end
defp datetime_1__4(rest, _acc, _stack, context, line, offset) do
{:error, "expected end of string", rest, context, line, offset}
end
defp datetime_1__5(rest, acc, _stack, context, line, offset) do
{:ok, acc, rest, context, line, offset}
end
@doc """
Parses the given `binary` as time_3.
Returns `{:ok, [token], rest, context, position, byte_offset}` or
`{:error, reason, rest, context, line, byte_offset}` where `position`
describes the location of the time_3 (start position) as `{line, column_on_line}`.
## Options
* `:line` - the initial line, defaults to 1
* `:byte_offset` - the initial byte offset, defaults to 0
* `:context` - the initial context value. It will be converted
to a map
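
## Example

Based on the generated combinators below (each of the hour, minute, and
second components is optional):

    Duration.Parser.time_3("1H2M3S")
    # => {:ok, [hours: 1, minutes: 2, seconds: 3], "", %{}, {1, 0}, 6}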
"""
@spec time_3(binary, keyword) ::
{:ok, [term], rest, context, line, byte_offset}
| {:error, reason, rest, context, line, byte_offset}
when line: {pos_integer, byte_offset},
byte_offset: pos_integer,
rest: binary,
reason: String.t(),
context: map()
def time_3(binary, opts \\ []) when is_binary(binary) do
line = Keyword.get(opts, :line, 1)
offset = Keyword.get(opts, :byte_offset, 0)
context = Map.new(Keyword.get(opts, :context, []))
case(time_3__0(binary, [], [], context, {line, offset}, offset)) do
{:ok, acc, rest, context, line, offset} ->
{:ok, :lists.reverse(acc), rest, context, line, offset}
{:error, _, _, _, _, _} = error ->
error
end
end
defp time_3__0(rest, acc, stack, context, line, offset) do
time_3__4(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp time_3__2(rest, acc, [_, previous_acc | stack], context, line, offset) do
time_3__1(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp time_3__3(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
time_3__2(rest, [], stack, context, line, offset)
end
defp time_3__4(rest, acc, stack, context, line, offset) do
time_3__5(rest, [], [acc | stack], context, line, offset)
end
defp time_3__5(rest, acc, stack, context, line, offset) do
time_3__6(rest, [], [acc | stack], context, line, offset)
end
defp time_3__6(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
time_3__7(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp time_3__6(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
time_3__3(rest, acc, stack, context, line, offset)
end
defp time_3__7(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
time_3__9(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp time_3__7(rest, acc, stack, context, line, offset) do
time_3__8(rest, acc, stack, context, line, offset)
end
defp time_3__9(rest, acc, stack, context, line, offset) do
time_3__7(rest, acc, stack, context, line, offset)
end
defp time_3__8(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
time_3__10(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp time_3__10(<<"H", _::binary>> = rest, acc, stack, context, line, offset) do
time_3__11(rest, acc, stack, context, line, offset)
end
defp time_3__10(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
time_3__3(rest, acc, stack, context, line, offset)
end
defp time_3__11(<<"H", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
time_3__12(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp time_3__11(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
time_3__3(rest, acc, stack, context, line, offset)
end
defp time_3__12(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
time_3__13(
rest,
[
hours:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp time_3__13(rest, acc, [_, previous_acc | stack], context, line, offset) do
time_3__1(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp time_3__1(rest, acc, stack, context, line, offset) do
time_3__17(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp time_3__15(rest, acc, [_, previous_acc | stack], context, line, offset) do
time_3__14(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp time_3__16(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
time_3__15(rest, [], stack, context, line, offset)
end
defp time_3__17(rest, acc, stack, context, line, offset) do
time_3__18(rest, [], [acc | stack], context, line, offset)
end
defp time_3__18(rest, acc, stack, context, line, offset) do
time_3__19(rest, [], [acc | stack], context, line, offset)
end
defp time_3__19(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
time_3__20(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp time_3__19(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
time_3__16(rest, acc, stack, context, line, offset)
end
defp time_3__20(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
time_3__22(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp time_3__20(rest, acc, stack, context, line, offset) do
time_3__21(rest, acc, stack, context, line, offset)
end
defp time_3__22(rest, acc, stack, context, line, offset) do
time_3__20(rest, acc, stack, context, line, offset)
end
defp time_3__21(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
time_3__23(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp time_3__23(<<"M", _::binary>> = rest, acc, stack, context, line, offset) do
time_3__24(rest, acc, stack, context, line, offset)
end
defp time_3__23(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
time_3__16(rest, acc, stack, context, line, offset)
end
defp time_3__24(<<"M", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
time_3__25(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp time_3__24(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
time_3__16(rest, acc, stack, context, line, offset)
end
defp time_3__25(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
time_3__26(
rest,
[
minutes:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp time_3__26(rest, acc, [_, previous_acc | stack], context, line, offset) do
time_3__14(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp time_3__14(rest, acc, stack, context, line, offset) do
time_3__30(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp time_3__28(rest, acc, [_, previous_acc | stack], context, line, offset) do
time_3__27(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp time_3__29(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
time_3__28(rest, [], stack, context, line, offset)
end
defp time_3__30(rest, acc, stack, context, line, offset) do
time_3__31(rest, [], [acc | stack], context, line, offset)
end
defp time_3__31(rest, acc, stack, context, line, offset) do
time_3__32(rest, [], [acc | stack], context, line, offset)
end
defp time_3__32(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
time_3__33(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp time_3__32(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
time_3__29(rest, acc, stack, context, line, offset)
end
defp time_3__33(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
time_3__35(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp time_3__33(rest, acc, stack, context, line, offset) do
time_3__34(rest, acc, stack, context, line, offset)
end
defp time_3__35(rest, acc, stack, context, line, offset) do
time_3__33(rest, acc, stack, context, line, offset)
end
defp time_3__34(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
time_3__36(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp time_3__36(<<"S", _::binary>> = rest, acc, stack, context, line, offset) do
time_3__37(rest, acc, stack, context, line, offset)
end
defp time_3__36(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
time_3__29(rest, acc, stack, context, line, offset)
end
defp time_3__37(<<"S", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
time_3__38(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp time_3__37(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
time_3__29(rest, acc, stack, context, line, offset)
end
defp time_3__38(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
time_3__39(
rest,
[
seconds:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp time_3__39(rest, acc, [_, previous_acc | stack], context, line, offset) do
time_3__27(rest, acc ++ previous_acc, stack, context, line, offset)
end
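  # time_3 success clause: by this point the optional "<n>M" (minutes) and
  # "<n>S" (seconds) components handled by the clauses above have been
  # consumed and tagged.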
defp time_3__27(rest, acc, _stack, context, line, offset) do
{:ok, acc, rest, context, line, offset}
end
@doc """
Parses the given `binary` as time_2.
Returns `{:ok, [token], rest, context, position, byte_offset}` or
`{:error, reason, rest, context, line, byte_offset}` where `position`
describes the location of the time_2 (start position) as `{line, column_on_line}`.
## Options
* `:line` - the initial line, defaults to 1
* `:byte_offset` - the initial byte offset, defaults to 0
* `:context` - the initial context value. It will be converted
to a map
"""
@spec time_2(binary, keyword) ::
{:ok, [term], rest, context, line, byte_offset}
| {:error, reason, rest, context, line, byte_offset}
when line: {pos_integer, byte_offset},
byte_offset: pos_integer,
rest: binary,
reason: String.t(),
context: map()
def time_2(binary, opts \\ []) when is_binary(binary) do
line = Keyword.get(opts, :line, 1)
offset = Keyword.get(opts, :byte_offset, 0)
context = Map.new(Keyword.get(opts, :context, []))
case(time_2__0(binary, [], [], context, {line, offset}, offset)) do
{:ok, acc, rest, context, line, offset} ->
{:ok, :lists.reverse(acc), rest, context, line, offset}
{:error, _, _, _, _, _} = error ->
error
end
end
defp time_2__0(rest, acc, stack, context, line, offset) do
time_2__1(rest, [], [acc | stack], context, line, offset)
end
defp time_2__1(rest, acc, stack, context, line, offset) do
time_2__2(rest, [], [acc | stack], context, line, offset)
end
defp time_2__2(
<<x0::integer, x1::integer, rest::binary>>,
acc,
stack,
context,
comb__line,
comb__offset
)
when x0 >= 48 and x0 <= 57 and (x1 >= 48 and x1 <= 57) do
time_2__3(
rest,
[x1 - 48 + (x0 - 48) * 10] ++ acc,
stack,
context,
comb__line,
comb__offset + 2
)
end
defp time_2__2(rest, _acc, _stack, context, line, offset) do
{:error, "expected 2 digits", rest, context, line, offset}
end
defp time_2__3(rest, user_acc, [acc | stack], context, line, offset) do
case(validate_moduli(rest, user_acc, context, line, offset, 24)) do
{user_acc, context} when is_list(user_acc) ->
time_2__4(rest, user_acc ++ acc, stack, context, line, offset)
{:error, reason} ->
{:error, reason, rest, context, line, offset}
end
end
defp time_2__4(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
time_2__5(
rest,
[
hours:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp time_2__5(<<":", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
time_2__6(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp time_2__5(rest, _acc, _stack, context, line, offset) do
{:error, "expected colon", rest, context, line, offset}
end
defp time_2__6(rest, acc, stack, context, line, offset) do
time_2__7(rest, [], [acc | stack], context, line, offset)
end
defp time_2__7(rest, acc, stack, context, line, offset) do
time_2__8(rest, [], [acc | stack], context, line, offset)
end
defp time_2__8(
<<x0::integer, x1::integer, rest::binary>>,
acc,
stack,
context,
comb__line,
comb__offset
)
when x0 >= 48 and x0 <= 57 and (x1 >= 48 and x1 <= 57) do
time_2__9(
rest,
[x1 - 48 + (x0 - 48) * 10] ++ acc,
stack,
context,
comb__line,
comb__offset + 2
)
end
defp time_2__8(rest, _acc, _stack, context, line, offset) do
{:error, "expected 2 digits", rest, context, line, offset}
end
defp time_2__9(rest, user_acc, [acc | stack], context, line, offset) do
case(validate_moduli(rest, user_acc, context, line, offset, 60)) do
{user_acc, context} when is_list(user_acc) ->
time_2__10(rest, user_acc ++ acc, stack, context, line, offset)
{:error, reason} ->
{:error, reason, rest, context, line, offset}
end
end
defp time_2__10(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
time_2__11(
rest,
[
minutes:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp time_2__11(<<":", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
time_2__12(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp time_2__11(rest, _acc, _stack, context, line, offset) do
{:error, "expected colon", rest, context, line, offset}
end
defp time_2__12(rest, acc, stack, context, line, offset) do
time_2__13(rest, [], [acc | stack], context, line, offset)
end
defp time_2__13(rest, acc, stack, context, line, offset) do
time_2__14(rest, [], [acc | stack], context, line, offset)
end
defp time_2__14(
<<x0::integer, x1::integer, rest::binary>>,
acc,
stack,
context,
comb__line,
comb__offset
)
when x0 >= 48 and x0 <= 57 and (x1 >= 48 and x1 <= 57) do
time_2__15(
rest,
[x1 - 48 + (x0 - 48) * 10] ++ acc,
stack,
context,
comb__line,
comb__offset + 2
)
end
defp time_2__14(rest, _acc, _stack, context, line, offset) do
{:error, "expected 2 digits", rest, context, line, offset}
end
defp time_2__15(rest, user_acc, [acc | stack], context, line, offset) do
case(validate_moduli(rest, user_acc, context, line, offset, 60)) do
{user_acc, context} when is_list(user_acc) ->
time_2__16(rest, user_acc ++ acc, stack, context, line, offset)
{:error, reason} ->
{:error, reason, rest, context, line, offset}
end
end
defp time_2__16(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
time_2__17(
rest,
[
seconds:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp time_2__17(rest, acc, _stack, context, line, offset) do
{:ok, acc, rest, context, line, offset}
end
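  # Usage sketch for time_2/2, traced from the combinators above (not taken
  # from any official docs): it expects a zero-padded "HH:MM:SS" string and
  # validates each field with validate_moduli/6; the bounds are inclusive,
  # so hours up to 24 and minutes/seconds up to 60 still pass.
  #
  #     time_2("12:30:45")
  #     #=> {:ok, [hours: 12, minutes: 30, seconds: 45], "", %{}, {1, 0}, 8}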
@doc """
Parses the given `binary` as time_1.
Returns `{:ok, [token], rest, context, position, byte_offset}` or
`{:error, reason, rest, context, line, byte_offset}` where `position`
describes the location of the time_1 (start position) as `{line, column_on_line}`.
## Options
* `:line` - the initial line, defaults to 1
* `:byte_offset` - the initial byte offset, defaults to 0
* `:context` - the initial context value. It will be converted
to a map
"""
@spec time_1(binary, keyword) ::
{:ok, [term], rest, context, line, byte_offset}
| {:error, reason, rest, context, line, byte_offset}
when line: {pos_integer, byte_offset},
byte_offset: pos_integer,
rest: binary,
reason: String.t(),
context: map()
def time_1(binary, opts \\ []) when is_binary(binary) do
line = Keyword.get(opts, :line, 1)
offset = Keyword.get(opts, :byte_offset, 0)
context = Map.new(Keyword.get(opts, :context, []))
case(time_1__0(binary, [], [], context, {line, offset}, offset)) do
{:ok, acc, rest, context, line, offset} ->
{:ok, :lists.reverse(acc), rest, context, line, offset}
{:error, _, _, _, _, _} = error ->
error
end
end
defp time_1__0(rest, acc, stack, context, line, offset) do
time_1__1(rest, [], [acc | stack], context, line, offset)
end
defp time_1__1(rest, acc, stack, context, line, offset) do
time_1__2(rest, [], [acc | stack], context, line, offset)
end
defp time_1__2(
<<x0::integer, x1::integer, rest::binary>>,
acc,
stack,
context,
comb__line,
comb__offset
)
when x0 >= 48 and x0 <= 57 and (x1 >= 48 and x1 <= 57) do
time_1__3(
rest,
[x1 - 48 + (x0 - 48) * 10] ++ acc,
stack,
context,
comb__line,
comb__offset + 2
)
end
defp time_1__2(rest, _acc, _stack, context, line, offset) do
{:error, "expected 2 digits", rest, context, line, offset}
end
defp time_1__3(rest, user_acc, [acc | stack], context, line, offset) do
case(validate_moduli(rest, user_acc, context, line, offset, 24)) do
{user_acc, context} when is_list(user_acc) ->
time_1__4(rest, user_acc ++ acc, stack, context, line, offset)
{:error, reason} ->
{:error, reason, rest, context, line, offset}
end
end
defp time_1__4(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
time_1__5(
rest,
[
hours:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp time_1__5(rest, acc, stack, context, line, offset) do
time_1__6(rest, [], [acc | stack], context, line, offset)
end
defp time_1__6(rest, acc, stack, context, line, offset) do
time_1__7(rest, [], [acc | stack], context, line, offset)
end
defp time_1__7(
<<x0::integer, x1::integer, rest::binary>>,
acc,
stack,
context,
comb__line,
comb__offset
)
when x0 >= 48 and x0 <= 57 and (x1 >= 48 and x1 <= 57) do
time_1__8(
rest,
[x1 - 48 + (x0 - 48) * 10] ++ acc,
stack,
context,
comb__line,
comb__offset + 2
)
end
defp time_1__7(rest, _acc, _stack, context, line, offset) do
{:error, "expected 2 digits", rest, context, line, offset}
end
defp time_1__8(rest, user_acc, [acc | stack], context, line, offset) do
case(validate_moduli(rest, user_acc, context, line, offset, 60)) do
{user_acc, context} when is_list(user_acc) ->
time_1__9(rest, user_acc ++ acc, stack, context, line, offset)
{:error, reason} ->
{:error, reason, rest, context, line, offset}
end
end
defp time_1__9(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
time_1__10(
rest,
[
minutes:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp time_1__10(rest, acc, stack, context, line, offset) do
time_1__11(rest, [], [acc | stack], context, line, offset)
end
defp time_1__11(rest, acc, stack, context, line, offset) do
time_1__12(rest, [], [acc | stack], context, line, offset)
end
defp time_1__12(
<<x0::integer, x1::integer, rest::binary>>,
acc,
stack,
context,
comb__line,
comb__offset
)
when x0 >= 48 and x0 <= 57 and (x1 >= 48 and x1 <= 57) do
time_1__13(
rest,
[x1 - 48 + (x0 - 48) * 10] ++ acc,
stack,
context,
comb__line,
comb__offset + 2
)
end
defp time_1__12(rest, _acc, _stack, context, line, offset) do
{:error, "expected 2 digits", rest, context, line, offset}
end
defp time_1__13(rest, user_acc, [acc | stack], context, line, offset) do
case(validate_moduli(rest, user_acc, context, line, offset, 60)) do
{user_acc, context} when is_list(user_acc) ->
time_1__14(rest, user_acc ++ acc, stack, context, line, offset)
{:error, reason} ->
{:error, reason, rest, context, line, offset}
end
end
defp time_1__14(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
time_1__15(
rest,
[
seconds:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp time_1__15(rest, acc, _stack, context, line, offset) do
{:ok, acc, rest, context, line, offset}
end
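  # Usage sketch for time_1/2, traced from the clauses above: the same fields
  # as time_2 but without colon separators ("HHMMSS").
  #
  #     time_1("123045")
  #     #=> {:ok, [hours: 12, minutes: 30, seconds: 45], "", %{}, {1, 0}, 6}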
@doc """
Parses the given `binary` as date_3.
Returns `{:ok, [token], rest, context, position, byte_offset}` or
`{:error, reason, rest, context, line, byte_offset}` where `position`
describes the location of the date_3 (start position) as `{line, column_on_line}`.
## Options
* `:line` - the initial line, defaults to 1
* `:byte_offset` - the initial byte offset, defaults to 0
* `:context` - the initial context value. It will be converted
to a map
"""
@spec date_3(binary, keyword) ::
{:ok, [term], rest, context, line, byte_offset}
| {:error, reason, rest, context, line, byte_offset}
when line: {pos_integer, byte_offset},
byte_offset: pos_integer,
rest: binary,
reason: String.t(),
context: map()
def date_3(binary, opts \\ []) when is_binary(binary) do
line = Keyword.get(opts, :line, 1)
offset = Keyword.get(opts, :byte_offset, 0)
context = Map.new(Keyword.get(opts, :context, []))
case(date_3__0(binary, [], [], context, {line, offset}, offset)) do
{:ok, acc, rest, context, line, offset} ->
{:ok, :lists.reverse(acc), rest, context, line, offset}
{:error, _, _, _, _, _} = error ->
error
end
end
defp date_3__0(rest, acc, stack, context, line, offset) do
date_3__4(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp date_3__2(rest, acc, [_, previous_acc | stack], context, line, offset) do
date_3__1(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp date_3__3(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
date_3__2(rest, [], stack, context, line, offset)
end
defp date_3__4(rest, acc, stack, context, line, offset) do
date_3__5(rest, [], [acc | stack], context, line, offset)
end
defp date_3__5(rest, acc, stack, context, line, offset) do
date_3__6(rest, [], [acc | stack], context, line, offset)
end
defp date_3__6(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
date_3__7(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp date_3__6(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
date_3__3(rest, acc, stack, context, line, offset)
end
defp date_3__7(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
date_3__9(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp date_3__7(rest, acc, stack, context, line, offset) do
date_3__8(rest, acc, stack, context, line, offset)
end
defp date_3__9(rest, acc, stack, context, line, offset) do
date_3__7(rest, acc, stack, context, line, offset)
end
defp date_3__8(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
date_3__10(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp date_3__10(<<"Y", _::binary>> = rest, acc, stack, context, line, offset) do
date_3__11(rest, acc, stack, context, line, offset)
end
defp date_3__10(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
date_3__3(rest, acc, stack, context, line, offset)
end
defp date_3__11(<<"Y", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
date_3__12(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp date_3__11(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
date_3__3(rest, acc, stack, context, line, offset)
end
defp date_3__12(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
date_3__13(
rest,
[
years:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp date_3__13(rest, acc, [_, previous_acc | stack], context, line, offset) do
date_3__1(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp date_3__1(rest, acc, stack, context, line, offset) do
date_3__17(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp date_3__15(rest, acc, [_, previous_acc | stack], context, line, offset) do
date_3__14(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp date_3__16(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
date_3__15(rest, [], stack, context, line, offset)
end
defp date_3__17(rest, acc, stack, context, line, offset) do
date_3__18(rest, [], [acc | stack], context, line, offset)
end
defp date_3__18(rest, acc, stack, context, line, offset) do
date_3__19(rest, [], [acc | stack], context, line, offset)
end
defp date_3__19(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
date_3__20(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp date_3__19(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
date_3__16(rest, acc, stack, context, line, offset)
end
defp date_3__20(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
date_3__22(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp date_3__20(rest, acc, stack, context, line, offset) do
date_3__21(rest, acc, stack, context, line, offset)
end
defp date_3__22(rest, acc, stack, context, line, offset) do
date_3__20(rest, acc, stack, context, line, offset)
end
defp date_3__21(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
date_3__23(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp date_3__23(<<"M", _::binary>> = rest, acc, stack, context, line, offset) do
date_3__24(rest, acc, stack, context, line, offset)
end
defp date_3__23(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
date_3__16(rest, acc, stack, context, line, offset)
end
defp date_3__24(<<"M", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
date_3__25(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp date_3__24(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
date_3__16(rest, acc, stack, context, line, offset)
end
defp date_3__25(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
date_3__26(
rest,
[
months:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp date_3__26(rest, acc, [_, previous_acc | stack], context, line, offset) do
date_3__14(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp date_3__14(rest, acc, stack, context, line, offset) do
date_3__30(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp date_3__28(rest, acc, [_, previous_acc | stack], context, line, offset) do
date_3__27(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp date_3__29(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
date_3__28(rest, [], stack, context, line, offset)
end
defp date_3__30(rest, acc, stack, context, line, offset) do
date_3__31(rest, [], [acc | stack], context, line, offset)
end
defp date_3__31(rest, acc, stack, context, line, offset) do
date_3__32(rest, [], [acc | stack], context, line, offset)
end
defp date_3__32(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
date_3__33(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp date_3__32(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
date_3__29(rest, acc, stack, context, line, offset)
end
defp date_3__33(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
date_3__35(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp date_3__33(rest, acc, stack, context, line, offset) do
date_3__34(rest, acc, stack, context, line, offset)
end
defp date_3__35(rest, acc, stack, context, line, offset) do
date_3__33(rest, acc, stack, context, line, offset)
end
defp date_3__34(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
date_3__36(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp date_3__36(<<"D", _::binary>> = rest, acc, stack, context, line, offset) do
date_3__37(rest, acc, stack, context, line, offset)
end
defp date_3__36(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
date_3__29(rest, acc, stack, context, line, offset)
end
defp date_3__37(<<"D", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
date_3__38(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp date_3__37(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
date_3__29(rest, acc, stack, context, line, offset)
end
defp date_3__38(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
date_3__39(
rest,
[
days:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp date_3__39(rest, acc, [_, previous_acc | stack], context, line, offset) do
date_3__27(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp date_3__27(rest, acc, _stack, context, line, offset) do
{:ok, acc, rest, context, line, offset}
end
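  # Usage sketch for date_3/2, traced from the clauses above: each of the
  # "<n>Y", "<n>M" and "<n>D" components is optional, so even "" parses to
  # an empty token list.
  #
  #     date_3("1Y2M3D")
  #     #=> {:ok, [years: 1, months: 2, days: 3], "", %{}, {1, 0}, 6}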
@doc """
Parses the given `binary` as date_2.
Returns `{:ok, [token], rest, context, position, byte_offset}` or
`{:error, reason, rest, context, line, byte_offset}` where `position`
describes the location of the date_2 (start position) as `{line, column_on_line}`.
## Options
* `:line` - the initial line, defaults to 1
* `:byte_offset` - the initial byte offset, defaults to 0
* `:context` - the initial context value. It will be converted
to a map
"""
@spec date_2(binary, keyword) ::
{:ok, [term], rest, context, line, byte_offset}
| {:error, reason, rest, context, line, byte_offset}
when line: {pos_integer, byte_offset},
byte_offset: pos_integer,
rest: binary,
reason: String.t(),
context: map()
def date_2(binary, opts \\ []) when is_binary(binary) do
line = Keyword.get(opts, :line, 1)
offset = Keyword.get(opts, :byte_offset, 0)
context = Map.new(Keyword.get(opts, :context, []))
case(date_2__0(binary, [], [], context, {line, offset}, offset)) do
{:ok, acc, rest, context, line, offset} ->
{:ok, :lists.reverse(acc), rest, context, line, offset}
{:error, _, _, _, _, _} = error ->
error
end
end
defp date_2__0(
<<x0::integer, x1::integer, x2::integer, x3::integer, "-", rest::binary>>,
acc,
stack,
context,
comb__line,
comb__offset
)
when x0 >= 48 and x0 <= 57 and (x1 >= 48 and x1 <= 57) and (x2 >= 48 and x2 <= 57) and
(x3 >= 48 and x3 <= 57) do
date_2__1(
rest,
[years: x3 - 48 + (x2 - 48) * 10 + (x1 - 48) * 100 + (x0 - 48) * 1000] ++ acc,
stack,
context,
comb__line,
comb__offset + 5
)
end
defp date_2__0(rest, _acc, _stack, context, line, offset) do
{:error, "expected 4 digits, followed by dash", rest, context, line, offset}
end
defp date_2__1(rest, acc, stack, context, line, offset) do
date_2__2(rest, [], [acc | stack], context, line, offset)
end
defp date_2__2(rest, acc, stack, context, line, offset) do
date_2__3(rest, [], [acc | stack], context, line, offset)
end
defp date_2__3(
<<x0::integer, x1::integer, rest::binary>>,
acc,
stack,
context,
comb__line,
comb__offset
)
when x0 >= 48 and x0 <= 57 and (x1 >= 48 and x1 <= 57) do
date_2__4(
rest,
[x1 - 48 + (x0 - 48) * 10] ++ acc,
stack,
context,
comb__line,
comb__offset + 2
)
end
defp date_2__3(rest, _acc, _stack, context, line, offset) do
{:error, "expected 2 digits", rest, context, line, offset}
end
defp date_2__4(rest, user_acc, [acc | stack], context, line, offset) do
case(validate_moduli(rest, user_acc, context, line, offset, 12)) do
{user_acc, context} when is_list(user_acc) ->
date_2__5(rest, user_acc ++ acc, stack, context, line, offset)
{:error, reason} ->
{:error, reason, rest, context, line, offset}
end
end
defp date_2__5(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
date_2__6(
rest,
[
months:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp date_2__6(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
date_2__7(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp date_2__6(rest, _acc, _stack, context, line, offset) do
{:error, "expected dash", rest, context, line, offset}
end
defp date_2__7(rest, acc, stack, context, line, offset) do
date_2__8(rest, [], [acc | stack], context, line, offset)
end
defp date_2__8(rest, acc, stack, context, line, offset) do
date_2__9(rest, [], [acc | stack], context, line, offset)
end
defp date_2__9(
<<x0::integer, x1::integer, rest::binary>>,
acc,
stack,
context,
comb__line,
comb__offset
)
when x0 >= 48 and x0 <= 57 and (x1 >= 48 and x1 <= 57) do
date_2__10(
rest,
[x1 - 48 + (x0 - 48) * 10] ++ acc,
stack,
context,
comb__line,
comb__offset + 2
)
end
defp date_2__9(rest, _acc, _stack, context, line, offset) do
{:error, "expected 2 digits", rest, context, line, offset}
end
defp date_2__10(rest, user_acc, [acc | stack], context, line, offset) do
case(validate_moduli(rest, user_acc, context, line, offset, 31)) do
{user_acc, context} when is_list(user_acc) ->
date_2__11(rest, user_acc ++ acc, stack, context, line, offset)
{:error, reason} ->
{:error, reason, rest, context, line, offset}
end
end
defp date_2__11(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
date_2__12(
rest,
[
days:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp date_2__12(rest, acc, _stack, context, line, offset) do
{:ok, acc, rest, context, line, offset}
end
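  # Usage sketch for date_2/2, traced from the clauses above: an ISO
  # 8601-style "YYYY-MM-DD" date, with months checked against 12 and days
  # against 31 via validate_moduli/6.
  #
  #     date_2("2021-05-09")
  #     #=> {:ok, [years: 2021, months: 5, days: 9], "", %{}, {1, 0}, 10}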
@doc """
Parses the given `binary` as date_1.
Returns `{:ok, [token], rest, context, position, byte_offset}` or
`{:error, reason, rest, context, line, byte_offset}` where `position`
describes the location of the date_1 (start position) as `{line, column_on_line}`.
## Options
* `:line` - the initial line, defaults to 1
* `:byte_offset` - the initial byte offset, defaults to 0
* `:context` - the initial context value. It will be converted
to a map
"""
@spec date_1(binary, keyword) ::
{:ok, [term], rest, context, line, byte_offset}
| {:error, reason, rest, context, line, byte_offset}
when line: {pos_integer, byte_offset},
byte_offset: pos_integer,
rest: binary,
reason: String.t(),
context: map()
def date_1(binary, opts \\ []) when is_binary(binary) do
line = Keyword.get(opts, :line, 1)
offset = Keyword.get(opts, :byte_offset, 0)
context = Map.new(Keyword.get(opts, :context, []))
case(date_1__0(binary, [], [], context, {line, offset}, offset)) do
{:ok, acc, rest, context, line, offset} ->
{:ok, :lists.reverse(acc), rest, context, line, offset}
{:error, _, _, _, _, _} = error ->
error
end
end
defp date_1__0(
<<x0::integer, x1::integer, x2::integer, x3::integer, rest::binary>>,
acc,
stack,
context,
comb__line,
comb__offset
)
when x0 >= 48 and x0 <= 57 and (x1 >= 48 and x1 <= 57) and (x2 >= 48 and x2 <= 57) and
(x3 >= 48 and x3 <= 57) do
date_1__1(
rest,
[years: x3 - 48 + (x2 - 48) * 10 + (x1 - 48) * 100 + (x0 - 48) * 1000] ++ acc,
stack,
context,
comb__line,
comb__offset + 4
)
end
defp date_1__0(rest, _acc, _stack, context, line, offset) do
{:error, "expected 4 digits", rest, context, line, offset}
end
defp date_1__1(rest, acc, stack, context, line, offset) do
date_1__2(rest, [], [acc | stack], context, line, offset)
end
defp date_1__2(rest, acc, stack, context, line, offset) do
date_1__3(rest, [], [acc | stack], context, line, offset)
end
defp date_1__3(
<<x0::integer, x1::integer, rest::binary>>,
acc,
stack,
context,
comb__line,
comb__offset
)
when x0 >= 48 and x0 <= 57 and (x1 >= 48 and x1 <= 57) do
date_1__4(
rest,
[x1 - 48 + (x0 - 48) * 10] ++ acc,
stack,
context,
comb__line,
comb__offset + 2
)
end
defp date_1__3(rest, _acc, _stack, context, line, offset) do
{:error, "expected 2 digits", rest, context, line, offset}
end
defp date_1__4(rest, user_acc, [acc | stack], context, line, offset) do
case(validate_moduli(rest, user_acc, context, line, offset, 12)) do
{user_acc, context} when is_list(user_acc) ->
date_1__5(rest, user_acc ++ acc, stack, context, line, offset)
{:error, reason} ->
{:error, reason, rest, context, line, offset}
end
end
defp date_1__5(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
date_1__6(
rest,
[
months:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp date_1__6(rest, acc, stack, context, line, offset) do
date_1__7(rest, [], [acc | stack], context, line, offset)
end
defp date_1__7(rest, acc, stack, context, line, offset) do
date_1__8(rest, [], [acc | stack], context, line, offset)
end
defp date_1__8(
<<x0::integer, x1::integer, rest::binary>>,
acc,
stack,
context,
comb__line,
comb__offset
)
when x0 >= 48 and x0 <= 57 and (x1 >= 48 and x1 <= 57) do
date_1__9(
rest,
[x1 - 48 + (x0 - 48) * 10] ++ acc,
stack,
context,
comb__line,
comb__offset + 2
)
end
defp date_1__8(rest, _acc, _stack, context, line, offset) do
{:error, "expected 2 digits", rest, context, line, offset}
end
defp date_1__9(rest, user_acc, [acc | stack], context, line, offset) do
case(validate_moduli(rest, user_acc, context, line, offset, 31)) do
{user_acc, context} when is_list(user_acc) ->
date_1__10(rest, user_acc ++ acc, stack, context, line, offset)
{:error, reason} ->
{:error, reason, rest, context, line, offset}
end
end
defp date_1__10(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
date_1__11(
rest,
[
days:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp date_1__11(rest, acc, _stack, context, line, offset) do
{:ok, acc, rest, context, line, offset}
end
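  # Usage sketch for date_1/2, traced from the clauses above: the same fields
  # as date_2 but without dash separators ("YYYYMMDD").
  #
  #     date_1("20210509")
  #     #=> {:ok, [years: 2021, months: 5, days: 9], "", %{}, {1, 0}, 8}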
defp validate_moduli(_rest, args, context, _line, _offset, moduli) do
if Enum.any?(args, fn x -> x > moduli end) do
[head | _] = args
{:error, "moduli #{head} / #{moduli}"}
else
{args, context}
end
end
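  # Note: validate_moduli/6 only rejects values strictly greater than the
  # bound, so a month of exactly 12 passes while, for example,
  # date_2("2021-13-01") fails with {:error, "moduli 13 / 12", "-01", ...}.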
end
|
lib/duration/parser.ex
| 0.922434 | 0.530845 |
parser.ex
|
starcoder
|
defmodule RDF.Quad do
@moduledoc """
Helper functions for RDF quads.
An RDF Quad is represented as a plain Elixir tuple consisting of four valid
RDF values for subject, predicate, object and a graph name.
"""
alias RDF.{Statement, PropertyMap}
@type t :: {
Statement.subject(),
Statement.predicate(),
Statement.object(),
Statement.graph_name()
}
@type t_values :: {String.t(), String.t(), any, String.t()}
@doc """
Creates a `RDF.Quad` with proper RDF values.
An error is raised when the given elements are not coercible to RDF values.
Note: The `RDF.quad` function is a shortcut to this function.
## Examples
iex> RDF.Quad.new("http://example.com/S", "http://example.com/p", 42, "http://example.com/Graph")
{~I<http://example.com/S>, ~I<http://example.com/p>, RDF.literal(42), ~I<http://example.com/Graph>}
iex> RDF.Quad.new(EX.S, EX.p, 42, EX.Graph)
{RDF.iri("http://example.com/S"), RDF.iri("http://example.com/p"), RDF.literal(42), RDF.iri("http://example.com/Graph")}
iex> RDF.Quad.new(EX.S, :p, 42, EX.Graph, RDF.PropertyMap.new(p: EX.p))
{RDF.iri("http://example.com/S"), RDF.iri("http://example.com/p"), RDF.literal(42), RDF.iri("http://example.com/Graph")}
"""
@spec new(
Statement.coercible_subject(),
Statement.coercible_predicate(),
Statement.coercible_object(),
Statement.coercible_graph_name(),
PropertyMap.t() | nil
) :: t
def new(subject, predicate, object, graph_name, property_map \\ nil)
def new(subject, predicate, object, graph_name, nil) do
{
Statement.coerce_subject(subject),
Statement.coerce_predicate(predicate),
Statement.coerce_object(object),
Statement.coerce_graph_name(graph_name)
}
end
def new(subject, predicate, object, graph_name, %PropertyMap{} = property_map) do
{
Statement.coerce_subject(subject),
Statement.coerce_predicate(predicate, property_map),
Statement.coerce_object(object),
Statement.coerce_graph_name(graph_name)
}
end
@doc """
Creates a `RDF.Quad` with proper RDF values.
An error is raised when the given elements are not coercible to RDF values.
Note: The `RDF.quad` function is a shortcut to this function.
## Examples
iex> RDF.Quad.new {"http://example.com/S", "http://example.com/p", 42, "http://example.com/Graph"}
{~I<http://example.com/S>, ~I<http://example.com/p>, RDF.literal(42), ~I<http://example.com/Graph>}
iex> RDF.Quad.new {EX.S, EX.p, 42, EX.Graph}
{RDF.iri("http://example.com/S"), RDF.iri("http://example.com/p"), RDF.literal(42), RDF.iri("http://example.com/Graph")}
iex> RDF.Quad.new {EX.S, EX.p, 42}
{RDF.iri("http://example.com/S"), RDF.iri("http://example.com/p"), RDF.literal(42), nil}
iex> RDF.Quad.new {EX.S, :p, 42, EX.Graph}, RDF.PropertyMap.new(p: EX.p)
{RDF.iri("http://example.com/S"), RDF.iri("http://example.com/p"), RDF.literal(42), RDF.iri("http://example.com/Graph")}
"""
@spec new(Statement.coercible_t(), PropertyMap.t() | nil) :: t
def new(statement, property_map \\ nil)
def new({subject, predicate, object, graph_name}, property_map) do
new(subject, predicate, object, graph_name, property_map)
end
def new({subject, predicate, object}, property_map) do
new(subject, predicate, object, nil, property_map)
end
@doc """
Returns a tuple of native Elixir values from a `RDF.Quad` of RDF terms.
When a `:context` option is given with a `RDF.PropertyMap`, predicates will
be mapped to the terms defined in the `RDF.PropertyMap`, if present.
Returns `nil` if one of the components of the given tuple is not convertible via `RDF.Term.value/1`.
## Examples
iex> RDF.Quad.values {~I<http://example.com/S>, ~I<http://example.com/p>, RDF.literal(42), ~I<http://example.com/Graph>}
{"http://example.com/S", "http://example.com/p", 42, "http://example.com/Graph"}
iex> {~I<http://example.com/S>, ~I<http://example.com/p>, RDF.literal(42), ~I<http://example.com/Graph>}
...> |> RDF.Quad.values(context: %{p: ~I<http://example.com/p>})
{"http://example.com/S", :p, 42, "http://example.com/Graph"}
"""
@spec values(t, keyword) :: t_values | nil
def values(quad, opts \\ []) do
if property_map = PropertyMap.from_opts(opts) do
map(quad, Statement.default_property_mapping(property_map))
else
map(quad, &Statement.default_term_mapping/1)
end
end
@doc """
Returns a tuple where each element from a `RDF.Quad` is mapped with the given function.
Returns `nil` if one of the components of the given tuple is not convertible via `RDF.Term.value/1`.
The function `fun` will receive a tuple `{statement_position, rdf_term}` where
`statement_position` is one of the atoms `:subject`, `:predicate`, `:object` or
  `:graph_name` while `rdf_term` is the RDF term to be mapped. When the given
  function returns `nil`, this is interpreted as an error and becomes the
  overall result of the `map/2` call.
## Examples
iex> {~I<http://example.com/S>, ~I<http://example.com/p>, RDF.literal(42), ~I<http://example.com/Graph>}
...> |> RDF.Quad.map(fn
...> {:object, object} ->
...> RDF.Term.value(object)
...> {:graph_name, graph_name} ->
...> graph_name
...> {_, resource} ->
...> resource |> to_string() |> String.last() |> String.to_atom()
...> end)
{:S, :p, 42, ~I<http://example.com/Graph>}
"""
@spec map(t, Statement.term_mapping()) :: t_values | nil
def map({subject, predicate, object, graph_name}, fun) do
with subject_value when not is_nil(subject_value) <- fun.({:subject, subject}),
predicate_value when not is_nil(predicate_value) <- fun.({:predicate, predicate}),
object_value when not is_nil(object_value) <- fun.({:object, object}),
graph_name_value <- fun.({:graph_name, graph_name}) do
{subject_value, predicate_value, object_value, graph_name_value}
else
_ -> nil
end
end
@doc """
Checks if the given tuple is a valid RDF quad.

  The elements of a valid RDF quad must be RDF terms. On the subject
  position only IRIs and blank nodes are allowed, while on the predicate and
  graph name position only IRIs are allowed. The object position can be any
  RDF term.
"""
@spec valid?(t | any) :: boolean
def valid?(tuple)
def valid?({_, _, _, _} = quad), do: Statement.valid?(quad)
def valid?(_), do: false
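  # Note: validity checking is delegated to Statement.valid?/1; anything that
  # is not a 4-tuple is rejected outright by the clauses above.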
end
|
lib/rdf/quad.ex
| 0.925002 | 0.698379 |
quad.ex
|
starcoder
|
defmodule FusionDsl do
@moduledoc """
Fusion DSL main API.
This module is a standard interface for the following.
- Managing packages.
  - Compiling Fusion code (lexing, AST processing).
  - Configuring the runtime environment.
- Code execution.
"""
require FusionDsl.Kernel
require FusionDsl.Logger
require Logger
alias FusionDsl.Kernel
alias FusionDsl.Processor.Lexer
alias FusionDsl.Processor.AstProcessor
alias FusionDsl.Runtime.Environment
alias FusionDsl.Runtime.Executor
alias FusionDsl.NativeImpl
alias FusionDsl.Helpers.CodeReloader
@predefined_packages Application.get_env(:fusion_dsl, :predefined_packages, [
{Kernel, []},
{FusionDsl.Logger, []}
])
@typedoc """
Keywords used in package configs
  - `:as`: Defines the name of the module to be used inside fusion scripts. `SnakeCase` preferred.
  - `:name`: A unique atom name for the package. (In case of multiple uses of the same package.)
"""
@type package_options :: {:as, String.t()} | {:name, atom()}
def start(_type, _args) do
:timer.sleep(100)
CodeReloader.reload_module(FusionDsl.Processor.Lexer)
CodeReloader.reload_module(FusionDsl.Processor.AstProcessor)
{:ok, self()}
end
@doc """
Returns a list of configured packages in their original configuration format
"""
@spec get_packages :: [{atom(), [package_options]}]
def get_packages do
raw_packages = Application.get_env(:fusion_dsl, :packages, [])
packages = NativeImpl.create_native_packages(raw_packages)
all_packages = @predefined_packages ++ packages
# Remove all unavailable packages
Enum.reduce(all_packages, [], fn {mod, _} = pack, acc ->
# Ensures that module is loaded
Code.ensure_loaded(mod)
if function_exported?(mod, :__list_fusion_functions__, 0) do
# Adds the package if package module exists
acc ++ [pack]
else
Logger.warn("Fusion package missing #{mod} (Ignore this on compile!)")
acc
end
end)
end
@doc """
Compiles a fusion code and returns the base environment
for code execution. This environment struct contains `:prog`
data and basic default environment data.
"""
@spec compile(String.t()) :: {:ok, Environment.t()}
def compile(code) do
{:ok, conf, tokens} = Lexer.tokenize(code)
lines = Lexer.split_by_lines(tokens, conf.start_code)
{:ok, ast_data} = AstProcessor.generate_ast(conf, lines)
Environment.prepare_env(ast_data)
end
@doc """
Reads a file with fusion code and compiles it.
"""
@spec compile_file(String.t()) :: {:ok, Environment.t()}
def compile_file(filename) do
filename
|> File.read!()
|> compile()
end
@doc """
Executes and environment with the given procedure (default is `:main`)
Returns the environment in case of success.
"""
@spec execute(Environment.t()) :: {:end, Environment.t()}
def execute(env, proc \\ :main) do
Executor.execute(env, proc)
end
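  # Typical end-to-end flow (sketch; assumes `code` holds a valid Fusion
  # script, with return shapes taken from the specs above):
  #
  #     {:ok, env} = FusionDsl.compile(code)
  #     {:end, env} = FusionDsl.execute(env)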
end
|
lib/fusion_dsl.ex
| 0.821689 | 0.446374 |
fusion_dsl.ex
|
starcoder
|
defmodule Serum.Result do
@moduledoc """
This module defines types for positive results or errors returned by
functions in this project.
"""
import Serum.IOProxy, only: [put_err: 2]
alias Serum.Error
alias Serum.Error.Format
alias Serum.Error.SimpleMessage
@type t(type) :: {:ok, type} | {:error, Error.t()}
@doc """
Takes a list of results with values and checks if there is no error.
If there is no error, it returns `{:ok, list}` where `list` is a list of
returned values.
Returns an aggregated error object if there is one or more errors.
"""
@spec aggregate([t(a)], binary()) :: t([a]) when a: term()
def aggregate(results, msg) do
results
|> do_aggregate([], [])
|> case do
{values, []} when is_list(values) ->
{:ok, values}
{_, errors} when is_list(errors) ->
{:error, %Error{message: %SimpleMessage{text: msg}, caused_by: errors}}
end
end
@spec do_aggregate([t(a)], [a], [Error.t()]) :: {[a], [Error.t()]} when a: term()
defp do_aggregate(results, values, errors)
defp do_aggregate([], values, errors) do
{Enum.reverse(values), errors |> Enum.reverse() |> Enum.uniq()}
end
defp do_aggregate([{:ok, value} | results], values, errors) do
do_aggregate(results, [value | values], errors)
end
defp do_aggregate([{:error, error} | results], values, errors) do
do_aggregate(results, values, [error | errors])
end
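  # For example (sketch): aggregate([{:ok, 1}, {:ok, 2}], "msg") yields
  # {:ok, [1, 2]}, while any {:error, %Error{}} element instead produces
  # {:error, %Error{message: %SimpleMessage{text: "msg"}, caused_by: errors}}.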
@doc "Prints an error object in a beautiful format."
@spec show(t(term()), non_neg_integer()) :: t({})
  def show(result, depth \\ 0)
def show({:ok, _} = result, depth), do: put_err(:info, get_message(result, depth))
def show(error, depth), do: put_err(:error, get_message(error, depth))
@doc """
Gets a human friendly message from the given `result`.
You can control the indentation level by passing a non-negative integer to
the `depth` parameter.
"""
@spec get_message(t(term), non_neg_integer()) :: binary()
def get_message(result, depth) do
result |> do_get_message(depth) |> IO.iodata_to_binary()
end
@spec do_get_message(t(term), non_neg_integer()) :: IO.chardata()
defp do_get_message(result, depth)
defp do_get_message({:ok, _}, depth), do: indented("No error detected", depth)
defp do_get_message({:error, %Error{} = error}, depth) do
error |> Format.format_text(depth) |> IO.ANSI.format()
end
@spec indented(IO.ANSI.ansidata(), non_neg_integer()) :: IO.ANSI.ansidata()
defp indented(str, 0), do: str
defp indented(str, depth), do: [List.duplicate(" ", depth - 1), :red, "- ", :reset, str]
@doc "Provides \"do-notation\"-like syntactic sugar for operation chaining."
defmacro run(expr), do: build_run(expr)
defp build_run(do: do_expr) do
default_else =
quote do
{:error, %Serum.Error{}} = error -> error
end
build_run(do: do_expr, else: default_else)
end
defp build_run(do: {:__block__, _, exprs}, else: else_expr) do
[last | leadings] = Enum.reverse(exprs)
leadings =
leadings
|> Enum.reverse()
|> Enum.map(fn
{:<-, _, [lhs, rhs]} -> quote(do: {:ok, unquote(lhs)} <- unquote(rhs))
{:=, _, _} = assignment -> assignment
expr -> quote(do: {:ok, _} <- unquote(expr))
end)
{:with, [], leadings ++ [[do: last, else: else_expr]]}
end
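  # Sketch of what `run/1` expands to, based on build_run/1 above: each
  # `x <- expr` becomes `{:ok, x} <- expr` and each bare expression becomes
  # `{:ok, _} <- expr` inside a `with`, so an `{:error, %Serum.Error{}}`
  # short-circuits. `load/1` and `transform/1` below are hypothetical
  # result-returning functions:
  #
  #     run do
  #       data <- load(path)
  #       transformed <- transform(data)
  #       return(transformed)
  #     end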
@doc "Expands into `{:ok, {}}` tuple."
defmacro return, do: quote(do: {:ok, {}})
@doc "Wraps `expr` into `{:ok, expr}` tuple."
defmacro return(expr)
defmacro return(do: do_block), do: quote(do: {:ok, unquote(do_block)})
defmacro return(expr), do: quote(do: {:ok, unquote(expr)})
@doc "Expands into `{:error, %Error{...}}` tuple."
defmacro fail({:__aliases__, _, [type]}, args, opts \\ [])
when is_atom(type) and is_list(args) and is_list(opts) do
msg_module = Module.concat(Serum.Error, "#{type}Message")
caused_by = opts[:caused_by] || []
quote do
{:error,
%Serum.Error{
message: unquote(msg_module).message(unquote(args)),
caused_by: unquote(caused_by),
file: unquote(opts[:file]),
line: unquote(opts[:line])
}}
end
end
end
|
lib/serum/result.ex
| 0.854809 | 0.499878 |
result.ex
|
starcoder
|
defmodule Pelemay do
import SumMag
alias Pelemay.Generator
alias Pelemay.Db
@moduledoc """
## Pelemay: The Penta (Five) “Elemental Way”: Freedom, Insight, Beauty, Efficiency and Robustness
  For example, the following code of the function `map_square` will be
  compiled to native code using SIMD instructions by Pelemay.

  ```elixir
  defmodule M do
    require Pelemay
    import Pelemay

    defpelemay do
      def map_square(list) do
        list
        |> Enum.map(& &1 * &1)
      end
    end
  end
  ```

1. Find Enum.map with a specific macro
2. Analyze internal anonymous functions
  3. Register (ETS) the following information as a map:
     - Module
     - Original function name
     - Function name for the NIF
     - Value of the anonymous function
  4. Insert the NIF into the AST
  5. Apply steps 1-4 to each macro
  6. Receive the map from ETS, and...
  7. Generate NIF code
  8. Generate the Elixir functions
  9. Compile the NIF as a custom Mix task, using Clang
"""
defmacro defpelemay(functions) do
Db.init()
functions
|> SumMag.map(
&Optimizer.replace_expr(
&1,
__CALLER__.module |> Generator.elixir_nif_module() |> String.to_atom()
)
)
|> pelemaystub(__CALLER__.module)
end
defp pelemaystub(ret, module) do
Generator.generate(module)
ret
end
end
defmodule Optimizer do
@moduledoc """
  Provides an optimizer for [AST](https://elixirschool.com/en/lessons/advanced/metaprogramming/)
"""
def replace_expr({atom, _, nil} = arg, _module)
when atom |> is_atom do
arg
end
def replace_expr(quoted, module) do
quoted
|> Optimizer.Enum.replace_expr(module)
end
end
defmodule Optimizer.Enum do
alias Pelemay.Db
alias Analyzer.AFunc
def replace_expr({quoted, :map}, module) do
# include ast of Enum.map
{_enum_map, _, anonymous_func} = quoted
anonymous_func
|> AFunc.supported?()
|> call_nif(:map, module)
end
def replace_expr({quoted, :chunk_every}, module) do
{_enum, _, num} = quoted
call_nif(num, :chunk_every, module)
end
def replace_expr({quoted, _func}, _module) do
str = Macro.to_string(quoted)
IO.puts("Sorry, #{str} not supported yet.")
quoted
end
def replace_expr(other, module) do
other
|> which_enum_func?
|> replace_expr(module)
end
defp which_enum_func?(ast) do
{_, flag} =
Macro.prewalk(ast, false, fn
{:__aliases__, _, [:Enum]} = ast, _ -> {ast, true}
other, acc -> {other, acc}
end)
case flag do
true -> {ast, ast |> which_function?}
false -> {ast, nil}
end
end
defp which_function?(ast) do
{_, func} =
Macro.prewalk(ast, false, fn
:map = ast, _acc -> {ast, :map}
:chunk_every = ast, _acc -> {ast, :chunk_every}
other, acc -> {other, acc}
end)
func
end
def call_nif(num, :chunk_every, module) do
quote do: unquote(module).chunk_every(unquote(num))
end
def call_nif({:ok, asm}, :map, module) do
%{
operators: operators,
args: args
} = asm
func_name = generate_function_name(:map, operators)
case Db.validate(func_name) do
nil ->
# plan to fix this data
info = %{
module: :enum,
function: :map,
nif_name: func_name,
arg_num: 1,
args: args,
operators: operators
}
Db.register(info)
# plan to fix this data
true ->
info = %{
module: :enum,
function: :map,
nif_name: func_name,
arg_num: 1,
args: args,
operators: operators
}
Db.register(info)
false ->
nil
end
func_name = func_name |> String.to_atom()
quote do: unquote(module).unquote(func_name)
end
def call_nif({:error, asm}, _atom, _module) do
asm
end
defp generate_function_name(func, operators) do
ret =
operators
|> Enum.map(&(&1 |> operator_to_string))
|> Enum.reduce("", fn x, acc -> acc <> "_#{x}" end)
Atom.to_string(func) <> ret
end
defp operator_to_string(operator)
when operator |> is_atom do
case operator do
:* -> "mult"
:+ -> "plus"
:- -> "minus"
:/ -> "div"
:rem -> "mod"
end
end
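  # Example, derived from the two helpers above: the anonymous function
  # `& &1 * &1` yields the operator list [:*], so
  # generate_function_name(:map, [:*]) returns "map_mult", the name under
  # which the generated NIF is registered and called.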
end
defmodule Analyzer.AFunc do
import SumMag
@type asm :: %{args: list(any), operators: list(atom)}
@moduledoc """
  Provides an optimizer for anonymous functions.
"""
@doc """
  Checks if an expression can be optimized.

  When the expression can be optimized, {:ok, map} is returned.
  The map has the following shape: %{args: _, operators: _}.
"""
@spec supported?(Macro.t()) :: asm
def supported?([{:fn, _, [{:->, _, [_arg, expr]}]}]) do
supported_expr?(expr)
end
def supported?({:fn, _, [{:->, _, [_arg, expr]}]}) do
supported_expr?(expr)
end
# Anonymous functions by &
def supported?([{:&, _, other}]) do
other |> hd |> supported_expr?
end
def supported?({:&, _, other}) do
other |> hd |> supported_expr?
end
def supported?(other), do: {:error, other}
defp supported_expr?({_atom, _, [_left, _right]} = ast) do
expr_map = ast |> polynomial_map
if supported_operators?(expr_map) do
{:ok, expr_map}
else
{:error, ast}
end
end
def polynomial_map(ast) do
acc = %{
operators: [],
args: []
}
Macro.prewalk(ast, acc, &numerical?/2) |> elem(1)
end
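  # For example (sketch): polynomial_map(quote do: x * 2) returns
  # %{operators: [:*], args: [x_ast, 2]}, where x_ast is the quoted
  # variable, assuming SumMag.quoted_var?/1 recognizes bare variables.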
defp operator(:+), do: :+
defp operator(:-), do: :-
defp operator(:/), do: :/
defp operator(:*), do: :*
defp operator(:rem), do: :rem
defp operator(_), do: false
defp numerical?({atom, _, [left, right]} = ast, acc) do
%{
operators: operators,
args: args
} = acc
operators =
case operator(atom) do
false -> operators
atom -> [atom | operators]
end
args =
args
|> listing_literal(right)
|> listing_literal(left)
ret = %{
operators: operators,
args: args
}
{ast, ret}
end
defp numerical?(other, acc) do
{other, acc}
end
defp listing_literal(acc, term) do
if Macro.quoted_literal?(term) do
[term | acc]
else
case quoted_var?(term) do
false -> acc
_ -> [term | acc]
end
end
end
defp supported_operators?(%{operators: operators, args: args}) do
if length(operators) != length(args) - 1 do
false
else
true
end
end
end
|
lib/pelemay.ex
| 0.766206 | 0.66034 |
pelemay.ex
|
starcoder
|
defmodule Exdis.IoData do
use Bitwise
require Record
## ------------------------------------------------------------------
## Record and Type Definitions
## ------------------------------------------------------------------
Record.defrecord(:io_data,
bytes: nil,
size: nil,
fragments: nil
)
@opaque t :: record(:io_data,
bytes: iodata,
size: non_neg_integer,
fragments: non_neg_integer)
## ------------------------------------------------------------------
## API Functions
## ------------------------------------------------------------------
def append(io_data(bytes: bytes, size: size, fragments: fragments) = io_data, tail_bytes) do
{tail_size, tail_fragments} = count_size_and_fragments(tail_bytes)
io_data(io_data,
bytes: [bytes, tail_bytes],
size: size + tail_size,
fragments: fragments + tail_fragments)
end
def bit_count(io_data, start, finish) do
range_bytes = get_range(io_data, start, finish)
bit_count_recur(range_bytes, 0)
end
def bit_position(io_data, bit, start, finish) do
io_data(bytes: bytes, size: size) = io_data
case normalize_byte_range(size, start, finish) do
{:valid, start, length} ->
{_, 0, range_bytes} = get_range_recur(bytes, start, length, [])
bit_position_recur(range_bytes, bit, start * 8)
:invalid ->
{:skipped, size(io_data)}
end
end
def bytes(io_data(bytes: bytes)), do: bytes
def flatten(io_data(bytes: bytes, size: size) = io_data) do
binary = :erlang.iolist_to_binary(bytes)
^size = byte_size(binary)
io_data(io_data, bytes: binary, size: size, fragments: 1)
end
def fragments(io_data(fragments: fragments)), do: fragments
# optimization
def get_bit(io_data(size: size), offset) when offset >= size * 8 do
0
end
def get_bit(io_data(bytes: bytes), offset) when offset >= 0 do
case find_bit_recur(bytes, offset) do
{:found, bit_value} ->
bit_value
{:skipped, _} ->
0
end
end
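  # Sketch: bit offsets count from the most significant bit of the first
  # byte, e.g. get_bit(new(<<0b10000000>>), 0) finds the leading 1 via
  # find_bit_recur/2.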
def get_range(io_data(bytes: bytes, size: size), start, finish) do
case normalize_byte_range(size, start, finish) do
{:valid, start, length} ->
{_, 0, range_bytes} = get_range_recur(bytes, start, length, [])
range_bytes
:invalid ->
""
end
end
def new(bytes) do
{size, fragments} = count_size_and_fragments(bytes)
io_data(
bytes: bytes,
size: size,
fragments: fragments)
end
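  # Example (sketch): new(["ab", ?c, ["d"]]) records size 4 and fragments 3,
  # since the binary "ab", the byte ?c and the binary "d" each count as one
  # fragment in count_size_and_fragments/1.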
def size(io_data(size: size)), do: size
## ------------------------------------------------------------------
## Private Function: count_size_and_fragments
## ------------------------------------------------------------------
defp count_size_and_fragments(bytes) do
count_size_and_fragments_recur(bytes, 0, 0)
end
defp count_size_and_fragments_recur(<<binary :: bytes>>, size_acc, fragments_acc) do
{size_acc + byte_size(binary), fragments_acc + 1}
end
defp count_size_and_fragments_recur([list_head | list_tail], size_acc, fragments_acc) do
{size_acc, fragments_acc} = count_size_and_fragments_recur(list_head, size_acc, fragments_acc)
count_size_and_fragments_recur(list_tail, size_acc, fragments_acc)
end
defp count_size_and_fragments_recur([], size_acc, fragments_acc) do
{size_acc, fragments_acc}
end
defp count_size_and_fragments_recur(byte, size_acc, fragments_acc)
when is_integer(byte) and byte >= 0 and byte < 256
do
{size_acc + 1, fragments_acc + 1}
end
## -----------------------------------------------------------------
## Private Functions: bit_count_recur
## ------------------------------------------------------------------
defp bit_count_recur(<<binary :: bytes>>, acc) do
acc + Exdis.Bitstring.bit_count(binary)
end
defp bit_count_recur([head|tail], acc) do
acc = bit_count_recur(head, acc)
bit_count_recur(tail, acc)
end
defp bit_count_recur([], acc), do: acc
defp bit_count_recur(byte, acc) when is_integer(byte) do
acc + Exdis.Byte.bit_count(byte)
end
## -----------------------------------------------------------------
  ## Private Functions: bit_position_recur
## ------------------------------------------------------------------
defp bit_position_recur(<<binary :: bytes>>, bit, acc) do
case Exdis.Bitstring.bit_position(binary, bit) do
:skipped ->
{:skipped, bit_size(binary)}
{:found, offset} ->
{:found, acc + offset}
end
end
defp bit_position_recur([head|tail], bit, acc) do
case bit_position_recur(head, bit, acc) do
{:skipped, acc} ->
bit_position_recur(tail, bit, acc)
{:found, _} = found ->
found
end
end
defp bit_position_recur([], _bit, acc) do
{:skipped, acc}
end
defp bit_position_recur(byte, bit, acc) when is_integer(byte) do
case Exdis.Byte.bit_position(byte, bit) do
{:found, offset} ->
{:found, acc + offset}
:skipped ->
{:skipped, acc + 8}
end
end
## ------------------------------------------------------------------
  ## Private Functions: get_range_recur
## ------------------------------------------------------------------
defp get_range_recur(_bytes, start, length, chunks_acc) when length === 0 do
range_bytes = Enum.reverse(chunks_acc)
{start, length, range_bytes}
end
defp get_range_recur(<<binary :: bytes>>, start, length, chunks_acc) do
binary_size = byte_size(binary)
case start >= binary_size do
true ->
start = start - binary_size
{start, length, chunks_acc}
false ->
chunk_size = min(length, binary_size - start)
<<_ :: bytes-size(start), chunk :: bytes-size(chunk_size), _ :: bytes>> = binary
start = 0
length = length - chunk_size
chunks_acc = [chunk | chunks_acc]
{start, length, chunks_acc}
end
end
defp get_range_recur([list_head | list_tail], start, length, chunks_acc) do
{start, length, chunks_acc} = get_range_recur(list_head, start, length, chunks_acc)
get_range_recur(list_tail, start, length, chunks_acc)
end
defp get_range_recur([], start, length, chunks_acc) do
{start, length, chunks_acc}
end
defp get_range_recur(byte, start, length, chunks_acc) when is_integer(byte) do
start = start + 1
length = length - 1
chunks_acc = [byte | chunks_acc]
{start, length, chunks_acc}
end
## ------------------------------------------------------------------
## Private Function: find_bit_recur
## ------------------------------------------------------------------
defp find_bit_recur(<<binary :: bytes>>, offset) do
case binary do
<<_ :: bits-size(offset), bit_value :: 1, _ :: bits>> ->
# bit value found within binary
{:found, bit_value}
_ ->
{:skipped, offset - bit_size(binary)}
end
end
defp find_bit_recur([list_head | list_tail], offset) do
case find_bit_recur(list_head, offset) do
{:found, _} = found ->
found
{:skipped, new_offset} ->
find_bit_recur(list_tail, new_offset)
end
end
defp find_bit_recur([], offset) do
{:skipped, offset}
end
defp find_bit_recur(byte, offset) when is_integer(byte) do
case offset < 8 do
true ->
bit_value = (byte >>> offset) &&& 1
{:found, bit_value}
false ->
{:skipped, 8}
end
end
## ------------------------------------------------------------------
## Private Functions: Normalization of Offsets
## ------------------------------------------------------------------
defp normalize_byte_range(size, start, finish) when finish === nil do
normalize_byte_range(size, start, -1)
end
defp normalize_byte_range(size, start, finish) do
start = max(0, normalize_byte_offset(size, start))
finish = min(size - 1, normalize_byte_offset(size, finish))
case start >= 0 and start < size and start <= finish do
true ->
length = finish - start + 1
{:valid, start, length}
false ->
:invalid
end
end
defp normalize_byte_offset(_size, offset) when offset >= 0, do: offset
defp normalize_byte_offset(size, offset), do: size + offset
end
|
lib/exdis/io_data.ex
| 0.506103 | 0.614914 |
io_data.ex
|
starcoder
|
defmodule Talib.MACD do
alias Talib.EMA
require OK
@moduledoc ~S"""
Defines a Moving Average Convergence/Divergence index.
## History
Version: 1.0
Source: http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:moving_average_convergence_divergence_macd
Audited by:
| Name | Title |
| :----------- | :---------------- |
| | |
"""
@typedoc """
Defines a Moving Average Convergence/Divergence index.
* :long_period - The long period of the MACD
* :short_period - The short period of the MACD
* :signal_period - The signal period of the MACD
* :values - List of values resulting from the calculation
"""
@type t :: %Talib.MACD{
long_period: integer,
short_period: integer,
signal_period: integer,
values: [number]
}
defstruct long_period: 0,
short_period: 0,
signal_period: 0,
values: []
@doc """
Gets the MACD of a list.
The return tuple looks like the following: {MACD, MACD Signal}.
Raises `NoDataError` if the given list is an empty list.
## Examples
iex>Talib.MACD.from_list([1, 2, 3], 26, 12, 9)
{:ok, %Talib.MACD{
long_period: 26,
short_period: 12,
signal_period: 9,
values: [
{0.0, 1.0},
{0.07977207977207978, 1.2000000000000002},
{0.22113456871291648, 1.5600000000000003}
]
}}
iex>Talib.MACD.from_list([], 26, 12, 9)
{:error, :no_data}
"""
@spec from_list([number], integer, integer, integer) ::
{:ok, Talib.MACD.t()}
| {:error, atom}
def from_list(data, long \\ 26, short \\ 12, signal \\ 9),
do: calculate(data, long, short, signal)
@doc """
Gets the MACD of a list.
The return tuple looks like the following: {MACD, MACD Signal}.
Raises `NoDataError` if the given list is an empty list.
## Examples
iex>Talib.MACD.from_list!([1, 2, 3], 26, 12, 9)
%Talib.MACD{
long_period: 26,
short_period: 12,
signal_period: 9,
values: [
{0.0, 1.0},
{0.07977207977207978, 1.2000000000000002},
{0.22113456871291648, 1.5600000000000003}
]
}
iex>Talib.MACD.from_list!([], 26, 12, 9)
** (NoDataError) no data error
"""
@spec from_list!([number], integer, integer, integer) ::
Talib.MACD.t()
| no_return
def from_list!(data, long \\ 26, short \\ 12, signal \\ 9) do
case calculate(data, long, short, signal) do
{:ok, result} -> result
{:error, :no_data} -> raise NoDataError
end
end
@spec calculate([number], integer, integer, integer) ::
{:ok, Talib.MACD.t()}
| {:error, atom}
defp calculate(data, long_period, short_period, signal_period) do
OK.try do
%EMA{values: long_ema} <- EMA.from_list(data, long_period)
%EMA{values: short_ema} <- EMA.from_list(data, short_period)
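      # The MACD line is the short-period EMA minus the long-period EMA.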
macd_data =
for {long, short} <- Enum.zip([long_ema, short_ema]) do
short - long
end
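      # The signal line is an EMA of the MACD line itself.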
%EMA{values: signal_ema} <- EMA.from_list(macd_data, signal_period)
# %EMA{values: signal_ema} <- EMA.from_list(data, signal_period)
macd_signal = Enum.zip([macd_data, signal_ema])
result =
for {macd, signal} <- macd_signal do
{macd - signal, macd, signal}
end
after
{:ok,
%Talib.MACD{
long_period: long_period,
short_period: short_period,
signal_period: signal_period,
values: result
}}
rescue
:no_data -> {:error, :no_data}
end
end
end
|
lib/talib/macd.ex
| 0.889978 | 0.733523 |
macd.ex
|
starcoder
|
defmodule CLI do
@moduledoc """
Interface for launching a command line application.
The CLI application receives JSON input and, depending on command line options, launches the application in one of the following modes:
- a one_shot mode which accepts JSON from the standard input, interprets it
into commands, generates any graphics and returns any text output (e.g.
JSON) to the standard output and exits.
- a mode which launches a console interface
"""
@const_options %{
switches: [help: :boolean, one_shot: :boolean, console: :boolean],
aliases: [h: :help, o: :one_shot, c: :console],
help: [
help: "Returns this help message",
one_shot:
"Accepts JSON from the standard input, interprets it into commands, generates any graphics and returns any text output (e.g. JSON) to the standard output and exits",
console: "Launches as a console application"
]
}
@spec const_options() :: %{(atom() | String.t()) => [{atom(), atom() | String.t()}]}
def const_options(), do: @const_options
  @spec parse_args([String.t()]) :: {keyword(), [String.t()], [{String.t(), String.t() | nil}]}
def parse_args(args) do
%{switches: switches, aliases: aliases} = const_options()
OptionParser.parse(args, strict: switches, aliases: aliases)
end
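  # Illustrative: parse_args(["--one_shot"]) returns {[one_shot: true], [], []};
  # an unknown switch lands in the error list, e.g. {[], [], [{"--bogus", nil}]}.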
@spec usage() :: String.t()
  defp usage() do
    app_name =
      case :application.get_application(__MODULE__) do
        {:ok, name} -> name
        _ -> "imposc"
      end

    "#{app_name} [options]"
  end
  @spec process({keyword(), [String.t()], [{String.t(), String.t() | nil}]}) :: any()
def process({[help: true], _, _}) do
IO.puts("Usage: #{usage()}\n")
%{help: help, aliases: aliases} = const_options()
for {k, v} <- aliases, do: IO.puts("\n-#{k}, --#{v}:\t#{help[v]}")
end
def process({[one_shot: true], _, _}) do
CoreWrapper.process_input()
end
def process({[console: true], _, _}) do
Console.run()
end
def process({[{a, true}], _, _}) do
IO.puts("Not yet implemented: #{a}")
end
def process(args) do
IO.puts("Not yet implemented:")
IO.inspect(args)
end
def main(args) do
Application.put_env(:imposc, :default_outfile, nil)
args |> parse_args |> process
end
end
|
apps/imposc/lib/cli/cli.ex
| 0.719384 | 0.501465 |
cli.ex
|
starcoder
|
defmodule Terminus.Planaria do
@moduledoc """
A behaviour module for implementing [Planaria](https://neon.planaria.network)-like
state machines in Elixir.
A module using `Terminus.Planaria` is a GenStage consumer process that will
automatically mangage its own producer processes to crawl and listen to Bitcoin
transaction events. Developers only need to implement callback functions to
handle transaction events.
## Example
The following code demonstrates how a [Twetch](http://twetch.app) scraper can
be built in a few lines of code.
defmodule TwetchScraper do
@query %{
"find" => %{
"out.s2": "19HxigV4QyBv3tHpQVcUEQyq1pzZVdoAut",
"out.s25": "twetch"
}
}
use Terminus.Planaria, token: {:my_app, :planaria_token},
from: 600000,
query: @query
def handle_data(:block, txns) do
# Handle confirmed transactions
end
def handle_data(:mempool, txns) do
# Handle unconfirmed transactions
end
end
The `c:handle_data/2` callback can be implemented for each [`tx_event`](`t:tx_event/0`),
and is typically used to persist required data from each transaction. The
`c:handle_tape/2` callback can also be implemented for loading and persisting
the tape head so a re-crawl isn't necessary if the process is interrupted.
## Options
When invoking `use Terminus.Planaria`, the following [`config`](`t:config/0`)
options are accepted:
* `:token` - Planaria Token. Required.
* `:host` - The Bitbus/Bitsocket endpoint to use. Defaults to `:txo`.
* `:from` - The block height from which to crawl for transactions. Required.
* `:query` - Full or shorthand [Bitquery](https://bitquery.planaria.network) map.
* `:poll` - Interval (in seconds) to poll Bitbus for new blocks. Defaults to `300` (5 minutes).
* `:recycle` - Interval (in seconds) to recycle quiet Bitsocket requests. Defaults to `900` (15 minutes).
## Supervision
Each `Terminus.Planaria` will most commonly be started under your application's
supervision tree. When you invoke `use Terminus.Planaria`, it automatically
defines a `child_spec/1` function so your Planaria modules can be started
directly under a supervisor.
And this is where we can have some fun and take full advantage of Elixir's
concurrency model. Why not run many Planarias concurrently in your app?
children = [
TwetchScraper,
PreevScraper,
WeathersvScraper
]
Supervisor.start_link(children, strategy: :one_for_all)
"""
require Logger
use GenStage
alias Terminus.{Bitbus,Bitsocket}
defstruct mod: nil,
crawl_sub: nil,
listen_sub: nil,
tape: %{
head: 0,
height: 0
},
config: %{
poll: 300,
query: %{}
}
@typedoc "Planaria state."
@type t :: %__MODULE__{
mod: atom,
crawl_sub: {pid, GenStage.subscription_tag},
listen_sub: {pid, GenStage.subscription_tag},
tape: tape,
config: config
}
@typedoc "Planaria config."
@type config :: %{
token: String.t,
poll: integer,
from: integer,
query: map
}
@typedoc "Planaria tape."
@type tape :: %{
head: integer,
height: integer
}
@typedoc "Planaria tape event."
  @type tape_event :: :start | :update
@typedoc "Planaria transaction event."
@type tx_event :: :block | :mempool
@doc """
Invoked for each new transaction seen by the Planaria.
This is the main callback you will need to implement for your Planaria module.
Typically it will be used to pull the necessary data from each transaction
event and store it to a local database.
When an unconfirmed transaction is seen the callback is invoked with the
[`tx_event`](`t:tx_event/0`) of `:mempool`. For each confirmed transaction,
the callback is invoked with the [`tx_event`](`t:tx_event/0`) of `:block`.
The callback can return any value.
## Examples
def handle_data(:block, txns) do
txns
|> Enum.map(&MyApp.Transaction.build/1)
|> Repo.insert(on_conflict: :replace_all, conflict_target: :txid)
end
def handle_data(:mempool, txns) do
txns
|> Enum.map(&MyApp.Transaction.build/1)
|> Repo.insert
end
"""
@callback handle_data(tx_event, list) :: any
@doc """
Invoked when a Planaria starts and also after each crawl of new blocks.
This callback can be used to load and persist the tape head so a re-crawl
isn't necessary if the process is interrupted.
When a Planaria starts the callback is invoked with the [`tape_event`](`t:tape_event/0`)
of `:start`. This provides an opportunity to load the current `:head` of the
tape from a database and update the given [`tape`](`t:tape/0`).
The callback must return `{:ok, tape}`.
After each crawl of block data the callback is invoked with the [`tape_event`](`t:tape_event/0`)
of `:update`. This allows us to store the [`tape`](`t:tape/0`)
`:head`. In this case any return value is acceptable.
## Examples
Load the `:head` from a database when the Planaria starts.
def handle_tape(:start, tape) do
tape = case MyApp.Config.get("tape_head") do
{:ok, head} -> put_in(tape.head, head)
_ -> tape
end
{:ok, tape}
end
Persist the `:head` after each crawl of new blocks.
def handle_tape(:update, tape) do
MyApp.Config.put("tape_head", tape.head)
end
"""
@callback handle_tape(tape_event, tape) :: {:ok, tape} | any
@doc false
defmacro __using__(config \\ []) do
quote location: :keep, bind_quoted: [config: config] do
alias Terminus.Planaria
@behaviour Planaria
@doc false
def child_spec(opts) do
spec = %{
id: __MODULE__,
start: {__MODULE__, :start_link, [opts]}
}
Supervisor.child_spec(spec, [])
end
@doc false
def start_link(opts \\ []) do
Planaria.start_link(__MODULE__, unquote(Macro.escape(config)), opts)
end
@doc false
def start(opts \\ []) do
Planaria.start(__MODULE__, unquote(Macro.escape(config)), opts)
end
@doc false
def handle_data(_type, _txns), do: true
@doc false
def handle_tape(_type, tape), do: {:ok, tape}
defoverridable handle_tape: 2, handle_data: 2
end
end
@doc """
Starts a `Terminus.Planaria` process linked to the current process.
This is often used to start the Planaria as part of a supervision tree.
"""
@spec start_link(atom, config, keyword) :: GenServer.on_start
def start_link(module, config, options) do
GenStage.start_link(__MODULE__, {module, config}, options)
end
@doc """
Starts a `Terminus.Planaria` process without links (outside of a supervision
tree).
See `start_link/3` for more information.
"""
@spec start(atom, config, keyword) :: GenServer.on_start
def start(module, config, options) do
GenStage.start(__MODULE__, {module, config}, options)
end
# Callbacks
@impl true
def init({module, config}) do
state = %__MODULE__{mod: module}
|> Map.update!(:config, & Map.merge(&1, Enum.into(config, %{})))
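    # Seed the tape head from the configured :from block height.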
tape = state.tape
|> Map.put(:head, state.config.from)
case apply(state.mod, :handle_tape, [:start, tape]) do
{:ok, tape} ->
Logger.info "#{ state.mod } starting..."
Process.send_after(self(), :status, 500)
state = put_in(state.tape, tape)
{:consumer, state}
{:error, reason} ->
{:stop, reason}
end
end
@impl true
def handle_cast(:crawl, %__MODULE__{tape: tape, config: config} = state) do
Logger.info "#{ state.mod } starting crawl from #{ tape.head }"
query = config.query
|> Terminus.HTTPStream.normalize_query
|> update_in(["q", "find"], & default_find_params(&1, tape))
|> update_in(["q", "sort"], &default_sort_params/1)
options = [token: config.token, stage: true]
|> Keyword.put(:host, Map.get(config, :host, :txo))
case Bitbus.crawl(query, options) do
{:ok, pid} ->
GenStage.async_subscribe(self(), to: pid, cancel: :transient, mode: :crawl)
{:noreply, [], state}
{:error, reason} ->
{:stop, reason, state}
end
end
def handle_cast(:listen, %__MODULE__{config: config} = state) do
Logger.info "#{ state.mod } starting listen"
query = config.query
|> Terminus.HTTPStream.normalize_query
options = [stage: true]
|> Keyword.put(:host, Map.get(config, :host, :txo))
|> Keyword.put(:recycle, Map.get(config, :recycle, 900))
case Bitsocket.listen(query, options) do
{:ok, pid} ->
GenStage.async_subscribe(self(), to: pid, mode: :listen)
{:noreply, [], state}
{:error, reason} ->
{:stop, reason, state}
end
end
# Put default find query params
defp default_find_params(nil, %{} = tape),
do: %{"blk.i" => %{"$gt" => tape.head}}
defp default_find_params(%{} = find, %{} = tape),
do: Map.put(find, "blk.i", %{"$gt" => tape.head})
# Put default sort query params
defp default_sort_params(nil),
do: %{"blk.i" => 1}
defp default_sort_params(%{} = sort),
do: Map.put(sort, "blk.i", 1)
@impl true
def handle_info(:status, %__MODULE__{tape: tape, config: config} = state) do
Logger.info "#{ state.mod } checking chain status"
case Bitbus.status do
{:ok, status} ->
tape = put_in(tape.height, status["height"])
state = put_in(state.tape, tape)
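        # Start a crawl when new blocks have arrived and none is in flight;
        # otherwise make sure the mempool listener is running.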
cond do
tape.height > tape.head && is_nil(state.crawl_sub) ->
GenStage.cast(self(), :crawl)
is_nil(state.listen_sub) ->
GenStage.cast(self(), :listen)
true ->
true
end
Process.send_after(self(), :status, config.poll * 1000)
{:noreply, [], state}
{:error, reason} ->
{:stop, reason, state}
end
end
@impl true
def handle_subscribe(:producer, opts, from, %__MODULE__{} = state) do
Logger.info "#{ state.mod } subscribed to #{ inspect(from) }"
state = case Keyword.get(opts, :mode) do
:crawl -> put_in(state.crawl_sub, from)
:listen -> put_in(state.listen_sub, from)
end
{:automatic, state}
end
@impl true
def handle_events(events, from, %__MODULE__{crawl_sub: crawl_sub} = state)
when from == crawl_sub,
do: process_events(events, :block, state)
def handle_events(events, from, %__MODULE__{listen_sub: listen_sub} = state)
when from == listen_sub,
do: process_events(events, :mempool, state)
# Send events to the handle_data callbacks
defp process_events(events, type, state) do
apply(state.mod, :handle_data, [type, events])
{:noreply, [], state}
end
@impl true
def handle_cancel({:down, :normal}, from, %__MODULE__{crawl_sub: crawl_sub, tape: tape} = state)
when from == crawl_sub
do
Logger.info "#{__MODULE__} Finished crawl to #{ tape.height }"
state = Map.merge(state, %{
crawl_sub: nil,
tape: put_in(tape.head, tape.height)
})
apply(state.mod, :handle_tape, [:update, state.tape])
if is_nil(state.listen_sub),
do: GenStage.cast(self(), :listen)
{:noreply, [], state}
end
end
|
lib/terminus/planaria.ex
| 0.918667 | 0.63385 |
planaria.ex
|
starcoder
|
defmodule ExUnitFixtures do
@moduledoc """
A library for declaring & using test fixtures in ExUnit.
For an overview of its purpose see the [README](README.html).
To use ExUnitFixtures we need to start it. Add the following code to your
`test_helper.exs`:
ExUnitFixtures.start
This starts the ExUnitFixtures application and imports any `fixtures.exs`
files that are found in the test directory heiararchy. See
`ExUnitFixtures.start/1` for more details.
Next you should:
1. Add `use ExUnitFixtures` to your test cases (before `use ExUnit.Case`)
2. Add `ExUnit.Case.register_attribute __MODULE__, :fixtures` (after `use ExUnit.Case`)
3. Define some fixtures using `deffixture/3`
4. Tag some tests with `@fixtures [:your_fixtures_here]`. Fixtures may be specified as an atom, a list, or a tuple.
The tagged tests will automatically have all the requested fixtures injected
into their `context`. For example:
iex(2)> defmodule MyTests do
...(2)> use ExUnitFixtures
...(2)> use ExUnit.Case
...(2)> ExUnit.Case.register_attribute __MODULE__, :fixtures
...(2)>
...(2)> deffixture my_model do
...(2)> # Create a model somehow...
...(2)> %{test: 1}
...(2)> end
...(2)>
...(2)> @fixtures [:my_model]
...(2)> test "that we have some fixtures", context do
...(2)> assert context.my_model.test == 1
...(2)> end
...(2)> end
iex(3)> true
true
## Fixtures with dependencies
Fixtures can also depend on other fixtures by naming a parameter after that
fixture. For example, if you needed to setup a database instance before
creating some models:
iex(4)> defmodule MyTests2 do
...(4)> use ExUnitFixtures
...(4)> use ExUnit.Case
...(4)> ExUnit.Case.register_attribute __MODULE__, :fixtures
...(4)>
...(4)> deffixture database do
...(4)> # set up the database somehow...
...(4)> end
...(4)>
...(4)> deffixture my_model(database) do
...(4)> # use the database to insert a model
...(4)> end
...(4)>
...(4)> @fixtures :my_model
...(4)> test "something", %{my_model: my_model} do
...(4)> # Test something with my_model
...(4)> end
...(4)> end
iex(5)> true
true
In the sample above, we have 2 fixtures: one which creates the database and
another which inserts a model into that database. The test function depends on
`my_model` which depends on the database. ExUnitFixtures knows this, and takes
care of setting up the database and passing it in to `my_model`.
## Fixture Scoping
Fixtures may optionally be provided with a scope:
- `:test` scoped fixtures will be created before each test that requires them
and not re-used between tests. This is the default scope for a fixture.
- `:module` scoped fixtures will be created when a test requires them and then
re-used in any further tests in that module.
- `:session` scoped fixtures will be created when a test requires them and
then re-used in any further tests across the entire test run.
For details on how to specify scopes, see `deffixture/3`.
## Tearing down Fixtures
If you need to do some teardown work for a fixture you can use the
`teardown/2` function.
iex(8)> defmodule TestWithTearDowns do
...(8)> use ExUnitFixtures
...(8)> use ExUnit.Case
...(8)>
...(8)> deffixture database, scope: :module do
...(8)> # Setup the database
...(8)> teardown :module, fn ->
...(8)> # Tear down the database
...(8)> nil
...(8)> end
...(8)> end
...(8)>
...(8)> deffixture model do
...(8)> # Insert the model
...(8)> teardown :test, fn ->
...(8)> # Delete the model
...(8)> nil
...(8)> end
...(8)> end
...(8)> end
iex(9)> true
true
## Sharing Fixtures Amongst Test Cases.
It is possible to share fixtures among test cases by declaring that module a
fixture module. See `ExUnitFixtures.FixtureModule` for more details.
When started, `ExUnitFixtures` automatically loads any `fixtures.exs` files it
finds in the test directory hierarchy. Any test or fixture module will also
automatically import any fixtures defined in `fixtures.exs` files in its
current or parent directories. This allows ExUnitFixtures to provide a
powerful yet simple method of sharing fixtures amongst tests in a directory
hierarchy. See `ExUnitFixtures.AutoImport` for more details.
"""
alias ExUnitFixtures.FixtureDef
alias ExUnitFixtures.SessionFixtureStore
@doc """
Starts the ExUnitFixtures application.
By default this will also look for any `fixtures.exs` files in the test
directory and load them into the VM so we can use the fixtures contained
within. This can be controlled by the `auto_load` option described below.
The keyword list `opts` may be provided to override any of the default
options.
### Options
- `auto_import` controls whether tests & fixture modules should automatically
import fixtures from `fixtures.exs` files in their directory tree. This is
true by default
- `auto_load` controls whether `ExUnitFixtures` should automatically load
`fixtures.exs` files it finds in the test directory tree on startup. This is
true by default.
"""
  def start(opts \\ []) do
    Enum.each(opts, fn {key, val} ->
      Application.put_env(:ex_unit_fixtures, key, val, persistent: true)
    end)

    Application.ensure_all_started(:ex_unit_fixtures)
  end
@doc false
def start(_type, _args) do
import Supervisor.Spec, warn: false
alias ExUnitFixtures.Imp
children = [
worker(ExUnitFixtures.Teardown, []),
worker(Imp.ModuleStore, []),
worker(Imp.FixtureStore, [[name: ExUnitFixtures.SessionFixtureStore]])
] ++
if Application.get_env(:ex_unit_fixtures, :auto_load) do
[worker(Imp.FileLoader, [])]
else
[]
end
Supervisor.start_link(children, strategy: :one_for_one, name: ExUnitFixtures)
end
@doc """
Loads all files it finds matching `fixture_pattern` into the VM.
"""
@spec load_fixture_files(Regex.t) :: nil
def load_fixture_files(fixture_pattern \\ "test/**/fixtures.exs") do
ExUnitFixtures.Imp.FileLoader.load_fixture_files(fixture_pattern)
end
@doc """
Defines a fixture in the current module.
This is intended to be used much like a def statement:
deffixture my_fixture do
"my_fixture_text"
end
A fixture may optionally depend on other fixtures. This is done by creating a
fixture that accepts parameters named after other fixtures. These fixtures
will automatically be run and injected as parameters to the current fixture.
For example:
deffixture database do
%{database: true}
end
deffixture model(database) do
%{model: true}
end
Note: `deffixture/3` does not support guards or pattern matching in its
definitions. If you want to use those you should define a constructor
function yourself and register it with `register_fixture/3`.
#### Fixture Options
Fixtures can accept various options that control how they are defined:
deffixture database, scope: :module do
%{database: true}
end
These options are supported:
- `scope` controls the scope of the fixture. See Fixture Scoping for details.
- Passing `autouse: true` will cause a fixture to be passed to every test in
the module.
"""
defmacro deffixture({name, info, params}, opts \\ [], body) do
dep_names = for {dep_name, _, _} <- params || [] do
dep_name
end
quote do
def unquote({name, info, params}), unquote(body)
ExUnitFixtures.register_fixture(
unquote(name), unquote(dep_names), unquote(opts)
)
end
end
@doc """
Registers a function as a fixture in the current module.
This registers a fixture named `name` in the current module. The fixture will
be constructed by a function named `name`, which should be defined separately.
The fixture will depend on the fixtures listed in `dep_names`, which will be
passed to the function in the same order as they are present in `dep_names`.
`register_fixture/3` should be used instead of `deffixture/3` when using an
existing function as a fixture, or when you want to use pattern matching or
guards in the definition of the fixture constructor.
register_fixture :a_model, [:db]
def a_model(db) do
# Construct a model somehow
end
#### Options
- `scope` controls the scope of the fixture. See Fixture Scoping for details.
- `autouse: true` will cause a fixture to be passed to every test in the
module.
"""
defmacro register_fixture(name, dep_names \\ [], opts \\ []) do
if name == :context do
raise """
The name context is reserved for the ExUnit context.
It may not be used for fixtures.
"""
end
    scope = Keyword.get(opts, :scope, :test)
    autouse = Keyword.get(opts, :autouse, false)
unless scope in [:test, :module, :session] do
raise "Unknown scope: #{scope}"
end
quote do
ExUnitFixtures.Imp.Preprocessing.check_clashes(unquote(name), @__fixtures)
@__fixtures %FixtureDef{
name: unquote(name),
func: {__MODULE__, unquote(name)},
dep_names: unquote(dep_names),
scope: unquote(scope),
autouse: unquote(autouse),
qualified_name: Module.concat(__MODULE__, unquote(name))
}
end
end
@doc """
Registers a teardown function for the current test pid.
`scope` should be provided, and should usually match the scope of the current
fixture. It determines whether the teardown should be run at the end of the
test or end of the module.
There are some use-cases for providing a non-matching scope. You might want
to reset a module fixture between each of the individual tests, which could
easily be done with a test scoped teardown.
Note: Currently there is no session scope for teardowns. Hopefully this will
change in a future release.
"""
@spec teardown(:test | :module, fun) :: :ok
def teardown(scope \\ :test, fun) when is_function(fun, 0) do
ExUnitFixtures.Teardown.register_teardown(scope, fun)
end
defmacro __using__(_opts) do
quote do
if is_list(Module.get_attribute(__MODULE__, :ex_unit_tests)) do
raise "`use ExUnitFixtures` must come before `use ExUnit.Case`"
end
Module.register_attribute(__MODULE__,
:fixture_modules,
accumulate: true)
Module.register_attribute __MODULE__, :__fixtures, accumulate: true
@before_compile ExUnitFixtures
import ExUnitFixtures
if Application.get_env(:ex_unit_fixtures, :auto_import) do
use ExUnitFixtures.AutoImport
end
ExUnit.Case.register_attribute __MODULE__, :fixtures
end
end
defmacro __before_compile__(_) do
quote do
@_processed_fixtures ExUnitFixtures.Imp.Preprocessing.preprocess_fixtures(
@__fixtures, @fixture_modules
)
setup_all do
{:ok, module_store} = ExUnitFixtures.Imp.FixtureStore.start_link
module_ref = make_ref()
ExUnitFixtures.Teardown.register_pid(module_ref, module_store)
on_exit fn ->
ExUnitFixtures.Teardown.run(module_ref)
end
{:ok, %{__ex_unit_fixtures: %{module_store: module_store,
module_ref: module_ref}}}
end
setup context do
%{__ex_unit_fixtures: fixture_context} = context
ExUnitFixtures.Teardown.register_pid(fixture_context[:module_ref])
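      # The @fixtures attribute may hold an atom, a string, a list or a
      # tuple; normalize it into a flat list of fixture-name atoms.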
fixture_names = context.registered.fixtures |> List.wrap |> Enum.flat_map(fn
x when is_atom(x) -> List.wrap(x)
x when is_binary(x) -> List.wrap(String.to_existing_atom(x))
x when is_tuple(x) -> Tuple.to_list(x)
end)
{:ok, ExUnitFixtures.Imp.create_fixtures(
fixture_names,
@_processed_fixtures,
%{module: fixture_context[:module_store],
session: ExUnitFixtures.SessionFixtureStore},
context
)}
end
end
end
end
|
lib/ex_unit_fixtures.ex
| 0.877903 | 0.701138 |
ex_unit_fixtures.ex
|
starcoder
|
defmodule Custodian.Bots do
@moduledoc """
The Bots context provides a boundary into the `Custodian.Bots.Bot` schema. It
provides functions for listing, creating, updating, and deleting bots.
"""
import Ecto.Query, warn: false
alias Custodian.Repo
alias Custodian.Bots.Bot
@doc """
Returns the list of bots.
## Examples
iex> list_bots()
[%Bot{}, ...]
"""
@spec list_bots :: [Bot.t()]
def list_bots do
Repo.all(Bot)
end
@doc """
Gets a single bot.
Raises `Ecto.NoResultsError` if the bot does not exist.
## Examples
iex> get_bot!(123)
%Bot{}
iex> get_bot!(456)
** (Ecto.NoResultsError)
"""
@spec get_bot!(String.t()) :: Bot.t()
def get_bot!(id), do: Repo.get!(Bot, id)
@doc """
Gets a single bot by some clause.
Raises `Ecto.NoResultsError` if the bot does not exist.
## Examples
iex> get_bot_by!(repo_id: 123)
%Bot{}
iex> get_bot_by!(repo_id: 456)
** (Ecto.NoResultsError)
"""
@spec get_bot_by!(Keyword.t()) :: Bot.t()
def get_bot_by!(clauses), do: Repo.get_by!(Bot, clauses)
@doc """
Creates a bot.
## Examples
iex> create_bot(%{field: value})
{:ok, %Bot{}}
iex> create_bot(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
  @spec create_bot(map) :: {:ok, Bot.t()} | {:error, Ecto.Changeset.t()}
def create_bot(attrs \\ %{}) do
%Bot{}
|> Bot.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a bot.
## Examples
iex> update_bot(bot, %{field: new_value})
{:ok, %Bot{}}
iex> update_bot(bot, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
  @spec update_bot(Bot.t(), map) :: {:ok, Bot.t()} | {:error, Ecto.Changeset.t()}
def update_bot(%Bot{} = bot, attrs) do
bot
|> Bot.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a Bot.
## Examples
iex> delete_bot(bot)
{:ok, %Bot{}}
iex> delete_bot(bot)
{:error, %Ecto.Changeset{}}
"""
  @spec delete_bot(Bot.t()) :: {:ok, Bot.t()} | {:error, Ecto.Changeset.t()}
def delete_bot(%Bot{} = bot) do
Repo.delete(bot)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking bot changes.
## Examples
iex> change_bot(bot)
%Ecto.Changeset{source: %Bot{}}
"""
@spec change_bot(Bot.t()) :: Ecto.Changeset.t()
def change_bot(%Bot{} = bot) do
Bot.changeset(bot, %{})
end
end
|
lib/custodian/bots/bots.ex
| 0.85738 | 0.4953 |
bots.ex
|
starcoder
|
defmodule SPARQL.Client do
@moduledoc """
A SPARQL protocol client.
The [SPARQL Protocol](https://www.w3.org/TR/sparql11-protocol/) defines how the operations
specified in the SPARQL query and update specs can be requested by a client from a
SPARQL service via HTTP.
This modules provides dedicated functions for the various forms of SPARQL query and update
operations and generic `query/3` and `update/3` for the query and update operations.
For a general introduction you may refer to the guides on the [homepage](https://rdf-elixir.dev).
## Raw-mode
The query functions can be called with a `SPARQL.Query` struct or a SPARQL query as a raw string.
By default, a SPARQL query string will be parsed into a `SPARQL.Query` struct for validation
purposes before the string is sent via an HTTP request to the SPARQL protocol service endpoint.
This parsing step can be omitted by setting `:raw_mode` option to `true` on the dedicated
functions for the various SPARQL operation forms.
"SELECT * { ?s ?p ?o .}"
|> SPARQL.Client.select("http://example.com/sparql", raw_mode: true)
On the generic `SPARQL.Client.query/3` this raw-mode is not supported, since the parsing is
needed there to determine the query form, which in turn determines the expected result.
For SPARQL update operations the picture is a little different. The SPARQL.ex package doesn't
provide parsing of SPARQL updates (yet), but except for `INSERT` and `DELETE` updates this isn't
actually needed, since all elements of the updates can be provided directly to the respective
functions for the update forms, which will generate valid SPARQL updates.
RDF.Graph.new({EX.S, EX.p, EX.O})
|> SPARQL.Client.insert_data("http://example.com/sparql")
You can still provide hand-written update strings to these functions, but due to the lack of
SPARQL update parsing the raw-mode is mandatory then. For the `INSERT` and `DELETE` update
forms this is the only way to request them for now.
\"""
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
INSERT
{ GRAPH <http://example/bookStore2> { ?book ?p ?v } }
WHERE
{ GRAPH <http://example/bookStore>
{ ?book dc:date ?date .
FILTER ( ?date > "1970-01-01T00:00:00-02:00"^^xsd:dateTime )
?book ?p ?v
} }
\"""
|> SPARQL.Client.insert("http://example.com/sparql", raw_mode: true)
## Specifying custom headers
Custom headers for the HTTP request to the SPARQL service can be specified with the `headers`
option and a map.
SPARQL.Client.query(query, "http://some.company.org/private/sparql",
headers: %{"Authorization" => "Basic XXX=="})
## Specifying Tesla adapter specific options
The keyword list provided under the `request_opts` options, will be passed as the `opts` option
value to the `Tesla.request/2` function.
This allows for example to set the timeout value for the Hackney adapter like this:
```elixir
SPARQL.Client.query(query, "http://example.com/sparql",
request_opts: [adapter: [recv_timeout: 30_000]])
```
## Other options
- `max_redirects`: the number of redirects to follow before the operation fails (default: `5`)
## Application configuration of default values
Several default values for the options of the operations can be configured via the
Mix application environment.
Here's an example configuration showing all available configuration options:
config :sparql_client,
protocol_version: "1.1",
query_request_method: :get,
update_request_method: :directly,
query_result_format: %{
select: :json,
ask: :json,
construct: :turtle,
describe: :turtle
},
http_headers: %{"Authorization" => "Basic YWxhZGRpbjpvcGVuc2VzYW1l"},
tesla_request_opts: [adapter: [recv_timeout: 30_000]],
max_redirects: 3,
raw_mode: true
The `http_headers` can also be set to a function receiving the `SPARQL.Client.Request`
struct and the computed default headers:
      defmodule SomeModule do
        def http_header_config(request, _headers) do
          if request.sparql_operation_type == SPARQL.Client.Update do
            %{"Authorization" => "Basic YWxhZGRpbjpvcGVuc2VzYW1l"}
          else
            %{}
          end
        end
      end

      config :sparql_client,
        http_headers: &SomeModule.http_header_config/2
"""
alias __MODULE__
alias SPARQL.Client.Request
@general_options_schema [
headers: [
type: {:custom, __MODULE__, :validate_headers, []},
subsection: "Specifying custom headers"
],
request_opts: [
type: :keyword_list,
subsection: "Specifying Tesla adapter specific options"
],
max_redirects: [
type: :pos_integer,
doc: "The number of redirects to follow before the HTTP request fails."
],
raw_mode: [
type: :boolean,
doc:
"Allows disabling of the processing of query strings, passing them through as-is to the SPARQL endpoint.",
subsection: "Raw-mode"
]
]
@query_options_schema @general_options_schema ++
[
protocol_version: [
type: {:in, ["1.0", "1.1"]},
subsection: "Specifying the request method"
],
request_method: [
type: {:in, [:get, :post]},
subsection: "Specifying the request method"
],
accept_header: [
type: :string
],
result_format: [
type:
{:in,
(SPARQL.result_formats() ++ RDF.Serialization.formats())
|> Enum.map(fn format -> format.name end)},
subsection: "Specifying the response format"
],
default_graph: [
subsection: "Specifying an RDF Dataset"
],
named_graph: [
subsection: "Specifying an RDF Dataset"
]
]
@doc """
Executes any form of a SPARQL query operation against a service endpoint.
The query can either be given as string or as an already parsed `SPARQL.Query`.
"SELECT * WHERE { ?s ?p ?o }"
      |> SPARQL.Client.query("http://dbpedia.org/sparql")
with %SPARQL.Query{} = query <- SPARQL.Query.new("SELECT * WHERE { ?s ?p ?o }") do
SPARQL.Client.query(query, "http://dbpedia.org/sparql")
end
For the execution of queries in raw-mode see the [module documentation](`SPARQL.Client`)
On success the result is returned in an `:ok` tuple; on failure an `:error` tuple is
returned with either an error message or, in case of a non-200 response by the SPARQL
service, a `SPARQL.Client.HTTPError`.
The type of the result returned depends on the query form:
- `SELECT` queries will return a `SPARQL.Query.Result` struct
- `ASK` queries will return a `SPARQL.Query.Result` struct with the boolean
result in the `results` field
- `CONSTRUCT` and `DESCRIBE` queries will return an RDF data structure
## Specifying the request method
The SPARQL 1.1 protocol spec defines [three methods](https://www.w3.org/TR/sparql11-protocol/#query-operation)
to perform a SPARQL query operation via HTTP, which can be specified via the
`request_method` and `protocol_version` options:
1. query via GET: by setting the options as `request_method: :get` and `protocol_version: "1.1"`
2. query via URL-encoded POST: by setting the options as `request_method: :post` and `protocol_version: "1.0"`
3. query via POST directly: by setting the options as `request_method: :post` and `protocol_version: "1.1"`
In order to work with SPARQL 1.0 services out-of-the-box the second method,
query via URL-encoded POST, is the default.
To perform the previous query via GET, you would have to call it like this:
SPARQL.Client.query(query, "http://dbpedia.org/sparql",
request_method: :get, protocol_version: "1.1")
## Specifying the response format
The `SPARQL.Client` can handle all of the specified result formats for SPARQL
tuple results (JSON, XML, CSV and TSV) and for `CONSTRUCT` and `DESCRIBE` queries
all RDF serialization formats supported by [RDF.ex](https://github.com/rdf-elixir/rdf-ex)
can be handled.
If no custom `Accept` header is specified, all accepted formats for the respective
query form will be set automatically, with
- JSON being the preferred format for `SELECT` and `ASK` queries
- Turtle being the preferred format for `CONSTRUCT` and `DESCRIBE` queries
Although the returned result is mostly independent of the response format actually
returned by the service, you can set the format manually with the `result_format`
option and the name of the format:
SPARQL.Client.query(query, "http://some.company.org/private/sparql",
result_format: :xml)
These are the names of the supported formats:
- tuple result formats: `:json, :xml, :csv, :tsv`
- RDF result formats: `:turtle, :ntriples, :nquads, :jsonld`
When a `result_format` is specified the `Accept` header is set to the corresponding
media type. You might however still want to overwrite the `Accept` header, for
example when a SPARQL service uses a non-standard media type for a format.
Note that when providing a custom non-standard `Accept` header, the `result_format`
option is mandatory.
## Specifying an RDF Dataset
The RDF dataset to be queried can be specified [as described in the spec](https://www.w3.org/TR/sparql11-protocol/#dataset)
via the `default_graph` and `named_graph` options and either a single graph
name or lists of graphs.
SPARQL.Client.query(query, "http://some.company.org/private/sparql",
default_graph: "http://www.example/sparql/",
named_graph: [
"http://www.other.example/sparql/",
"http://www.another.example/sparql/"
])
"""
def query(query, endpoint, opts \\ [])
def query(%SPARQL.Query{} = query, endpoint, opts) do
do_query(query.form, query.query_string, endpoint, opts)
end
def query(query_string, endpoint, opts) do
if Keyword.get(opts, :raw_mode) do
raise """
The generic SPARQL.Client.query/3 function can not be used in raw-mode since
it needs to parse the query to determine the query form.
Please use one of the dedicated functions like SPARQL.Client.select/3 etc.
"""
end
with %SPARQL.Query{} = query <- SPARQL.Query.new(query_string) do
query(query, endpoint, opts)
end
end
SPARQL.Client.Query.forms()
|> Enum.each(fn query_form ->
@doc """
Executes a SPARQL `#{query_form |> to_string() |> String.upcase()}` query operation against a service endpoint.
See documentation of the generic `query/3` function and the [module documentation](`SPARQL.Client`) for the available options.
"""
def unquote(query_form)(query, endpoint, opts \\ [])
def unquote(query_form)(%SPARQL.Query{form: unquote(query_form)} = query, endpoint, opts) do
do_query(unquote(query_form), query.query_string, endpoint, opts)
end
def unquote(query_form)(%SPARQL.Query{form: form}, _, _) do
raise "expected a #{unquote(query_form) |> to_string() |> String.upcase()} query, got: #{
form |> to_string() |> String.upcase()
} query"
end
def unquote(query_form)(query_string, endpoint, opts) do
if raw_mode?(opts) do
do_query(unquote(query_form), query_string, endpoint, opts)
else
with %SPARQL.Query{} = query <- SPARQL.Query.new(query_string) do
unquote(query_form)(query, endpoint, opts)
end
end
end
end)
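  # Shared implementation for all query forms: validate the options, build
  # the protocol request and execute it.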
defp do_query(form, query, endpoint, opts) do
with {:ok, options} <- NimbleOptions.validate(opts, @query_options_schema),
{:ok, request} <- Request.build(Client.Query, form, query, endpoint, options),
{:ok, request} <- Request.call(request, options) do
{:ok, request.result}
else
{:error, %NimbleOptions.ValidationError{message: message}} -> {:error, message}
error -> error
end
end
@update_options_schema @general_options_schema ++
[
request_method: [
type: {:in, [:direct, :url_encoded]},
subsection: "Specifying the request method"
]
]
@doc """
Executes any form of a SPARQL update operation against a service endpoint.
With this generic function, updates can only be given as a string and must be executed in
raw-mode (see the [module documentation](`SPARQL.Client`) for a description of the raw-mode)
\"""
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
INSERT
{ GRAPH <http://example/bookStore2> { ?book ?p ?v } }
WHERE
{ GRAPH <http://example/bookStore>
{ ?book dc:date ?date .
FILTER ( ?date > "1970-01-01T00:00:00-02:00"^^xsd:dateTime )
?book ?p ?v
} }
\"""
|> SPARQL.Client.update("http://example.com/sparql", raw_mode: true)
The result for all updates is either `:ok`, or an `:error` tuple with an error message
or, in case of a non-2XX response by the SPARQL service, a `SPARQL.Client.HTTPError`.
## Specifying the request method
The SPARQL 1.1 protocol spec defines [two methods](https://www.w3.org/TR/sparql11-protocol/#update-operation)
to perform a SPARQL update operation via HTTP, which can be specified via the
`request_method` option:
1. update via URL-encoded POST: by setting the options `request_method: :url_encoded`
2. update via POST directly: by setting the options `request_method: :direct` (default)
"""
def update(update, endpoint, opts \\ []) do
unvalidated_update(nil, update, endpoint, opts)
end
@doc """
Executes a SPARQL `INSERT` update operation against a service endpoint.
See documentation of the generic `update/3` function and the [module documentation](`SPARQL.Client`) for the available options.
"""
def insert(update, endpoint, opts \\ []) do
unvalidated_update(:insert, update, endpoint, opts)
end
@doc """
Executes a SPARQL `DELETE` update operation against a service endpoint.
See documentation of the generic `update/3` function and the [module documentation](`SPARQL.Client`) for the available options.
"""
def delete(update, endpoint, opts \\ []) do
unvalidated_update(:delete, update, endpoint, opts)
end
@doc """
Executes a SPARQL `INSERT DATA` update operation against a service endpoint.
The `INSERT DATA` update can either be given as string (only in raw-mode; see the
[module documentation](`SPARQL.Client`) for more information on the raw-mode) or
by providing the data to be inserted directly via an RDF.ex data structure
(`RDF.Graph`, `RDF.Description` or `RDF.Dataset`).
RDF.Graph.new({EX.S, EX.p, EX.O})
|> SPARQL.Client.insert_data("http://example.com/sparql")
See documentation of the generic `update/3` function and the [module documentation](`SPARQL.Client`) for the available options.
"""
def insert_data(data_or_update, endpoint, opts \\ []) do
update_data(:insert_data, data_or_update, endpoint, opts)
end
@doc """
Executes a SPARQL `DELETE DATA` update operation against a service endpoint.
The `DELETE DATA` update can either be given as string (only in raw-mode; see the
[module documentation](`SPARQL.Client`) for more information on the raw-mode) or
by providing the data to be deleted directly via an RDF.ex data structure
(`RDF.Graph`, `RDF.Description` or `RDF.Dataset`).
RDF.Graph.new({EX.S, EX.p, EX.O})
|> SPARQL.Client.delete_data("http://example.com/sparql")
See documentation of the generic `update/3` function and the [module documentation](`SPARQL.Client`) for the available options.
"""
def delete_data(data_or_update, endpoint, opts \\ []) do
update_data(:delete_data, data_or_update, endpoint, opts)
end
defp update_data(form, %rdf{} = data, endpoint, opts)
when rdf in [RDF.Graph, RDF.Description, RDF.Dataset] do
with {:ok, update_string} <- Client.Update.Builder.update_data(form, data, opts) do
do_update(form, update_string, endpoint, opts)
end
end
defp update_data(form, update, endpoint, opts) when is_binary(update) do
unvalidated_update(form, update, endpoint, opts)
end
@doc """
Executes a SPARQL `LOAD` update operation against a service endpoint.
The URL to load the data from must be specified with the `:from` option. The graph name
to which the data should be loaded can be given with the `:to` option. Both options
expect a URI as a value which can be given as a string, `RDF.IRI` or vocabulary namespace term.
SPARQL.Client.load("http://example.com/sparql", from: "http://example.com/Resource")
SPARQL.Client.load("http://example.com/sparql", from: EX.Resource, to: EX.Graph)
The update operation can be run in `SILENT` mode by setting the `:silent` option to `true`.
See documentation of the generic `update/3` function and the [module documentation](`SPARQL.Client`) for the available options.
"""
def load(endpoint, opts) when is_list(opts) do
{from, opts} = pop_required_keyword(opts, :from)
{to, opts} = Keyword.pop(opts, :to)
{silent, opts} = Keyword.pop(opts, :silent)
with {:ok, update_string} <- Client.Update.Builder.load(from, to, silent) do
do_update(:load, update_string, endpoint, opts)
end
end
@doc """
Executes a SPARQL `LOAD` update operation against a service endpoint.
This version only allows execution of `LOAD` update given as string in raw-mode (see the
[module documentation](`SPARQL.Client`) for more information on the raw-mode).
"LOAD <http://example.com/Resource>"
|> SPARQL.Client.load("http://example.com/sparql", raw_mode: true)
See `load/2` for how to execute a `LOAD` update with an automatically built update string.
See documentation of the generic `update/3` function and the [module documentation](`SPARQL.Client`) for the available options.
"""
def load(update, endpoint, opts) do
if Keyword.has_key?(opts, :from) or Keyword.has_key?(opts, :to) or
Keyword.has_key?(opts, :silent) do
raise ArgumentError,
"load/3 does not support the :from, :to and :silent options; use load/2 instead"
end
update_data(:load, update, endpoint, opts)
end
~w[create clear drop]a
|> Enum.each(fn form ->
form_keyword = form |> to_string() |> String.upcase()
@doc """
Executes a SPARQL `#{form_keyword}` update operation against a service endpoint.
The graph name must be specified with the `:graph` option either as a string, `RDF.IRI`,
vocabulary namespace term or one of the special values `:default`, `:named`, `:all`.
SPARQL.Client.#{form}("http://example.com/sparql", graph: "http://example.com/Graph")
SPARQL.Client.#{form}("http://example.com/sparql", graph: EX.Graph)
The update operation can be run in `SILENT` mode by setting the `:silent` option to `true`.
See documentation of the generic `update/3` function and the [module documentation](`SPARQL.Client`) for the available options.
"""
def unquote(form)(endpoint, opts) when is_list(opts) do
{graph, opts} = pop_required_keyword(opts, :graph)
{silent, opts} = Keyword.pop(opts, :silent)
with {:ok, update_string} <- apply(Client.Update.Builder, unquote(form), [graph, silent]) do
do_update(unquote(form), update_string, endpoint, opts)
end
end
@doc """
Executes a SPARQL `#{form_keyword}` update operation against a service endpoint.
This version only allows execution of `#{form_keyword}` updates given as string in raw-mode (see the
[module documentation](`SPARQL.Client`) for more information on the raw-mode).
"#{form_keyword} <http://example.com/Graph>"
|> SPARQL.Client.#{form}("http://example.com/sparql", raw_mode: true)
See `#{form}/2` for how to execute a `#{form_keyword}` update with an automatically built update string.
See documentation of the generic `update/3` function and the [module documentation](`SPARQL.Client`) for the available options.
"""
def unquote(form)(update, endpoint, opts) do
if Keyword.has_key?(opts, :graph) or Keyword.has_key?(opts, :silent) do
raise ArgumentError,
"#{unquote(form)}/3 does not support the :graph and :silent options; use #{
unquote(form)
}/2 instead"
end
update_data(unquote(form), update, endpoint, opts)
end
end)
~w[copy move add]a
|> Enum.each(fn form ->
form_keyword = form |> to_string() |> String.upcase()
@doc """
Executes a SPARQL `#{form_keyword}` update operation against a service endpoint.
The source graph must be specified with the `:from` option and the destination graph with the
`:to` option either as a string, `RDF.IRI`, vocabulary namespace term for the graph name or
`:default` for the default graph.
SPARQL.Client.#{form}("http://example.com/sparql",
from: "http://example.com/Graph1", to: "http://example.com/Graph2")
SPARQL.Client.#{form}("http://example.com/sparql",
from: :default, to: EX.Graph)
The update operation can be run in `SILENT` mode by setting the `:silent` option to `true`.
See documentation of the generic `update/3` function and the [module documentation](`SPARQL.Client`) for the available options.
"""
def unquote(form)(endpoint, opts) when is_list(opts) do
{from, opts} = pop_required_keyword(opts, :from)
{to, opts} = pop_required_keyword(opts, :to)
{silent, opts} = Keyword.pop(opts, :silent)
with {:ok, update_string} <- apply(Client.Update.Builder, unquote(form), [from, to, silent]) do
do_update(unquote(form), update_string, endpoint, opts)
end
end
@doc """
Executes a SPARQL `#{form_keyword}` update operation against a service endpoint.
This version only allows execution of `#{form_keyword}` updates given as string in raw-mode (see the
[module documentation](`SPARQL.Client`) for more information on the raw-mode).
"#{form_keyword} GRAPH <http://example.com/Graph1> TO GRAPH <http://example.com/Graph2>"
|> SPARQL.Client.#{form}("http://example.com/sparql", raw_mode: true)
See `#{form}/2` for how to execute a `#{form_keyword}` update with an automatically built update string.
See documentation of the generic `update/3` function and the [module documentation](`SPARQL.Client`) for the available options.
"""
def unquote(form)(update, endpoint, opts) do
if Keyword.has_key?(opts, :from) or Keyword.has_key?(opts, :to) or
Keyword.has_key?(opts, :silent) do
raise ArgumentError,
"#{unquote(form)}/3 does not support the :from, :to and :silent options; use #{
unquote(form)
}/2 instead"
end
update_data(unquote(form), update, endpoint, opts)
end
end)
defp unvalidated_update(form, update, endpoint, opts) do
unless raw_mode?(opts) do
raise """
      An update was passed directly as a string. Validation of updates is not implemented yet.
      Please run it in raw-mode by providing the raw_mode: true option.
"""
end
do_update(form, update, endpoint, opts)
end
defp do_update(form, update_string, endpoint, opts) do
with {:ok, options} <- NimbleOptions.validate(opts, @update_options_schema),
{:ok, request} <- Request.build(Client.Update, form, update_string, endpoint, options),
{:ok, _request} <- Request.call(request, options) do
:ok
else
{:error, %NimbleOptions.ValidationError{message: message}} -> {:error, message}
error -> error
end
end
@doc false
def validate_headers(map) when is_map(map), do: {:ok, map}
def validate_headers(other),
do: {:error, "expected :headers to be a map, got: #{inspect(other)}"}
defp default_raw_mode do
Application.get_env(:sparql_client, :raw_mode, false)
end
defp raw_mode?(opts) do
Keyword.get(opts, :raw_mode, default_raw_mode())
end
defp pop_required_keyword(opts, key) do
case Keyword.pop(opts, key) do
{nil, _} -> raise "missing required keyword option #{inspect(key)}"
result -> result
end
end
end
|
lib/sparql_client.ex
| 0.888831 | 0.84607 |
sparql_client.ex
|
starcoder
|
defmodule Remedy.Snowflake do
@moduledoc """
`Ecto.Type` compatible Discord Snowflake type.
Discord utilizes Twitter's snowflake format for uniquely identifiable descriptors (IDs). These IDs are guaranteed to be unique across all of Discord, except in some unique scenarios in which child objects share their parent's ID.
Snowflakes consist of a timestamp as well as metadata. Converting to another timestamp method will produce a valid and accurate timestamp. However, converting a value from a snowflake is a destructive operation and cannot be reversed.
iex> snowflake = 927056337051992064
...> butchered_snowflake = snowflake |> Remedy.ISO8601.to_iso8601() |> Remedy.Snowflake.to_snowflake()
...> butchered_snowflake == snowflake
false
While the utilities exist to execute such functionality, care should be taken.
For example:
- Converting an ISO8601 string to a snowflake for the purpose of pagination is reasonably safe to do.
- Using a message's snowflake ID in a filtering operation is also safe.
- Converting a DateTime struct to a snowflake to attempt to get a message's ID is not.
## Pagination
Discord typically uses snowflake IDs in many of the API routes for pagination. The standardized pagination paradigm utilized is one in which you can specify IDs before and after in combination with limit to retrieve a desired page of results. You will want to refer to the specific endpoint documentation for details.
## Casting
The following are examples of valid inputs for casting. Regardless of the format provided, values will be cast to an `t:integer/0` value for storage.
#### Decimal Integer
927056337051992064
#### ISO8601 String
"2019-01-01T00:00:00Z"
"""
import Remedy.TimeHelpers
use Ecto.Type
use Unsafe.Generator, handler: :unwrap, docs: false
@typedoc """
A Discord Snowflake Type.
"""
@type t() :: 0x400000..0xFFFFFFFFFFFFFFFF
@typedoc """
Castable to Discord Snowflake.
"""
@type c() :: t() | ISO8601.t() | DateTime.t() | integer()
@doc false
@impl true
@spec type :: :integer
def type, do: :integer
@spec cast(any) :: :error | {:ok, nil | t()}
@doc false
@impl true
@unsafe {:cast, [:value]}
def cast(value)
def cast(nil), do: {:ok, nil}
def cast(value), do: to_snowflake(value) |> casted()
defp casted(:error), do: :error
defp casted(snowflake), do: {:ok, snowflake}
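  # Illustrative: cast(927056337051992064) and cast("2019-01-01T00:00:00Z")
  # both return {:ok, integer_snowflake}; unconvertible input yields :error.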
@doc false
@impl true
@unsafe {:dump, [:snowflake]}
def dump(nil), do: {:ok, nil}
def dump(value) when is_snowflake(value), do: {:ok, to_snowflake(value)}
def dump(_value), do: :error
@doc false
@impl true
def load(value) when is_snowflake(value), do: {:ok, value}
@doc false
@impl true
def equal?(term1, term2), do: to_snowflake(term1) == to_snowflake(term2)
@doc false
@impl true
def embed_as(_value), do: :dump
defp unwrap({:ok, body}), do: body
defp unwrap({:error, _}), do: raise(ArgumentError)
end
|
lib/remedy/types/snowflake.ex
| 0.850546 | 0.526465 |
snowflake.ex
|
starcoder
|
defmodule SimpleGraphqlClient do
import SimpleGraphqlClient.HttpClient
import SimpleGraphqlClient.Parser
import SimpleGraphqlClient.Subscriber
alias SimpleGraphqlClient.Response
@moduledoc """
SimpleGraphqlClient is a graphql client, focused on simplicity and ease of use.
## Usage
### Query/Mutation example
```elixir
  iex> query = "query users($name: String){users(name: $name){name}}"
  ...> SimpleGraphqlClient.graphql_request(query, %{name: "Boris"})
# Will produce
{:ok,
%SimpleGraphqlClient.Response{
body: {:ok, %{"data" => %{"users" => []}}},
headers: [],
status_code: 200
}
}
```
### Subscription example
```elixir
sub_query = "
subscription testsub {
userAdded{
email
}
}
"
  SimpleGraphqlClient.absinthe_subscribe(sub_query, %{}, &IO.inspect/1)
# Will produce
%{"userAdded" => %{"email" => "<EMAIL>"}}
```
## More examples
You can find more examples in `test_app/test/graphql` folder
## Configuration
For configuration, I suggest writing your own wrappers around `&graphql_request/3` or any subscribe function. If you want to pass authorization parameters to the WS connection, please encode them into the URL.
"""
@doc """
Execute request to graphql endpoint
* query - any valid graphql query
  * variables - a map of variables to pass alongside the query
  * opts - url and a list of additional headers, e.g. for authorization
## Usage
```elixir
SimpleGraphqlClient.graphql_request(query, %{name: "Boris"}, %{url: "http://example.com/graphql", headers: token: "1234"})
```
"""
@spec graphql_request(binary, map | nil, keyword) ::
{:ok, Response.t()} | {:error, Response.t() | any}
def graphql_request(query, variables \\ nil, opts \\ []) do
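    # Send the query and variables to the endpoint, then parse the raw HTTP
    # response into a %SimpleGraphqlClient.Response{}.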
query
|> send_request(variables, opts)
|> parse_response
end
@doc """
  Subscribe to an Absinthe subscription.
  * query - any valid graphql query
  * variables - a map of variables to pass alongside the query
  * callback_or_dest - a callback function or destination to receive messages with fulfillment data
  * opts - url and a list of additional headers, e.g. for authorization
## Usage
```
  SimpleGraphqlClient.absinthe_subscribe(sub_query, %{}, &IO.inspect/1) # Or pass a pid/name as the last argument to receive messages with fulfillment data
```
"""
  @spec absinthe_subscribe(binary, map | nil, fun | pid | atom, keyword) :: :ok | {:error, any}
def absinthe_subscribe(query, variables, callback_or_dest, opts \\ []) do
query
|> absinthe_sub(variables, callback_or_dest, opts)
end
end
|
lib/simple_graphql_client.ex
| 0.723798 | 0.572185 |
simple_graphql_client.ex
|
starcoder
|
defmodule Axon.Initializers do
@moduledoc """
Parameter initializers.
Parameter initializers are used to initialize the weights
and biases of a neural network. Because most deep learning
optimization algorithms are iterative, they require an initial
point to iterate from.
Sometimes the initialization of a model can determine whether
or not a model converges. In some cases, the initial point is
unstable, and therefore the model has no chance of converging
using common first-order optimization methods. In cases where
the model will converge, initialization can have a significant
impact on how quickly the model converges.
Most initialization strategies are built from intuition and
heuristics rather than theory. It's commonly accepted that
the parameters of different layers should be different -
motivating the use of random initialization for each layer's
parameters. Usually, only the weights of a layer are initialized
using a random distribution - while the biases are initialized
to a uniform constant (like 0).
Most initializers use Gaussian (normal) or uniform distributions
with variations on scale. The output scale of an initializer
should generally be large enough to avoid information loss but
small enough to avoid exploding values. The initializers in
this module have a default scale known to work well with
the initialization strategy.
All of the functions in this module are implemented as
numerical functions and can be JIT or AOT compiled with
any supported `Nx` compiler.
"""
# TODO: Add random keys
import Nx.Defn
import Axon.Shared
@doc """
Initializes parameters to 0.
## Examples
iex> init_fn = Axon.Initializers.zeros()
iex> init_fn.({2, 2}, {:f, 32})
#Nx.Tensor<
f32[2][2]
[
[0.0, 0.0],
[0.0, 0.0]
]
>
"""
def zeros() do
fn shape, type ->
zeros_impl(shape: shape, type: type)
end
end
defnp zeros_impl(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}])
Nx.broadcast(Nx.tensor(0, type: opts[:type]), opts[:shape])
end
@doc """
Initializes parameters to 1.
## Examples
iex> init_fn = Axon.Initializers.ones()
iex> init_fn.({2, 2}, {:f, 32})
#Nx.Tensor<
f32[2][2]
[
[1.0, 1.0],
[1.0, 1.0]
]
>
"""
def ones() do
fn shape, type ->
ones_impl(shape: shape, type: type)
end
end
defnp ones_impl(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}])
Nx.broadcast(Nx.tensor(1, type: opts[:type]), opts[:shape])
end
@doc """
Initializes parameters to value.
## Examples
iex> init_fn = Axon.Initializers.full(1.00)
iex> init_fn.({2, 2}, {:f, 32})
#Nx.Tensor<
f32[2][2]
[
[1.0, 1.0],
[1.0, 1.0]
]
>
"""
def full(value) do
fn shape, type ->
full_impl(value, shape: shape, type: type)
end
end
defnp full_impl(value, opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}])
Nx.as_type(Nx.broadcast(value, opts[:shape]), opts[:type])
end
@doc """
Initializes parameters to an identity matrix.
## Examples
iex> init_fn = Axon.Initializers.identity()
iex> init_fn.({2, 2}, {:f, 32})
#Nx.Tensor<
f32[2][2]
[
[1.0, 0.0],
[0.0, 1.0]
]
>
"""
def identity() do
fn shape, type ->
identity_impl(shape: shape, type: type)
end
end
defnp identity_impl(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}])
Nx.eye(opts[:shape], type: opts[:type])
end
@doc """
Initializes parameters with a random uniform distribution.
## Options
* `:scale` - scale of the output distribution. Defaults to `1.0e-2`
## Examples
iex> init_fn = Axon.Initializers.uniform()
iex> t = init_fn.({2, 2}, {:f, 32})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}
iex> init_fn = Axon.Initializers.uniform(scale: 1.0e-3)
iex> t = init_fn.({2, 2}, {:bf, 16})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
"""
def uniform(opts \\ []) do
fn shape, type ->
scale = opts[:scale] || 1.0e-2
uniform_impl(shape: shape, type: type, scale: scale)
end
end
defnp uniform_impl(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}, scale: 1.0e-2])
shape = Nx.shape(opts[:shape])
Nx.random_uniform(shape, Nx.negate(opts[:scale]), opts[:scale], type: opts[:type])
end
@doc """
Initializes parameters with a random normal distribution.
## Options
* `:mean` - mean of the output distribution. Defaults to `0.0`
* `:scale` - scale of the output distribution. Defaults to `1.0e-2`
## Examples
iex> init_fn = Axon.Initializers.normal()
iex> t = init_fn.({2, 2}, {:f, 32})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}
iex> init_fn = Axon.Initializers.normal(mean: 1.0, scale: 1.0)
iex> t = init_fn.({2, 2}, {:bf, 16})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
"""
def normal(opts \\ []) do
fn shape, type ->
scale = opts[:scale] || 1.0e-2
mean = opts[:mean] || 0.0
normal_impl(shape: shape, type: type, scale: scale, mean: mean)
end
end
defnp normal_impl(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}, scale: 1.0e-2, mean: 0.0])
Nx.random_normal(opts[:shape], opts[:mean], opts[:scale], type: opts[:type])
end
@doc """
Initializes parameters with the Lecun uniform initializer.
The Lecun uniform initializer is equivalent to calling
`Axon.Initializers.variance_scaling` with `mode: :fan_in`
and `distribution: :uniform`.
## Options
* `:scale` - scale of the output distribution. Defaults to `1.0`
## Examples
iex> init_fn = Axon.Initializers.lecun_uniform()
iex> t = init_fn.({2, 2}, {:f, 32})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}
iex> init_fn = Axon.Initializers.lecun_uniform(scale: 1.0e-3)
iex> t = init_fn.({2, 2}, {:bf, 16})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
## References
* [Efficient BackProp](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
"""
def lecun_uniform(opts \\ []) do
fn shape, type ->
scale = opts[:scale] || 1.0
lecun_uniform_impl(shape: shape, type: type, scale: scale)
end
end
defnp lecun_uniform_impl(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}, scale: 1.0])
variance_scaling_impl(
shape: opts[:shape],
type: opts[:type],
scale: opts[:scale],
mode: :fan_in,
distribution: :uniform
)
end
@doc """
Initializes parameters with the Lecun normal initializer.
The Lecun normal initializer is equivalent to calling
`Axon.Initializers.variance_scaling` with `mode: :fan_in`
and `distribution: :truncated_normal`.
## Options
* `:scale` - scale of the output distribution. Defaults to `1.0`
## Examples
iex> init_fn = Axon.Initializers.lecun_normal()
iex> t = init_fn.({2, 2}, {:f, 32})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}
iex> init_fn = Axon.Initializers.lecun_normal(scale: 1.0e-3)
iex> t = init_fn.({2, 2}, {:bf, 16})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
## References
* [Efficient BackProp](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
"""
def lecun_normal(opts \\ []) do
fn shape, type ->
scale = opts[:scale] || 1.0
lecun_normal_impl(shape: shape, type: type, scale: scale)
end
end
defnp lecun_normal_impl(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}, scale: 1.0])
variance_scaling_impl(
shape: opts[:shape],
type: opts[:type],
scale: opts[:scale],
mode: :fan_in,
distribution: :truncated_normal
)
end
@doc """
Initializes parameters with the Glorot uniform initializer.
The Glorot uniform initializer is equivalent to calling
`Axon.Initializers.variance_scaling` with `mode: :fan_avg`
and `distribution: :uniform`.
The Glorot uniform initializer is also called the Xavier
uniform initializer.
## Options
* `:scale` - scale of the output distribution. Defaults to `1.0`
## Examples
iex> init_fn = Axon.Initializers.glorot_uniform()
iex> t = init_fn.({2, 2}, {:f, 32})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}
iex> init_fn = Axon.Initializers.glorot_uniform(scale: 1.0e-3)
iex> t = init_fn.({2, 2}, {:bf, 16})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
## References
* [Understanding the difficulty of training deep feedforward neural networks](http://proceedings.mlr.press/v9/glorot10a.html)
"""
def glorot_uniform(opts \\ []) do
fn shape, type ->
scale = opts[:scale] || 1.0
glorot_uniform_impl(shape: shape, type: type, scale: scale)
end
end
defnp glorot_uniform_impl(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}, scale: 1.0])
variance_scaling_impl(
shape: opts[:shape],
type: opts[:type],
scale: opts[:scale],
mode: :fan_avg,
distribution: :uniform
)
end
@doc """
Initializes parameters with the Glorot normal initializer.
The Glorot normal initializer is equivalent to calling
`Axon.Initializers.variance_scaling` with `mode: :fan_avg`
and `distribution: :truncated_normal`.
The Glorot normal initializer is also called the Xavier
normal initializer.
## Options
* `:scale` - scale of the output distribution. Defaults to `1.0`
## Examples
iex> init_fn = Axon.Initializers.glorot_normal()
iex> t = init_fn.({2, 2}, {:f, 32})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}
iex> init_fn = Axon.Initializers.glorot_normal(scale: 1.0e-3)
iex> t = init_fn.({2, 2}, {:bf, 16})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
## References
* [Understanding the difficulty of training deep feedforward neural networks](http://proceedings.mlr.press/v9/glorot10a.html)
"""
def glorot_normal(opts \\ []) do
fn shape, type ->
scale = opts[:scale] || 1.0
glorot_normal_impl(shape: shape, type: type, scale: scale)
end
end
defnp glorot_normal_impl(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}, scale: 1.0])
variance_scaling_impl(
shape: opts[:shape],
type: opts[:type],
scale: opts[:scale],
mode: :fan_avg,
distribution: :truncated_normal
)
end
@doc """
Initializes parameters with the He uniform initializer.
The He uniform initializer is equivalent to calling
`Axon.Initializers.variance_scaling` with `mode: :fan_in`
and `distribution: :uniform`.
## Options
* `:scale` - scale of the output distribution. Defaults to `2.0`
## Examples
iex> init_fn = Axon.Initializers.he_uniform()
iex> t = init_fn.({2, 2}, {:f, 32})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}
iex> init_fn = Axon.Initializers.he_uniform(scale: 1.0e-3)
iex> t = init_fn.({2, 2}, {:bf, 16})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
## References
* [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)
"""
def he_uniform(opts \\ []) do
fn shape, type ->
scale = opts[:scale] || 2.0
he_uniform_impl(shape: shape, type: type, scale: scale)
end
end
defnp he_uniform_impl(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}, scale: 2.0])
variance_scaling_impl(
shape: opts[:shape],
type: opts[:type],
scale: opts[:scale],
mode: :fan_in,
distribution: :uniform
)
end
@doc """
Initializes parameters with the He normal initializer.
The He normal initializer is equivalent to calling
`Axon.Initializers.variance_scaling` with `mode: :fan_in`
and `distribution: :truncated_normal`.
## Options
* `:scale` - scale of the output distribution. Defaults to `2.0`
## Examples
iex> init_fn = Axon.Initializers.he_normal()
iex> t = init_fn.({2, 2}, {:f, 32})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}
iex> init_fn = Axon.Initializers.he_normal(scale: 1.0e-3)
iex> t = init_fn.({2, 2}, {:bf, 16})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
## References
* [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)
"""
def he_normal(opts \\ []) do
fn shape, type ->
scale = opts[:scale] || 2.0
he_normal_impl(shape: shape, type: type, scale: scale)
end
end
defnp he_normal_impl(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}, scale: 2.0])
variance_scaling_impl(
shape: opts[:shape],
type: opts[:type],
scale: opts[:scale],
mode: :fan_in,
distribution: :truncated_normal
)
end
@doc """
Initializes parameters with variance scaling according to
the given distribution and mode.
Variance scaling adapts the scale of the output distribution to the
shape (fan-in/fan-out) of the output tensor.
## Options
* `:scale` - scale of the output distribution. Defaults to `1.0`
* `:mode` - compute fan mode. One of `:fan_in`, `:fan_out`, or `:fan_avg`.
Defaults to `:fan_in`
* `:distribution` - output distribution. One of `:normal`, `:truncated_normal`,
or `:uniform`. Defaults to `:normal`
## Examples
iex> init_fn = Axon.Initializers.variance_scaling()
iex> t = init_fn.({2, 2}, {:f, 32})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}
iex> init_fn = Axon.Initializers.variance_scaling(mode: :fan_out, distribution: :truncated_normal)
iex> t = init_fn.({2, 2}, {:bf, 16})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
iex> init_fn = Axon.Initializers.variance_scaling(mode: :fan_out, distribution: :normal)
iex> t = init_fn.({64, 3, 32, 32}, {:f, 32})
iex> Nx.shape(t)
{64, 3, 32, 32}
iex> Nx.type(t)
{:f, 32}
"""
def variance_scaling(opts \\ []) do
fn shape, type ->
scale = opts[:scale] || 1.0
mode = opts[:mode] || :fan_in
distribution = opts[:distribution] || :normal
variance_scaling_impl(
shape: shape,
type: type,
scale: scale,
mode: mode,
distribution: distribution
)
end
end
defnp variance_scaling_impl(opts \\ []) do
opts =
keyword!(opts, [:shape, type: {:f, 32}, scale: 1.0, mode: :fan_in, distribution: :normal])
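# transform/2 escapes defn tracing here: compute_fans/1 is plain Elixir that
# runs on the static shape tuple at compile/trace time, not on tensor data.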
fans = transform(opts[:shape], &compute_fans/1)
denominator =
transform(
{fans, opts[:mode]},
fn
{{fan_in, _}, :fan_in} ->
fan_in
{{_, fan_out}, :fan_out} ->
fan_out
{{fan_in, fan_out}, :fan_avg} ->
(fan_in + fan_out) / 2.0
{{_, _}, mode} ->
raise ArgumentError, "invalid mode #{inspect(mode)} passed to variance_scaling/1"
end
)
variance = Nx.divide(Nx.tensor(opts[:scale], type: opts[:type]), Nx.max(denominator, 1.0))
var_opts = transform(opts, &Keyword.take(&1, [:shape, :type]))
transform(
{opts[:distribution], variance, var_opts},
fn
{:normal, variance, opts} ->
var_normal(variance, opts)
{:uniform, variance, opts} ->
var_uniform(variance, opts)
{:truncated_normal, variance, opts} ->
var_truncated(variance, opts)
{dist, _, _} ->
raise ArgumentError,
"invalid distribution #{inspect(dist)} passed to variance_scaling/1"
end
)
end
@doc """
Initializes a tensor with an orthogonal distribution.
For 2-D tensors, the initialization is generated through the QR decomposition of a random distribution.
For tensors with more than 2 dimensions, a 2-D tensor with shape `{shape[0] * shape[1] * ... * shape[n-2], shape[n-1]}`
is initialized and then reshaped accordingly.
## Options
* `:distribution` - output distribution. One of [`:normal`, `:uniform`].
Defaults to `:normal`
## Examples
iex> init_fn = Axon.Initializers.orthogonal()
iex> t = init_fn.({3, 3}, {:f, 32})
iex> Nx.type(t)
{:f, 32}
iex> Nx.shape(t)
{3, 3}
iex> init_fn = Axon.Initializers.orthogonal()
iex> t = init_fn.({1, 2, 3, 4}, {:f, 64})
iex> Nx.type(t)
{:f, 64}
iex> Nx.shape(t)
{1, 2, 3, 4}
"""
def orthogonal(opts \\ []) do
fn shape, type ->
distribution = opts[:distribution] || :normal
orthogonal_impl(shape: shape, type: type, distribution: distribution)
end
end
defnp orthogonal_impl(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}, distribution: :normal])
shape = opts[:shape]
distribution = opts[:distribution]
type = opts[:type]
assert_min_rank!("Axon.Initializers.orthogonal", "input_shape", shape, 2)
{{m, n}, random_seed} =
transform({shape, distribution, type}, fn {shape, distribution, type} ->
flat_shape =
if tuple_size(shape) > 2 do
tuple_list = shape |> Tuple.to_list() |> Enum.reverse()
n = hd(tuple_list)
m = Enum.reduce(tl(tuple_list), 1, &(&1 * &2))
{m, n}
else
shape
end
random_seed =
case distribution do
:uniform ->
Nx.random_uniform(flat_shape, type: type)
:normal ->
Nx.random_normal(flat_shape, type: type)
dist ->
raise ArgumentError,
"invalid distribution #{inspect(dist)} passed to orthogonal/1"
end
{flat_shape, random_seed}
end)
{q, _r} = Nx.LinAlg.qr(random_seed, mode: :complete)
q
|> Nx.slice([0, 0], [m, n])
|> Nx.reshape(shape)
end
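# Informal check (hedged): for a square shape the result has orthonormal
# columns, so transpose(t) . t is approximately the identity:
#
#     init_fn = Axon.Initializers.orthogonal()
#     t = init_fn.({4, 4}, {:f, 32})
#     Nx.dot(Nx.transpose(t), t) # ~ 4x4 identity, up to float error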
# Variance scaling branches
defnp var_normal(variance, opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}])
shape = opts[:shape]
type = opts[:type]
sigma = Nx.sqrt(variance)
Nx.random_normal(shape, 0.0, sigma, type: type)
end
defnp var_uniform(variance, opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}])
shape = opts[:shape]
type = opts[:type]
limit = Nx.sqrt(3 * variance)
Nx.random_uniform(shape, -limit, limit, type: type)
end
defnp var_truncated(variance, opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}])
shape = opts[:shape]
type = opts[:type]
sigma =
variance
|> Nx.sqrt()
|> Nx.divide(0.87962566103423978)
Nx.clip(Nx.random_normal(shape, 0.0, sigma, type: type), -2, 2)
end
defp compute_fans(shape) do
rank = Nx.rank(shape)
{fan_in, fan_out} =
cond do
rank < 1 ->
{1, 1}
rank == 1 ->
{elem(shape, 0), elem(shape, 0)}
rank == 2 ->
{elem(shape, 0), elem(shape, 1)}
true ->
receptive_field_size = Nx.size(shape) / elem(shape, 0) / elem(shape, 1)
fan_in = elem(shape, 0) * receptive_field_size
fan_out = elem(shape, 1) * receptive_field_size
{fan_in, fan_out}
end
{fan_in, fan_out}
end
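# Worked example: for shape {64, 3, 32, 32}, Nx.size/1 is 196_608, so
# receptive_field_size = 196_608 / 64 / 3 = 1024.0, giving
# fan_in = 64 * 1024.0 = 65_536.0 and fan_out = 3 * 1024.0 = 3072.0.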
end
|
lib/axon/initializers.ex
| 0.870652 | 0.805058 |
initializers.ex
|
starcoder
|
defmodule Nectar.Variant do
use Nectar.Web, :model
use Arc.Ecto.Schema
schema "variants" do
field :is_master, :boolean, default: false
field :sku, :string
field :weight, :decimal
field :height, :decimal
field :width, :decimal
field :depth, :decimal
field :discontinue_on, Ecto.Date
field :cost_price, :decimal
field :cost_currency, :string
field :image, Nectar.VariantImage.Type
field :total_quantity, :integer, default: 0
field :add_count, :integer, virtual: true
field :bought_quantity, :integer, default: 0
field :buy_count, :integer, virtual: true
field :restock_count, :integer, virtual: true
belongs_to :product, Nectar.Product
has_many :variant_option_values, Nectar.VariantOptionValue, on_delete: :delete_all, on_replace: :delete
has_many :option_values, through: [:variant_option_values, :option_value]
has_many :line_items, Nectar.LineItem
timestamps()
extensions()
end
@required_fields ~w(is_master discontinue_on cost_price)a
@optional_fields ~w(sku weight height width depth cost_currency add_count)a
@doc """
Creates a changeset based on the `model` and `params`.
If no params are provided, an invalid changeset is returned
with no validation performed.
"""
def changeset(model, params \\ %{}) do
model
|> cast(params, @required_fields ++ @optional_fields)
|> validate_required(@required_fields)
|> Validations.Date.validate_not_past_date(:discontinue_on)
|> validate_number(:add_count, greater_than: 0)
|> update_total_quantity
end
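# Informal example (hedged; the param values are illustrative):
#
#     variant
#     |> Nectar.Variant.changeset(%{
#       "is_master" => false,
#       "cost_price" => "10.00",
#       "discontinue_on" => "2030-01-01",
#       "add_count" => 5
#     })
#     # update_total_quantity/1 then puts total_quantity + 5 into the changeset.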
@required_fields ~w(cost_price)a
@optional_fields ~w(add_count discontinue_on sku)a
def create_master_changeset(model, params \\ %{}) do
model
|> cast(params, @required_fields ++ @optional_fields)
|> validate_required(@required_fields)
|> update_total_quantity
|> put_change(:is_master, true)
|> validate_number(:add_count, greater_than: 0)
|> cast_attachments(params, ~w(), ~w(image))
end
@required_fields ~w(cost_price discontinue_on)a
@optional_fields ~w(add_count)a
def update_master_changeset(model, product, params \\ %{}) do
model
|> cast(params, @required_fields ++ @optional_fields)
|> validate_required(@required_fields)
|> Validations.Date.validate_not_past_date(:discontinue_on)
|> validate_discontinue_gt_available_on(product)
|> update_total_quantity
|> put_change(:is_master, true)
|> validate_number(:add_count, greater_than: 0)
|> check_is_master_changed
# Even if the changeset is invalid, cast_attachments still does its work :(
|> cast_attachments(params, ~w(), ~w(image))
end
defp check_is_master_changed(changeset) do
if get_change(changeset, :is_master) do
add_error(changeset, :is_master, "appears to assign another variant as master variant")
|> add_error(:base, "Please check whether your Master Variant is deleted :(")
else
changeset
end
end
def create_variant_changeset(model, product, params \\ %{}) do
changeset(model, params)
|> validate_discontinue_gt_available_on(product)
|> put_change(:is_master, false)
|> cast_attachments(params, ~w(), ~w(image))
|> cast_assoc(:variant_option_values, required: true, with: &Nectar.VariantOptionValue.from_variant_changeset/2)
end
def update_variant_changeset(model, product, params \\ %{}) do
changeset(model, params)
|> validate_discontinue_gt_available_on(product)
|> validate_not_master
# Even if the changeset is invalid, cast_attachments still does its work :(
|> cast_attachments(params, [:image])
|> cast_assoc(:variant_option_values, required: true, with: &Nectar.VariantOptionValue.from_variant_changeset/2)
end
defp validate_not_master(changeset) do
if changeset.data.is_master do
add_error(changeset, :is_master, "can't be updated")
|> add_error(:base, "Please go to Product Edit Page to update master variant")
else
changeset
end
end
@required_fields ~w(buy_count)a
@optional_fields ~w()a
def buy_changeset(model, params \\ %{}) do
model
|> cast(params, @required_fields ++ @optional_fields)
|> validate_required(@required_fields)
|> validate_number(:buy_count, greater_than: 0)
|> increment_bought_quantity
end
@required_fields ~w(restock_count)a
@optional_fields ~w()a
def restocking_changeset(model, params) do
model
|> cast(params, @required_fields ++ @optional_fields)
|> validate_required(@required_fields)
|> validate_number(:restock_count, greater_than: 0)
|> decrement_bought_quantity
end
defp update_total_quantity(model) do
quantity_to_add = model.changes[:add_count]
if quantity_to_add do
put_change(model, :total_quantity, model.data.total_quantity + quantity_to_add)
else
model
end
end
defp increment_bought_quantity(model) do
quantity_to_add = model.changes[:buy_count]
if quantity_to_add do
put_change(model, :bought_quantity, (model.data.bought_quantity || 0) + quantity_to_add)
else
model
end
end
defp decrement_bought_quantity(model) do
quantity_to_subtract = model.changes[:restock_count]
if quantity_to_subtract do
put_change(model, :bought_quantity, (model.data.bought_quantity || 0) - quantity_to_subtract)
else
model
end
end
def available_quantity(%Nectar.Variant{total_quantity: total_quantity, bought_quantity: bought_quantity}) when is_nil(bought_quantity) do
total_quantity
end
def available_quantity(%Nectar.Variant{total_quantity: total_quantity, bought_quantity: bought_quantity}) do
total_quantity - bought_quantity
end
def display_name(variant) do
product = variant.product
"#{product.name}(#{variant.sku})"
end
defp validate_discontinue_gt_available_on(changeset, product) do
changeset
|> Validations.Date.validate_gt_date(:discontinue_on, product.available_on)
end
def sufficient_quantity_available?(variant, requested_quantity) do
available_quantity(variant) >= requested_quantity
end
def discontinued?(variant) do
discontinue_on = variant.discontinue_on
if discontinue_on do
case Ecto.Date.compare(discontinue_on, Ecto.Date.utc) do
:lt -> true
_ -> false
end
else
false
end
end
def availability_status(variant, requested_quantity \\ 0) do
cond do
discontinued?(variant) ->
:discontinued
not sufficient_quantity_available?(variant, requested_quantity) ->
available = available_quantity(variant)
if available > 0 do
{:insufficient_quantity, available}
else
:out_of_stock
end
true ->
:ok
end
end
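# Informal examples (hedged, using the struct fields defined above):
#
#     v = %Nectar.Variant{total_quantity: 5, bought_quantity: 3, discontinue_on: nil}
#     Nectar.Variant.available_quantity(v)     #=> 2
#     Nectar.Variant.availability_status(v, 2) #=> :ok
#     Nectar.Variant.availability_status(v, 4) #=> {:insufficient_quantity, 2}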
end
|
web/models/variant.ex
| 0.725843 | 0.42662 |
variant.ex
|
starcoder
|
defmodule AWS.GlobalAccelerator do
@moduledoc """
AWS Global Accelerator
This is the *AWS Global Accelerator API Reference*. This guide is for
developers who need detailed information about AWS Global Accelerator API
actions, data types, and errors. For more information about Global
Accelerator features, see the [AWS Global Accelerator Developer
Guide](https://docs.aws.amazon.com/global-accelerator/latest/dg/Welcome.html).
AWS Global Accelerator is a service in which you create *accelerators* to
improve availability and performance of your applications for local and
global users. Global Accelerator directs traffic to optimal endpoints over
the AWS global network. This improves the availability and performance of
your internet applications that are used by a global audience. Global
Accelerator is a global service that supports endpoints in multiple AWS
Regions, which are listed in the [AWS Region
Table](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/).
<important> Global Accelerator is a global service that supports endpoints
in multiple AWS Regions but you must specify the US West (Oregon) Region to
create or update accelerators.
</important> By default, Global Accelerator provides you with static IP
addresses that you associate with your accelerator. (Instead of using the
IP addresses that Global Accelerator provides, you can configure these
entry points to be IPv4 addresses from your own IP address ranges that you
bring to Global Accelerator.) The static IP addresses are anycast from the
AWS edge network and distribute incoming application traffic across
multiple endpoint resources in multiple AWS Regions, which increases the
availability of your applications. Endpoints can be Network Load Balancers,
Application Load Balancers, EC2 instances, or Elastic IP addresses that are
located in one AWS Region or multiple Regions.
Global Accelerator uses the AWS global network to route traffic to the
optimal regional endpoint based on health, client location, and policies
that you configure. The service reacts instantly to changes in health or
configuration to ensure that internet traffic from clients is directed to
only healthy endpoints.
Global Accelerator includes components that work together to help you
improve performance and availability for your applications:
<dl> <dt>Static IP address</dt> <dd> By default, AWS Global Accelerator
provides you with a set of static IP addresses that are anycast from the
AWS edge network and serve as the single fixed entry points for your
clients. Or you can configure these entry points to be IPv4 addresses from
your own IP address ranges that you bring to Global Accelerator (BYOIP).
For more information, see [Bring Your Own IP Addresses
(BYOIP)](https://docs.aws.amazon.com/global-accelerator/latest/dg/using-byoip.html)
in the *AWS Global Accelerator Developer Guide*. If you already have load
balancers, EC2 instances, or Elastic IP addresses set up for your
applications, you can easily add those to Global Accelerator to allow the
resources to be accessed by the static IP addresses.
<important> The static IP addresses remain assigned to your accelerator for
as long as it exists, even if you disable the accelerator and it no longer
accepts or routes traffic. However, when you *delete* an accelerator, you
lose the static IP addresses that are assigned to it, so you can no longer
route traffic by using them. You can use IAM policies with Global
Accelerator to limit the users who have permissions to delete an
accelerator. For more information, see [Authentication and Access
Control](https://docs.aws.amazon.com/global-accelerator/latest/dg/auth-and-access-control.html)
in the *AWS Global Accelerator Developer Guide*.
</important> </dd> <dt>Accelerator</dt> <dd> An accelerator directs traffic
to optimal endpoints over the AWS global network to improve availability
and performance for your internet applications that have a global audience.
Each accelerator includes one or more listeners.
</dd> <dt>DNS name</dt> <dd> Global Accelerator assigns each accelerator a
default Domain Name System (DNS) name, similar to
`a1234567890abcdef.awsglobalaccelerator.com`, that points to your Global
Accelerator static IP addresses. Depending on the use case, you can use
your accelerator's static IP addresses or DNS name to route traffic to your
accelerator, or set up DNS records to route traffic using your own custom
domain name.
</dd> <dt>Network zone</dt> <dd> A network zone services the static IP
addresses for your accelerator from a unique IP subnet. Similar to an AWS
Availability Zone, a network zone is an isolated unit with its own set of
physical infrastructure. When you configure an accelerator, by default,
Global Accelerator allocates two IPv4 addresses for it. If one IP address
from a network zone becomes unavailable due to IP address blocking by
certain client networks, or network disruptions, then client applications
can retry on the healthy static IP address from the other isolated network
zone.
</dd> <dt>Listener</dt> <dd> A listener processes inbound connections from
clients to Global Accelerator, based on the protocol and port that you
configure. Each listener has one or more endpoint groups associated with
it, and traffic is forwarded to endpoints in one of the groups. You
associate endpoint groups with listeners by specifying the Regions that you
want to distribute traffic to. Traffic is distributed to optimal endpoints
within the endpoint groups associated with a listener.
</dd> <dt>Endpoint group</dt> <dd> Each endpoint group is associated with a
specific AWS Region. Endpoint groups include one or more endpoints in the
Region. You can increase or reduce the percentage of traffic that would be
otherwise directed to an endpoint group by adjusting a setting called a
*traffic dial*. The traffic dial lets you easily do performance testing or
blue/green deployment testing for new releases across different AWS
Regions, for example.
</dd> <dt>Endpoint</dt> <dd> An endpoint is a Network Load Balancer,
Application Load Balancer, EC2 instance, or Elastic IP address. Traffic is
routed to endpoints based on several factors, including the geo-proximity
to the user, the health of the endpoint, and the configuration options that
you choose, such as endpoint weights. For each endpoint, you can configure
weights, which are numbers that you can use to specify the proportion of
traffic to route to each one. This can be useful, for example, to do
performance testing within a Region.
</dd> </dl>
"""
@doc """
Advertises an IPv4 address range that is provisioned for use with your AWS
resources through bring your own IP addresses (BYOIP). It can take a few
minutes before traffic to the specified addresses starts routing to AWS
because of propagation delays. To see an AWS CLI example of advertising an
address range, scroll down to **Example**.
To stop advertising the BYOIP address range, use [
WithdrawByoipCidr](https://docs.aws.amazon.com/global-accelerator/latest/api/WithdrawByoipCidr.html).
For more information, see [Bring Your Own IP Addresses
(BYOIP)](https://docs.aws.amazon.com/global-accelerator/latest/dg/using-byoip.html)
in the *AWS Global Accelerator Developer Guide*.
"""
def advertise_byoip_cidr(client, input, options \\ []) do
request(client, "AdvertiseByoipCidr", input, options)
end
@doc """
Create an accelerator. An accelerator includes one or more listeners that
process inbound connections and direct traffic to one or more endpoint
groups, each of which includes endpoints, such as Network Load Balancers.
To see an AWS CLI example of creating an accelerator, scroll down to
**Example**.
<important> Global Accelerator is a global service that supports endpoints
in multiple AWS Regions but you must specify the US West (Oregon) Region to
create or update accelerators.
</important>
"""
def create_accelerator(client, input, options \\ []) do
request(client, "CreateAccelerator", input, options)
end
@doc """
Create an endpoint group for the specified listener. An endpoint group is a
collection of endpoints in one AWS Region. A resource must be valid and
active when you add it as an endpoint.
To see an AWS CLI example of creating an endpoint group, scroll down to
**Example**.
"""
def create_endpoint_group(client, input, options \\ []) do
request(client, "CreateEndpointGroup", input, options)
end
@doc """
Create a listener to process inbound connections from clients to an
accelerator. Connections arrive to assigned static IP addresses on a port,
port range, or list of port ranges that you specify. To see an AWS CLI
example of creating a listener, scroll down to **Example**.
"""
def create_listener(client, input, options \\ []) do
request(client, "CreateListener", input, options)
end
@doc """
Delete an accelerator. Before you can delete an accelerator, you must
disable it and remove all dependent resources (listeners and endpoint
groups). To disable the accelerator, update the accelerator to set
`Enabled` to false.
<important> When you create an accelerator, by default, Global Accelerator
provides you with a set of two static IP addresses. Alternatively, you can
bring your own IP address ranges to Global Accelerator and assign IP
addresses from those ranges.
The IP addresses are assigned to your accelerator for as long as it exists,
even if you disable the accelerator and it no longer accepts or routes
traffic. However, when you *delete* an accelerator, you lose the static IP
addresses that are assigned to the accelerator, so you can no longer route
traffic by using them. As a best practice, ensure that you have permissions
in place to avoid inadvertently deleting accelerators. You can use IAM
policies with Global Accelerator to limit the users who have permissions to
delete an accelerator. For more information, see [Authentication and Access
Control](https://docs.aws.amazon.com/global-accelerator/latest/dg/auth-and-access-control.html)
in the *AWS Global Accelerator Developer Guide*.
</important>
"""
def delete_accelerator(client, input, options \\ []) do
request(client, "DeleteAccelerator", input, options)
end
@doc """
Delete an endpoint group from a listener.
"""
def delete_endpoint_group(client, input, options \\ []) do
request(client, "DeleteEndpointGroup", input, options)
end
@doc """
Delete a listener from an accelerator.
"""
def delete_listener(client, input, options \\ []) do
request(client, "DeleteListener", input, options)
end
@doc """
Releases the specified address range that you provisioned to use with your
AWS resources through bring your own IP addresses (BYOIP) and deletes the
corresponding address pool. To see an AWS CLI example of deprovisioning an
address range, scroll down to **Example**.
Before you can release an address range, you must stop advertising it by
using
[WithdrawByoipCidr](https://docs.aws.amazon.com/global-accelerator/latest/api/WithdrawByoipCidr.html)
and you must not have any accelerators that are using static IP addresses
allocated from its address range.
For more information, see [Bring Your Own IP Addresses
(BYOIP)](https://docs.aws.amazon.com/global-accelerator/latest/dg/using-byoip.html)
in the *AWS Global Accelerator Developer Guide*.
"""
def deprovision_byoip_cidr(client, input, options \\ []) do
request(client, "DeprovisionByoipCidr", input, options)
end
@doc """
Describe an accelerator. To see an AWS CLI example of describing an
accelerator, scroll down to **Example**.
"""
def describe_accelerator(client, input, options \\ []) do
request(client, "DescribeAccelerator", input, options)
end
@doc """
Describe the attributes of an accelerator. To see an AWS CLI example of
describing the attributes of an accelerator, scroll down to **Example**.
"""
def describe_accelerator_attributes(client, input, options \\ []) do
request(client, "DescribeAcceleratorAttributes", input, options)
end
@doc """
Describe an endpoint group. To see an AWS CLI example of describing an
endpoint group, scroll down to **Example**.
"""
def describe_endpoint_group(client, input, options \\ []) do
request(client, "DescribeEndpointGroup", input, options)
end
@doc """
Describe a listener. To see an AWS CLI example of describing a listener,
scroll down to **Example**.
"""
def describe_listener(client, input, options \\ []) do
request(client, "DescribeListener", input, options)
end
@doc """
List the accelerators for an AWS account. To see an AWS CLI example of
listing the accelerators for an AWS account, scroll down to **Example**.
"""
def list_accelerators(client, input, options \\ []) do
request(client, "ListAccelerators", input, options)
end
@doc """
Lists the IP address ranges that were specified in calls to
[ProvisionByoipCidr](https://docs.aws.amazon.com/global-accelerator/latest/api/ProvisionByoipCidr.html),
including the current state and a history of state changes.
To see an AWS CLI example of listing BYOIP CIDR addresses, scroll down to
**Example**.
"""
def list_byoip_cidrs(client, input, options \\ []) do
request(client, "ListByoipCidrs", input, options)
end
@doc """
List the endpoint groups that are associated with a listener. To see an AWS
CLI example of listing the endpoint groups for a listener, scroll down to
**Example**.
"""
def list_endpoint_groups(client, input, options \\ []) do
request(client, "ListEndpointGroups", input, options)
end
@doc """
List the listeners for an accelerator. To see an AWS CLI example of listing
the listeners for an accelerator, scroll down to **Example**.
"""
def list_listeners(client, input, options \\ []) do
request(client, "ListListeners", input, options)
end
@doc """
List all tags for an accelerator. To see an AWS CLI example of listing tags
for an accelerator, scroll down to **Example**.
For more information, see [Tagging in AWS Global
Accelerator](https://docs.aws.amazon.com/global-accelerator/latest/dg/tagging-in-global-accelerator.html)
in the *AWS Global Accelerator Developer Guide*.
"""
def list_tags_for_resource(client, input, options \\ []) do
request(client, "ListTagsForResource", input, options)
end
@doc """
Provisions an IP address range to use with your AWS resources through bring
your own IP addresses (BYOIP) and creates a corresponding address pool.
After the address range is provisioned, it is ready to be advertised using
[
AdvertiseByoipCidr](https://docs.aws.amazon.com/global-accelerator/latest/api/AdvertiseByoipCidr.html).
To see an AWS CLI example of provisioning an address range for BYOIP,
scroll down to **Example**.
For more information, see [Bring Your Own IP Addresses
(BYOIP)](https://docs.aws.amazon.com/global-accelerator/latest/dg/using-byoip.html)
in the *AWS Global Accelerator Developer Guide*.
"""
def provision_byoip_cidr(client, input, options \\ []) do
request(client, "ProvisionByoipCidr", input, options)
end
@doc """
Add tags to an accelerator resource. To see an AWS CLI example of adding
tags to an accelerator, scroll down to **Example**.
For more information, see [Tagging in AWS Global
Accelerator](https://docs.aws.amazon.com/global-accelerator/latest/dg/tagging-in-global-accelerator.html)
in the *AWS Global Accelerator Developer Guide*.
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
Remove tags from a Global Accelerator resource. When you specify a tag key,
the action removes both that key and its associated value. To see an AWS
CLI example of removing tags from an accelerator, scroll down to
**Example**. The operation succeeds even if you attempt to remove tags from
an accelerator that was already removed.
For more information, see [Tagging in AWS Global
Accelerator](https://docs.aws.amazon.com/global-accelerator/latest/dg/tagging-in-global-accelerator.html)
in the *AWS Global Accelerator Developer Guide*.
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@doc """
Update an accelerator. To see an AWS CLI example of updating an
accelerator, scroll down to **Example**.
<important> Global Accelerator is a global service that supports endpoints
in multiple AWS Regions but you must specify the US West (Oregon) Region to
create or update accelerators.
</important>
"""
def update_accelerator(client, input, options \\ []) do
request(client, "UpdateAccelerator", input, options)
end
@doc """
Update the attributes for an accelerator. To see an AWS CLI example of
updating an accelerator to enable flow logs, scroll down to **Example**.
"""
def update_accelerator_attributes(client, input, options \\ []) do
request(client, "UpdateAcceleratorAttributes", input, options)
end
@doc """
Update an endpoint group. A resource must be valid and active when you add
it as an endpoint.
To see an AWS CLI example of updating an endpoint group, scroll down to
**Example**.
"""
def update_endpoint_group(client, input, options \\ []) do
request(client, "UpdateEndpointGroup", input, options)
end
@doc """
Update a listener. To see an AWS CLI example of updating a listener, scroll
down to **Example**.
"""
def update_listener(client, input, options \\ []) do
request(client, "UpdateListener", input, options)
end
@doc """
Stops advertising an address range that is provisioned as an address pool.
You can perform this operation at most once every 10 seconds, even if you
specify different address ranges each time. To see an AWS CLI example of
withdrawing an address range for BYOIP so it will no longer be advertised
by AWS, scroll down to **Example**.
It can take a few minutes before traffic to the specified addresses stops
routing to AWS because of propagation delays.
For more information, see [Bring Your Own IP Addresses
(BYOIP)](https://docs.aws.amazon.com/global-accelerator/latest/dg/using-byoip.html)
in the *AWS Global Accelerator Developer Guide*.
"""
def withdraw_byoip_cidr(client, input, options \\ []) do
request(client, "WithdrawByoipCidr", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "globalaccelerator"}
host = build_host("globalaccelerator", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "GlobalAccelerator_V20180706.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
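# For example, region "us-west-2" with endpoint "amazonaws.com", proto "https",
# and port 443 yields "https://globalaccelerator.us-west-2.amazonaws.com:443/".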
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/global_accelerator.ex
| 0.894375 | 0.573858 |
global_accelerator.ex
|
starcoder
|
defmodule Redix.PubSub do
@moduledoc """
Interface for the Redis pub/sub functionality.
The rest of this documentation will assume the reader knows how pub/sub works
in Redis and knows the meaning of the following Redis commands:
* `SUBSCRIBE` and `UNSUBSCRIBE`
* `PSUBSCRIBE` and `PUNSUBSCRIBE`
* `PUBLISH`
## Usage
Each `Redix.PubSub` process is able to subscribe to/unsubscribe from multiple
Redis channels/patterns, and is able to handle multiple Elixir processes subscribing
each to different channels/patterns.
A `Redix.PubSub` process can be started via `Redix.PubSub.start_link/2`; such
a process holds a single TCP (or SSL) connection to the Redis server.
`Redix.PubSub` has a message-oriented API. Subscribe operations are synchronous and return
a reference that can then be used to match on all messages sent by the `Redix.PubSub` process.
When `Redix.PubSub` registers a subscription, the subscriber process will receive a
confirmation message:
{:ok, pubsub} = Redix.PubSub.start_link()
{:ok, ref} = Redix.PubSub.subscribe(pubsub, "my_channel", self())
receive do message -> message end
#=> {:redix_pubsub, ^pubsub, ^ref, :subscribed, %{channel: "my_channel"}}
When the `:subscribed` message is received, it's guaranteed that the `Redix.PubSub` process has
subscribed to the given channel. This means that after a subscription, messages published to
a channel are delivered to all Elixir processes subscribed to that channel via `Redix.PubSub`:
# Someone publishes "hello" on "my_channel"
receive do message -> message end
#=> {:redix_pubsub, ^pubsub, ^ref, :message, %{channel: "my_channel", payload: "hello"}}
It's advised to wait for the subscription confirmation for a channel before doing any
other operation involving that channel.
Note that unsubscription confirmations are delivered right away even if the `Redix.PubSub`
process is still subscribed to the given channel: this is by design, as once a process
is unsubscribed from a channel it won't receive messages anyway, even if the `Redix.PubSub`
process still receives them.
Messages are also delivered as a confirmation of an unsubscription as well as when the
`Redix.PubSub` connection goes down. See the "Messages" section below.
## Messages
Most of the communication with a PubSub connection is done via (Elixir) messages: the
subscribers of these messages will be the processes specified at subscription time (in
`subscribe/3` or `psubscribe/3`). All `Redix.PubSub` messages have the same form: they're a
five-element tuple that looks like this:
{:redix_pubsub, pubsub_pid, subscription_ref, message_type, message_properties}
where:
* `pubsub_pid` is the pid of the `Redix.PubSub` process that sent this message.
* `subscription_ref` is the reference returned by `subscribe/3` or `psubscribe/3`.
* `message_type` is the type of this message, such as `:subscribed` for subscription
confirmations, `:message` for pub/sub messages, and so on.
* `message_properties` is a map of data related to the message; its contents vary based on `message_type`.
Given this format, it's easy to match on all Redix pub/sub messages for a subscription
as `{:redix_pubsub, _, ^subscription_ref, _, _}`.
### List of possible message types and properties
The following is a comprehensive list of possible message types alongside the properties
that each can have.
* `:subscribed` - sent as confirmation of subscription to a channel (via `subscribe/3` or
after a disconnection and reconnection). One `:subscribed` message is received for every
channel a process subscribed to. `:subscribed` messages have the following properties:
* `:channel` - the channel the process has been subscribed to.
* `:psubscribed` - sent as confirmation of subscription to a pattern (via `psubscribe/3` or
after a disconnection and reconnection). One `:psubscribed` message is received for every
pattern a process subscribed to. `:psubscribed` messages have the following properties:
* `:pattern` - the pattern the process has been subscribed to.
* `:unsubscribed` - sent as confirmation of unsubscription from a channel (via
`unsubscribe/3`). `:unsubscribed` messages are received for every channel a
process unsubscribes from. `:unsubscribed` messages have the following properties:
* `:channel` - the channel the process has unsubscribed from.
* `:punsubscribed` - sent as confirmation of unsubscription from a pattern (via
`punsubscribe/3`). `:punsubscribed` messages are received for every pattern a
process unsubscribes from. `:punsubscribed` messages have the following properties:
* `:pattern` - the pattern the process has unsubscribed from.
* `:message` - sent to subscribers to a given channel when a message is published on
that channel. `:message` messages have the following properties:
* `:channel` - the channel the message was published on
* `:payload` - the contents of the message
* `:pmessage` - sent to subscribers to a given pattern when a message is published on
a channel that matches that pattern. `:pmessage` messages have the following properties:
* `:channel` - the channel the message was published on
* `:pattern` - the original pattern that matched the channel
* `:payload` - the contents of the message
* `:disconnected` messages - sent to all subscribers to all channels/patterns when the
connection to Redis is interrupted. `:disconnected` messages have the following properties:
* `:error` - the reason for the disconnection, a `Redix.ConnectionError`
exception struct (that can be raised or turned into a message through
`Exception.message/1`).
## Reconnections
`Redix.PubSub` tries to be resilient to failures: when the connection with
Redis is interrupted (for whatever reason), it will try to reconnect to the
Redis server. When a disconnection happens, `Redix.PubSub` will notify all
clients subscribed to all channels with a `{:redix_pubsub, pid, subscription_ref, :disconnected,
_}` message (more on the format of messages above). When the connection goes
back up, `Redix.PubSub` takes care of actually re-subscribing to the
appropriate channels on the Redis server and subscribers are notified with a
`{:redix_pubsub, pid, subscription_ref, :subscribed | :psubscribed, _}` message, the same as
when a client subscribes to a channel/pattern.
Note that if `exit_on_disconnection: true` is passed to
`Redix.PubSub.start_link/2`, the `Redix.PubSub` process will exit and not send
any `:disconnected` messages to subscribed clients.
## Sentinel support
Works exactly the same as for normal `Redix` connections. See the documentation for `Redix`
for more information.
## Examples
This is an example of a workflow using the PubSub functionality; it uses
[Redix](https://github.com/whatyouhide/redix) as a Redis client for publishing
messages.
{:ok, pubsub} = Redix.PubSub.start_link()
{:ok, client} = Redix.start_link()
Redix.PubSub.subscribe(pubsub, "my_channel", self())
#=> {:ok, ref}
# We wait for the subscription confirmation
receive do
{:redix_pubsub, ^pubsub, ^ref, :subscribed, %{channel: "my_channel"}} -> :ok
end
Redix.command!(client, ~w(PUBLISH my_channel hello))
receive do
{:redix_pubsub, ^pubsub, ^ref, :message, %{channel: "my_channel"} = properties} ->
properties.payload
end
#=> "hello"
Redix.PubSub.unsubscribe(pubsub, "foo", self())
#=> :ok
# We wait for the unsubscription confirmation
receive do
{:redix_pubsub, ^pubsub, ^ref, :unsubscribed, _} -> :ok
end
"""
@type subscriber() :: pid() | port() | atom() | {atom(), node()}
@type connection() :: :gen_statem.server_ref()
alias Redix.StartOptions
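# A hedged sketch of a long-lived subscriber process. Redix.PubSub resubscribes
# on reconnection by itself; the subscriber only needs to handle the
# notifications documented above. The module and message handling below are
# illustrative, not part of Redix:
#
#     defmodule MySubscriber do
#       use GenServer
#
#       def start_link(channel), do: GenServer.start_link(__MODULE__, channel)
#
#       def init(channel) do
#         {:ok, pubsub} = Redix.PubSub.start_link()
#         {:ok, ref} = Redix.PubSub.subscribe(pubsub, channel, self())
#         {:ok, %{ref: ref}}
#       end
#
#       def handle_info({:redix_pubsub, _pid, ref, :subscribed, _}, %{ref: ref} = state),
#         do: {:noreply, state}
#
#       def handle_info({:redix_pubsub, _pid, ref, :message, %{payload: payload}}, %{ref: ref} = state) do
#         IO.puts("received: #{payload}")
#         {:noreply, state}
#       end
#
#       def handle_info({:redix_pubsub, _pid, ref, :disconnected, %{error: error}}, %{ref: ref} = state) do
#         IO.puts("disconnected: #{Exception.message(error)}")
#         {:noreply, state}
#       end
#     end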
@doc """
Starts a pub/sub connection to Redis.
This function returns `{:ok, pid}` if the PubSub process is started successfully.
The actual TCP/SSL connection to the Redis server may happen either synchronously,
before `start_link/2` returns, or asynchronously: this behaviour is decided by
the `:sync_connect` option (see below).
This function accepts one argument, either a Redis URI as a string or a list of options.
## Redis URI
In case `uri_or_opts` is a Redis URI, it must be in the form:
redis://[:password@]host[:port][/db]
Here are some examples of valid URIs:
redis://localhost
redis://:secret@localhost:6397
redis://example.com:6380/1
Usernames before the password are ignored, so these two URIs are
equivalent:
redis://:secret@localhost
redis://myuser:secret@localhost
The only mandatory thing when using URIs is the host. All other elements
(password, port, database) are optional and their default value can be found
in the "Options" section below.
## Options
The following options can be used to specify the parameters used to connect to
Redis (instead of a URI as described above):
* `:host` - (string) the host where the Redis server is running. Defaults to
`"localhost"`.
* `:port` - (integer) the port on which the Redis server is
running. Defaults to `6379`.
* `:password` - (string) the password used to connect to Redis. Defaults to
`nil`, meaning no password is used. When this option is provided, all Redix
does is issue an `AUTH` command to Redis in order to authenticate.
* `:database` - (integer or string) the database to connect to. Defaults to
`nil`, meaning don't connect to any database (Redis connects to database
`0` by default). When this option is provided, all Redix does is issue a
`SELECT` command to Redis in order to select the given database.
* `:socket_opts` - (list of options) this option specifies a list of options
that are passed to `:gen_tcp.connect/4` when connecting to the Redis
server. Some socket options (like `:active` or `:binary`) will be
overridden by `Redix.PubSub` so that it functions properly. Defaults to
`[]`.
* `:sync_connect` - (boolean) decides whether Redix should initiate the TCP
connection to the Redis server *before* or *after* returning from
`start_link/2`. This option also changes some reconnection semantics; read
the ["Reconnections" page](http://hexdocs.pm/redix/reconnections.html) in
the docs for `Redix` for more information.
* `:backoff_initial` - (integer) the initial backoff time (in milliseconds),
which is the time that will be waited by the `Redix.PubSub` process before
attempting to reconnect to Redis after a disconnection or failed first
connection. See the ["Reconnections"
page](http://hexdocs.pm/redix/reconnections.html) in the docs for `Redix`
for more information.
* `:backoff_max` - (integer) the maximum length (in milliseconds) of the
time interval used between reconnection attempts. See the ["Reconnections"
page](http://hexdocs.pm/redix/reconnections.html) in the docs for `Redix`
for more information.
* `:exit_on_disconnection` - (boolean) if `true`, the Redix server will exit
if it fails to connect or disconnects from Redis. Note that setting this
option to `true` means that the `:backoff_initial` and `:backoff_max` options
will be ignored. Defaults to `false`.
* `:log` - (keyword list) a keyword list of `{action, level}` where `level` is
the log level to use to log `action`. The possible actions and their default
values are:
* `:disconnection` (defaults to `:error`) - logged when the connection to
Redis is lost
* `:failed_connection` (defaults to `:error`) - logged when Redix can't
establish a connection to Redis
* `:reconnection` (defaults to `:info`) - logged when Redix manages to
reconnect to Redis after the connection was lost
* `:name` - Redix is bound to the same registration rules as a `GenServer`. See the
`GenServer` documentation for more information.
* `:ssl` - (boolean) if `true`, connect through SSL, otherwise through TCP. The
`:socket_opts` option applies to both SSL and TCP, so it can be used for things
like certificates. See `:ssl.connect/4`. Defaults to `false`.
* `:sentinels` - (list of options) exactly the same as the `:sentinel` options in
`Redix.start_link/1`.
## Examples
iex> Redix.PubSub.start_link()
{:ok, #PID<...>}
iex> Redix.PubSub.start_link(host: "example.com", port: 9999, password: "secret")
{:ok, #PID<...>}
iex> Redix.PubSub.start_link([database: 3], [name: :redix_3])
{:ok, #PID<...>}
"""
@spec start_link(String.t() | keyword()) :: :gen_statem.start_ret()
def start_link(uri_or_opts \\ [])
def start_link(uri) when is_binary(uri) do
uri |> Redix.URI.opts_from_uri() |> start_link()
end
def start_link(opts) when is_list(opts) do
opts = StartOptions.sanitize(opts)
case Keyword.pop(opts, :name) do
{nil, opts} ->
:gen_statem.start_link(Redix.PubSub.Connection, opts, [])
{atom, opts} when is_atom(atom) ->
:gen_statem.start_link({:local, atom}, Redix.PubSub.Connection, opts, [])
{{:global, _term} = tuple, opts} ->
:gen_statem.start_link(tuple, Redix.PubSub.Connection, opts, [])
{{:via, via_module, _term} = tuple, opts} when is_atom(via_module) ->
:gen_statem.start_link(tuple, Redix.PubSub.Connection, opts, [])
{other, _opts} ->
raise ArgumentError, """
expected :name option to be one of the following:
* nil
* atom
* {:global, term}
* {:via, module, term}
Got: #{inspect(other)}
"""
end
end
@doc """
Same as `start_link/1` but using both a Redis URI and a list of options.
In this case, options specified in `opts` have precedence over values specified by `uri`.
For example, if `uri` is `redis://example1.com` but `opts` is `[host: "example2.com"]`, then
`example2.com` will be used as the host when connecting.
"""
@spec start_link(String.t(), keyword()) :: :gen_statem.start_ret()
def start_link(uri, opts) when is_binary(uri) and is_list(opts) do
uri |> Redix.URI.opts_from_uri() |> Keyword.merge(opts) |> start_link()
end
@doc """
Stops the given pub/sub process.
This function is synchronous and blocks until the given pub/sub connection
frees all its resources and disconnects from the Redis server. `timeout` can
be passed to limit the amount of time allowed for the connection to exit; if
it doesn't exit in the given interval, this call exits.
## Examples
iex> Redix.PubSub.stop(conn)
:ok
"""
@spec stop(connection()) :: :ok
def stop(conn, timeout \\ :infinity) do
:gen_statem.stop(conn, :normal, timeout)
end
@doc """
Subscribes `subscriber` to the given channel or list of channels.
Subscribes `subscriber` (which can be anything that can be passed to `send/2`)
to `channels`, which can be a single channel or a list of channels.
For each of the channels in `channels` which `subscriber` successfully
subscribes to, a message will be sent to `subscriber` with this form:
{:redix_pubsub, pid, subscription_ref, :subscribed, %{channel: channel}}
See the documentation for `Redix.PubSub` for more information about the format
of messages.
## Examples
iex> Redix.PubSub.subscribe(conn, ["foo", "bar"], self())
{:ok, subscription_ref}
iex> flush()
{:redix_pubsub, ^conn, ^subscription_ref, :subscribed, %{channel: "foo"}}
{:redix_pubsub, ^conn, ^subscription_ref, :subscribed, %{channel: "bar"}}
:ok
"""
@spec subscribe(connection(), String.t() | [String.t()], subscriber) :: {:ok, reference()}
def subscribe(conn, channels, subscriber \\ self()) do
:gen_statem.call(conn, {:subscribe, List.wrap(channels), subscriber})
end
@doc """
Subscribes `subscriber` to the given pattern or list of patterns.
Works like `subscribe/3` but subscribing `subscriber` to a pattern (or list of
patterns) instead of regular channels.
Upon successful subscription to each of the `patterns`, a message will be sent
to `subscriber` with the following form:
{:redix_pubsub, pid, ^subscription_ref, :psubscribed, %{pattern: pattern}}
See the documentation for `Redix.PubSub` for more information about the format
of messages.
## Examples
iex> Redix.PubSub.psubscribe(conn, "ba*", self())
{:ok, subscription_ref}
iex> flush()
{:redix_pubsub, ^conn, ^subscription_ref, :psubscribed, %{pattern: "ba*"}}
:ok
"""
@spec psubscribe(connection(), String.t() | [String.t()], subscriber) :: {:ok, reference}
def psubscribe(conn, patterns, subscriber \\ self()) do
:gen_statem.call(conn, {:psubscribe, List.wrap(patterns), subscriber})
end
@doc """
Unsubscribes `subscriber` from the given channel or list of channels.
This function basically "undoes" what `subscribe/3` does: it unsubscribes
`subscriber` from the given channel or list of channels.
Upon successful unsubscription from each of the `channels`, a message will be
sent to `subscriber` with the following form:
{:redix_pubsub, pid, ^subscription_ref, :unsubscribed, %{channel: channel}}
See the documentation for `Redix.PubSub` for more information about the format
of messages.
## Examples
iex> Redix.PubSub.unsubscribe(conn, ["foo", "bar"], self())
:ok
iex> flush()
{:redix_pubsub, ^conn, ^subscription_ref, :unsubscribed, %{channel: "foo"}}
{:redix_pubsub, ^conn, ^subscription_ref, :unsubscribed, %{channel: "bar"}}
:ok
"""
@spec unsubscribe(connection(), String.t() | [String.t()], subscriber) :: :ok
def unsubscribe(conn, channels, subscriber \\ self()) do
:gen_statem.call(conn, {:unsubscribe, List.wrap(channels), subscriber})
end
@doc """
Unsubscribes `subscriber` from the given pattern or list of patterns.
This function basically "undoes" what `psubscribe/3` does: it unsubscribes
`subscriber` from the given pattern or list of patterns.
Upon successful unsubscription from each of the `patterns`, a message will be
sent to `subscriber` with the following form:
{:redix_pubsub, pid, ^subscription_ref, :punsubscribed, %{pattern: pattern}}
See the documentation for `Redix.PubSub` for more information about the format
of messages.
## Examples
iex> Redix.PubSub.punsubscribe(conn, "foo_*", self())
:ok
iex> flush()
{:redix_pubsub, ^conn, ^subscription_ref, :punsubscribed, %{pattern: "foo_*"}}
:ok
"""
@spec punsubscribe(connection(), String.t() | [String.t()], subscriber) :: :ok
def punsubscribe(conn, patterns, subscriber \\ self()) do
:gen_statem.call(conn, {:punsubscribe, List.wrap(patterns), subscriber})
end
end
|
lib/redix/pubsub.ex
| 0.882599 | 0.655515 |
pubsub.ex
|
starcoder
|
defmodule Ecto.Adapters.Poolboy do
@moduledoc """
Start a pool of connections using `poolboy`.
### Options
* `:size` - The number of connections to keep in the pool (default: 10)
* `:lazy` - When true, connections to the repo are lazily started (default: true)
* `:max_overflow` - The maximum overflow of connections (default: 0) (see poolboy docs)
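### Example
A minimal sketch of starting a pool directly (the connection module and the option values here are illustrative, not part of this module):
```elixir
{:ok, pool} =
  Ecto.Adapters.Poolboy.start_link(MyApp.Connection,
    name: :my_pool, size: 20, max_overflow: 5)
```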
"""
alias Ecto.Adapters.Poolboy.Worker
@behaviour Ecto.Adapters.Pool
@doc """
Starts a pool of connections for the given connection module and options.
* `conn_mod` - The connection module, see `Ecto.Adapters.Connection`
* `opts` - The options for the pool and the connections
"""
def start_link(conn_mod, opts) do
{:ok, _} = Application.ensure_all_started(:poolboy)
{pool_opts, conn_opts} = split_opts(opts)
:poolboy.start_link(pool_opts, {conn_mod, conn_opts})
end
@doc false
def checkout(pool, timeout) do
checkout(pool, :run, timeout)
end
@doc false
def checkin(pool, worker, _) do
:poolboy.checkin(pool, worker)
end
@doc false
def open_transaction(pool, timeout) do
checkout(pool, :transaction, timeout)
end
@doc false
def close_transaction(pool, worker, _) do
try do
Worker.checkin(worker)
after
:poolboy.checkin(pool, worker)
end
end
@doc false
def break(pool, worker, timeout) do
try do
Worker.break(worker, timeout)
after
:poolboy.checkin(pool, worker)
end
end
## Helpers
defp split_opts(opts) do
{pool_opts, conn_opts} = Keyword.split(opts, [:name, :size, :max_overflow])
{pool_name, pool_opts} = Keyword.pop(pool_opts, :name)
pool_opts = pool_opts
|> Keyword.put_new(:size, 10)
|> Keyword.put_new(:max_overflow, 0)
pool_opts = [worker_module: Worker] ++ pool_opts
# Rebind explicitly: a rebinding inside `if` does not leak out of its scope.
pool_opts = if pool_name, do: [name: {:local, pool_name}] ++ pool_opts, else: pool_opts
{pool_opts, conn_opts}
end
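# Wraps the checkout in :timer.tc/1 so callers also receive queue_time,
# the microseconds spent waiting for a worker.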
defp checkout(pool, fun, timeout) do
case :timer.tc(fn() -> do_checkout(pool, fun, timeout) end) do
{queue_time, {:ok, worker, mod_conn}} ->
{:ok, worker, mod_conn, queue_time}
{_queue_time, {:error, _} = error} ->
error
end
end
defp do_checkout(pool, fun, timeout) do
try do
:poolboy.checkout(pool, true, timeout)
catch
:exit, {:noproc, _} ->
{:error, :noproc}
else
worker ->
do_checkout(pool, worker, fun, timeout)
end
end
defp do_checkout(pool, worker, fun, timeout) do
try do
Worker.checkout(worker, fun, timeout)
catch
class, reason ->
:poolboy.checkin(pool, worker)
# __STACKTRACE__ replaces the deprecated System.stacktrace/0 inside catch clauses.
:erlang.raise(class, reason, __STACKTRACE__)
else
{:ok, mod_conn} ->
{:ok, worker, mod_conn}
{:error, err} ->
:poolboy.checkin(pool, worker)
raise err
end
end
end
|
lib/ecto/adapters/poolboy.ex
| 0.771413 | 0.49048 |
poolboy.ex
|
starcoder
|
defmodule ExDns.Message.Header do
@moduledoc """
Manages the header of a DNS message
4.1.1. Header section format
The header contains the following fields:
1 1 1 1 1 1
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| ID |
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
|QR| Opcode |AA|TC|RD|RA| Z | RCODE |
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| QDCOUNT |
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| ANCOUNT |
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| NSCOUNT |
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| ARCOUNT |
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
where:
ID A 16 bit identifier assigned by the program that
generates any kind of query. This identifier is copied
into the corresponding reply and can be used by the requester
to match up replies to outstanding queries.
QR A one bit field that specifies whether this message is a
query (0), or a response (1).
OPCODE A four bit field that specifies kind of query in this
message. This value is set by the originator of a query
and copied into the response. The values are:
0 a standard query (QUERY)
1 an inverse query (IQUERY)
2 a server status request (STATUS)
3-15 reserved for future use
AA Authoritative Answer - this bit is valid in responses,
and specifies that the responding name server is an
authority for the domain name in question section.
Note that the contents of the answer section may have
multiple owner names because of aliases. The AA bit
corresponds to the name which matches the query name, or
the first owner name in the answer section.
TC TrunCation - specifies that this message was truncated
due to length greater than that permitted on the
transmission channel.
RD Recursion Desired - this bit may be set in a query and
is copied into the response. If RD is set, it directs
the name server to pursue the query recursively.
Recursive query support is optional.
RA Recursion Available - this bit is set or cleared in a
response, and denotes whether recursive query support is
available in the name server.
Z Reserved for future use. Must be zero in all queries
and responses.
RCODE Response code - this 4 bit field is set as part of
responses. The values have the following
interpretation:
0 No error condition
1 Format error - The name server was
unable to interpret the query.
2 Server failure - The name server was
unable to process this query due to a
problem with the name server.
3 Name Error - Meaningful only for
responses from an authoritative name
server, this code signifies that the
domain name referenced in the query does
not exist.
4 Not Implemented - The name server does
not support the requested kind of query.
5 Refused - The name server refuses to
perform the specified operation for
policy reasons. For example, a name
server may not wish to provide the
information to the particular requester,
or a name server may not wish to perform
a particular operation (e.g., zone
transfer) for particular data.
6-15 Reserved for future use.
QDCOUNT an unsigned 16 bit integer specifying the number of
entries in the question section.
ANCOUNT an unsigned 16 bit integer specifying the number of
resource records in the answer section.
NSCOUNT an unsigned 16 bit integer specifying the number of name
server resource records in the authority records
section.
ARCOUNT an unsigned 16 bit integer specifying the number of
resource records in the additional records section.
"""
alias ExDns.Message
@keys [:id, :qr, :oc, :aa, :tc, :rd, :ra, :ad, :cd, :rc, :qc, :anc, :auc, :adc]
@enforce_keys @keys
defstruct @keys
@type t :: %__MODULE__{
id: non_neg_integer(),
qr: non_neg_integer(),
oc: non_neg_integer(),
aa: non_neg_integer(),
tc: non_neg_integer(),
rd: non_neg_integer(),
ra: non_neg_integer(),
ad: non_neg_integer(),
cd: non_neg_integer(),
rc: non_neg_integer(),
qc: non_neg_integer(),
anc: non_neg_integer(),
auc: non_neg_integer(),
adc: non_neg_integer()
}
@doc """
Decodes the header of a DNS message
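For example, decoding a minimal 12-byte header (the header bits below are illustrative):
```elixir
bin = <<0x1234::16, 0::1, 0::4, 0::1, 0::1, 1::1, 0::1, 0::1, 0::1, 0::1, 0::4,
  1::16, 0::16, 0::16, 0::16>>
{:ok, header, <<>>} = ExDns.Message.Header.decode(bin)
header.id
#=> 4660
```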
"""
@spec decode(message :: binary()) ::
{:ok, t(), binary()} | {:error, :invalid_dns_message_header}
def decode(
<<id::size(16), qr::size(1), oc::size(4), aa::size(1), tc::size(1), rd::size(1),
ra::size(1), 0::size(1), ad::size(1), cd::size(1), rc::size(4), qc::size(16),
anc::size(16), auc::size(16), adc::size(16), rest::binary>>
) do
header = %Message.Header{
id: id,
qr: qr,
oc: oc,
aa: aa,
tc: tc,
rd: rd,
ra: ra,
ad: ad,
cd: cd,
rc: rc,
qc: qc,
anc: anc,
auc: auc,
adc: adc
}
{:ok, header, rest}
end
def decode(_) do
{:error, :invalid_dns_message_header}
end
@doc """
Set the authoritative flag in a header
"""
def put_authoritative(%Message.Header{} = header) do
%Message.Header{header | aa: 1}
end
@doc """
Returns a boolean indicating if a message with this
header will be an authoritative response
"""
def authoritative?(%Message.Header{aa: 1}), do: true
def authoritative?(%Message.Header{aa: 0}), do: false
@doc """
Set the response bit in a header
Sets the header to indicate that a message with
this header is a response message (not a query message)
"""
def set_response(%Message.Header{} = header) do
%Message.Header{header | qr: 1}
end
@doc """
Returns a boolean indicating if a message with this
header will be a response message
"""
def response?(%Message.Header{qr: 1}), do: true
def response?(%Message.Header{qr: 0}), do: false
@doc """
Set the query bit in a header
Sets the header to indicate that a message with
this header is a query message (not a response message)
"""
def set_query(%Message.Header{} = header) do
%Message.Header{header | qr: 0}
end
@doc """
Returns a boolean indicating if a message with this
header will be a query message
"""
def query?(%Message.Header{qr: 0}), do: true
def query?(%Message.Header{qr: 1}), do: false
@doc """
Returns the decoded opcode for a DNS message header.
OPCODE A four bit field that specifies kind of query in this
message. This value is set by the originator of a query
and copied into the response. The values are:
0 a standard query (QUERY)
1 an inverse query (IQUERY) - OBSOLETE
2 a server status request (STATUS)
3 reserved for future use
4 notify
5 update
6-15 reserved for future use
"""
def opcode(%Message.Header{oc: 0}), do: :query
def opcode(%Message.Header{oc: 1}), do: :inverse_query
def opcode(%Message.Header{oc: 2}), do: :status
def opcode(%Message.Header{oc: 4}), do: :notify
def opcode(%Message.Header{oc: 5}), do: :update
@doc """
Returns whether this DNS message is a query or a response
QR A one bit field that specifies whether this message is a
query (0), or a response (1).
"""
def message_type(%Message.Header{qr: 0}), do: :query
def message_type(%Message.Header{qr: 1}), do: :response
end
|
lib/ex_dns/message/header.ex
| 0.780328 | 0.438304 |
header.ex
|
starcoder
|
defmodule Helios.Registry.Distribution.Strategy do
@moduledoc """
This module implements the interface for custom distribution strategies.
The default strategy used by Helios.Registry is a consistent hash ring implemented
via the `libring` library.
Custom strategies are expected to return a datastructure or pid which will be
passed along to any functions which need to manipulate the current distribution state.
This can be either a plain datastructure (as is the case with the libring-based strategy),
or a pid which your strategy module then uses to call a process in your own supervision tree.
For efficiency reasons, it is highly recommended to use plain data structures rather than a
process for storing the distribution state, because a process has the potential to become a bottleneck.
Ultimately this is up to the needs of your situation; just know that you can go either way.
Strategy can be set in configuration, like so:
## Config example
config :my_app, MyApp.Endpoint,
registry:[
distribution_strategy: {Helios.Registry.Distribution.Ring, :init, [
nodes: [~r/my_node@/]
]}
]
where `distribution_strategy` requires an `{m, f, a}` triplet that will be called using `Kernel.apply/3`
during registry startup
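As a sketch, here is a deliberately naive strategy that keeps a plain sorted list of nodes and hashes keys onto it (the module name, the `init/1` shape, and the hashing choice are illustrative, not part of this library):
```elixir
defmodule MyApp.ListStrategy do
  @behaviour Helios.Registry.Distribution.Strategy

  # The strategy state is a plain sorted list of node names.
  def init(_opts), do: []

  @impl true
  def add_node(nodes, node), do: Enum.sort([node | nodes])

  @impl true
  def add_node(nodes, node, _weight), do: add_node(nodes, node)

  @impl true
  def add_nodes(nodes, nodelist) do
    Enum.reduce(nodelist, nodes, fn
      {node, _weight}, acc -> add_node(acc, node)
      node, acc -> add_node(acc, node)
    end)
  end

  @impl true
  def remove_node(nodes, node), do: List.delete(nodes, node)

  @impl true
  def key_to_node([], _key), do: :undefined
  def key_to_node(nodes, key), do: Enum.at(nodes, :erlang.phash2(key, length(nodes)))
end
```
Unlike a consistent hash ring, this naive list remaps many keys whenever membership changes, which is one reason the libring-based strategy is the default.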
"""
@type reason :: String.t()
@type strategy :: term
@type weight :: pos_integer
@type nodelist :: [node() | {node(), weight}]
@type key :: term
@type t :: strategy
@doc """
Adds a node to the state of the current distribution strategy.
"""
@callback add_node(strategy, node) :: strategy | {:error, reason}
@doc """
Adds a node to the state of the current distribution strategy,
and give it a specific weighting relative to other nodes.
"""
@callback add_node(strategy, node, weight) :: strategy | {:error, reason}
@doc """
Adds a list of nodes to the state of the current distribution strategy.
The node list can be composed of both node names (atoms) or tuples containing
a node name and a weight for that node.
"""
@callback add_nodes(strategy, nodelist) :: strategy | {:error, reason}
@doc """
Removes a node from the state of the current distribution strategy.
"""
@callback remove_node(strategy, node) :: strategy | {:error, reason}
@doc """
Maps a key to a specific node via the current distribution strategy.
"""
@callback key_to_node(strategy, key) :: node() | :undefined
end
|
lib/helios/registry/distribution/strategy.ex
| 0.892369 | 0.613468 |
strategy.ex
|
starcoder
|
defmodule Vox.Transform do
@moduledoc """
The transform struct represents a coordinate transformation. Transforms
can be stacked.
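For example (a hedged sketch: `data` is any `Vox.Data` implementation, and the origin triple of side atoms mirrors the `@side_*` values used below):
```elixir
# Reinterpret the data from the opposite corner of the volume:
flipped = Vox.Transform.new(data, {:right, :top, :back})
models = Vox.Data.models(flipped)
```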
"""
defstruct [:origin, :data]
@type t :: %__MODULE__{ data: Vox.Data.t, origin: Vox.Data.origin }
@side_x [:left, :right]
@side_y [:bottom, :top]
@side_z [:front, :back]
@doc """
Create a new transformation.
"""
@spec new(Vox.Data.t, Vox.Data.origin) :: t
def new(data, origin), do: %__MODULE__{ data: data, origin: origin }
@doc false
def transform(a, b) do
{ x, w } = transform_w(a, b)
{ y, h } = transform_h(a, b)
{ z, d } = transform_d(a, b)
{ [w, h, d], { x, y, z } }
end
defp get_side(a) when a in @side_x, do: @side_x
defp get_side(a) when a in @side_y, do: @side_y
defp get_side(a) when a in @side_z, do: @side_z
defp with_side([a, b], { x, _, _ }) when x in [a, b], do: { 0, x }
defp with_side([a, b], { _, y, _ }) when y in [a, b], do: { 1, y }
defp with_side([a, b], { _, _, z }) when z in [a, b], do: { 2, z }
defp transform_w({ x, _, _ }, { x, _, _ }), do: { 0, &resolve_nop/2 }
defp transform_w({ x1, _, _ }, { x2, _, _ }) when ((x1 in @side_x and x2 in @side_x) or (x1 in @side_y and x2 in @side_y) or (x1 in @side_z and x2 in @side_z)), do: { 0, &resolve_w/2 }
defp transform_w(old, { x2, _, _ }) do
new_side = get_side(x2)
case with_side(new_side, old) do
{ n, ^x2 } -> { n, &resolve_nop/2 }
{ n, _ } -> { n, &resolve_n(&1, &2, n) }
end
end
defp transform_h({ _, y, _ }, { _, y, _ }), do: { 1, &resolve_nop/2 }
defp transform_h({ _, y1, _ }, { _, y2, _ }) when ((y1 in @side_x and y2 in @side_x) or (y1 in @side_y and y2 in @side_y) or (y1 in @side_z and y2 in @side_z)), do: { 1, &resolve_h/2 }
defp transform_h(old, { _, y2, _ }) do
new_side = get_side(y2)
case with_side(new_side, old) do
{ n, ^y2 } -> { n, &resolve_nop/2 }
{ n, _ } -> { n, &resolve_n(&1, &2, n) }
end
end
defp transform_d({ _, _, z }, { _, _, z }), do: { 2, &resolve_nop/2 }
defp transform_d({ _, _, z1 }, { _, _, z2 }) when ((z1 in @side_x and z2 in @side_x) or (z1 in @side_y and z2 in @side_y) or (z1 in @side_z and z2 in @side_z)), do: { 2, &resolve_d/2 }
defp transform_d(old, { _, _, z2 }) do
new_side = get_side(z2)
case with_side(new_side, old) do
{ n, ^z2 } -> { n, &resolve_nop/2 }
{ n, _ } -> { n, &resolve_n(&1, &2, n) }
end
end
@axes [x: 0, y: 1, z: 2]
@doc false
for { x, xi } <- @axes, { y, yi } <- @axes, { z, zi } <- @axes, (x != y) and (x != z) and (y != z) do
def swizzle({ x, y, z }, unquote(Macro.escape({ xi, yi, zi }))), do: { unquote({ x, [], nil }), unquote({ y, [], nil }), unquote({ z, [], nil }) }
end
@doc false
def resolve(point, size, [fun|ops]), do: resolve(fun.(point, size), size, ops)
def resolve(point, _, []), do: point
defp resolve_nop(point, _), do: point
defp resolve_n(point, size, 0), do: resolve_w(point, size)
defp resolve_n(point, size, 1), do: resolve_h(point, size)
defp resolve_n(point, size, 2), do: resolve_d(point, size)
defp resolve_w({ x, y, z }, { w, _, _ }), do: { (w - x) - 1, y, z }
defp resolve_h({ x, y, z }, { _, h, _ }), do: { x, (h - y) - 1, z }
defp resolve_d({ x, y, z }, { _, _, d }), do: { x, y, (d - z) - 1 }
defimpl Vox.Data, for: __MODULE__ do
def origin(%{ origin: origin }), do: origin
def models(%{ origin: origin, data: data }) do
{ transformations, order } = Vox.Transform.transform(Vox.Data.origin(data), origin)
Vox.Data.models(data)
|> Enum.map(fn model = %{ size: size } ->
voxels = Enum.map(model.voxels, fn { point, voxel } ->
{ Vox.Transform.swizzle(Vox.Transform.resolve(point, size, transformations), order), voxel }
end)
%{ model | size: Vox.Transform.swizzle(size, order), voxels: Map.new(voxels) }
end)
end
def impl(_, _), do: nil
end
end
|
lib/vox/transform.ex
| 0.83924 | 0.815233 |
transform.ex
|
starcoder
|
defmodule MeshxConsul do
@readme File.read!("docs/README.md") |> String.split("<!-- MDOC !-->") |> Enum.fetch!(1)
@moduledoc """
#{@readme}
## Configuration options
#### Consul Agent
`MeshxConsul` requires the Consul Agent API endpoint address and an ACL token to manage services and upstreams. Additionally, environment variables for the command starting the proxy binary should be configured here.
List of shell environment variables, command options and http request headers supported by Envoy Proxy: [consul.io](https://www.consul.io/commands/connect/envoy), Consul Connect Proxy: [consul.io](https://www.consul.io/commands/connect/proxy).
* `:cli_env` - shell environment variables passed to the command starting the sidecar service proxy binary. Variables are defined as tuples, the first element being the variable name and the second its value. Environment variables can be used as an alternative to proxy command arguments, and are preferred over command arguments when passing secrets, e.g. the Consul ACL token. Example:
```elixir
cli_env: [
{"CONSUL_HTTP_ADDR", "unix:///run/consul/http.sock"},
{"CONSUL_HTTP_TOKEN", ""}
]
```
Default: `[]`.
* `:uri` - `%URI{}` scheme which should be used when accessing Consul agent http(s) API endpoint. Default: `%URI{scheme: "http", host: ""}`.
<br>
`MeshxConsul` uses `:httpc.request/4` function when accessing Consul agent http endpoint; some `:httpc` configuration is required.
* `:httpc_opts` - (required) option passed directly to `:httpc.set_options/1`. It specifies options `:httpc` will use for subsequent http(s) requests.
Example:
```elixir
httpc_opts: [
ipfamily: :local,
unix_socket: '/run/consul/http.sock'
]
```
* `:httpc_headers` - `:httpc` http request headers used when running `:httpc.request/4`.
Example:
```elixir
httpc_headers: [{'X-Consul-Token', ''}]
```
Default: [].
* `:httpc_request_http_options` - `:httpc` http request options, passed as 3rd argument of `:httpc.request/4`. Default: [].
* `:httpc_request_options` - `:httpc` options, passed as 4th argument of `:httpc.request/4`. Default: [].
#### Templates
* `service_template` - default value for `template` argument of `start/4` function. Check `start/4` description below for details. Default: [].
* `upstream_template` - default value for `template` argument of `connect/3` function. Check `connect/4` description below for details. Default: [].
#### Proxy management
* `proxy_stdout_fun` - 3-arity function invoked when the binary proxy command produces `stdout` output.
The first function argument is the proxy service ID, the second is the output device as an atom `in [:stdout, :stderr]`. The third argument is the message generated by the proxy command. Example:
```elixir
fn _service_id, _dev, msg -> IO.inspect(msg) end
```
Default: function sending formatted args to `Logger.debug/2`.
* `proxy_stderr_fun` - as above `proxy_stdout_fun`, invoked when proxy command generates `stderr` output.
Default: function sending formatted args to `Logger.error/2`.
* `proxy_down_fun` - 5-arity function invoked when binary proxy command dies. Function arguments are as follows:
1. proxy service ID,
2. pid of process which was running proxy command,
3. OS pid of process which was running proxy command,
4. reason command died,
5. number of proxy command restarts so far.
Example:
```elixir
fn _service_id, _pid, _ospid, reason, _restarts -> IO.inspect(reason) end
```
Default: function sending formatted args to `Logger.error/2`.
* `max_proxy_restarts` - when binary proxy command dies it is automatically restarted; option specifies maximum number of allowed command restarts. Default: `5`.
#### TCP port generation
* `tcp_address` - option used to specify TCP address and associated ports range that will be used to automatically find new unused TCP port number when preparing mesh service or upstream endpoint with `start/4` or `connect/3`. It accepts keyword list: `[ip: ip, port_range: range]`. `ip` specifies network interface address which will be used by endpoint. `ip` should be defined as tuple and in most situations it should point at loopback interface: `{127, 0, 0, 1}`. TCP traffic passing here is unencrypted, it means that unauthorized users should never have access to this interface. It never should be a public interface, even in private networks. `port_range` specifies range in which available TCP ports will be allocated. Service ports are starting from lower range limit and are increasing, upstream ports are decreasing from upper range limit. Default: `[ip: {127, 0, 0, 1}, port_range: 1024..65535]`.
## Templates
`MeshxConsul` is using customized Mustache template system [[wikipedia](https://en.wikipedia.org/wiki/Mustache_(template_system))] to render following items:
* registration data when registering service or upstream with Consul agent,
* binary proxy command when starting sidecar proxy,
* service TTL health check worker ID.
Original Mustache system implementation assumes that rendered templates are defined as strings.
`MeshxConsul` prefers structured data (maps and lists) as templates, with individual template fields defined as strings and being Mustache rendered.
To allow variables escaping in structured data, `$` notation is added to standard Mustache specification. For example (see `"int"` template key):
```elixir
hash_params = %{"string_key" => "123abc", "int_key" => 123}
template = %{"string" => "{{string_key}}", "int" => "{{$int_key$}}", "static" => "static_string"}
```
will be rendered by `MeshxConsul` Mustache extended version to:
```elixir
%{"string" => "123abc", "int" => 123, "static" => "static_string"}
```
"""
@typedoc """
Mesh endpoint address for user service providers and upstream clients.
**Note:** UDS (Unix Domain Socket) support should be available in Consul 1.10?, see [pull #9981](https://github.com/hashicorp/consul/pull/9981).
"""
@type address() ::
{:tcp, ip :: :inet.ip_address(), port :: :inet.port_number()}
| {:uds, path :: String.t()}
@behaviour Meshx.ServiceMesh
require Logger
alias MeshxConsul.{App.C, Dummy, Proxy, Ttl}
alias MeshxConsul.Service.{Mustache, Template, Reg, Ets, GenTcpPort}
@doc """
Consul configuration info for `service_id`.
Function returns result of Consul API `GET` query at path `/agent/service/:service_id`.
```elixir
iex(1)> MeshxConsul.start({"service1", "service1-mynode-myhost"})
{:ok, "service1-mynode-myhost", {:tcp, {127, 0, 0, 1}, 1024}}
iex(2)> MeshxConsul.info("service1-mynode-myhost")
{:ok,
%{
"Address" => "",
"ContentHash" => "aaaaaa0000000000",
"Datacenter" => "my-dc",
"EnableTagOverride" => false,
"ID" => "service1-mynode-myhost",
"Meta" => %{},
"Port" => 0,
"Service" => "service1",
"Tags" => [],
"Weights" => %{"Passing" => 1, "Warning" => 1}
}}
```
"""
@spec info(service_id :: String.t() | atom()) :: {:ok, info :: map()} | {:error, error :: term()} | term()
def info(service_id), do: Reg.config(service_id)
@doc """
List services registered on current node.
```elixir
iex(1)> MeshxConsul.start({"service1", "service1-mynode-myhost"})
{:ok, "service1-mynode-myhost", {:tcp, {127, 0, 0, 1}, 1024}}
iex(2)> MeshxConsul.list
["service1-mynode-myhost"]
```
"""
@spec list() :: [String.t()]
def list(), do: Ets.list()
@doc """
List upstreams registered with default proxy service.
Default proxy service ID: "upstream-" concatenated with host name.
"""
@spec list_upstream() :: [String.t()]
def list_upstream() do
{_name, id} = Template.default_proxy()
list_upstream(id)
end
@doc """
List upstreams registered with proxy `service_id`.
```elixir
iex(1)> MeshxConsul.connect(["service1", :service2])
{:ok,
[
ok: {:tcp, {127, 0, 0, 1}, 65535},
ok: {:tcp, {127, 0, 0, 1}, 65534}
]}
iex(2)> MeshxConsul.list
["upstream-h11"]
iex(3)> MeshxConsul.list_upstream
["service1", "service2"]
iex(4)> MeshxConsul.list_upstream("upstream-h11")
["service1", "service2"]
iex(5)> MeshxConsul.list_upstream("not_existing")
{:error, :service_not_owned}
```
"""
@spec list_upstream(service_id :: String.t() | atom()) :: [String.t()]
def list_upstream(service_id) do
case Reg.get_upstreams(to_string(service_id)) do
{:ok, upstreams, _proxy_name, _proxy_id, _proxy_conf} -> Enum.map(upstreams, fn u -> Map.fetch!(u, "DestinationName") end)
e -> e
end
end
@doc """
Prepares mesh service endpoint when starting new user service provider.
## Basic use
```elixir
iex(1)> MeshxConsul.start(:service1)
{:ok, "service1-h11", {:tcp, {127, 0, 0, 1}, 1024}}
```
If successful, the function returns a tuple with the registered service ID and the mesh service endpoint `address()`. The service ID by default is the service name concatenated with the host name. The user can start a service providing both the service name and the service ID:
```elixir
iex(1)> MeshxConsul.start({"service1", "service1-mynode-myhost"})
{:ok, "service1-mynode-myhost", {:tcp, {127, 0, 0, 1}, 1024}}
```
If a service with the same service ID was already registered by the current node and the service is healthy in the Consul agent registry, the function will return `{:ok, :already_started}`.
If a service with the same service ID is registered with the Consul agent but the registration was not executed by the current node, the function will by default return `{:error, :service_not_owned}`. The user can force service re-registration with the current node by setting `force_registration?` to `true`. If the service was registered by the current node but cannot be found in the Consul agent registry, the function will return `{:error, :invalid_state}`.
If `timeout` is set greater than `0`, `start/4` will wait `timeout` milliseconds for the service to reach a "passing" state in the Consul agent after registration. If the service is not healthy and alive after `timeout`, the function will return `{:error, :service_alive_timeout}`. If `timeout` is set to `0` this check will be skipped.
## Customization
Consul agent documentation suggested reading:
* [service registration options](https://www.consul.io/docs/discovery/services),
* [health checks](https://www.consul.io/docs/discovery/checks),
* [Connect commands](https://www.consul.io/commands/connect).
If `params` function argument defines service as atom, string or `{name, id}` tuple following Mustache hash is created:
```elixir
# 1. Generate new mesh service endpoint address:
{:tcp, ip, port} = MeshxConsul.Service.GenTcpPort.new(:lo)
# 2a. Build Mustache hash if name is given:
%{"name" => name, "id" => name <> "-" <> hostname, "address" => ip, "port" => port}
# 2b. Build Mustache hash if {name, id} is given:
%{"name" => name, "id" => id, "address" => ip, "port" => port}
# Example Mustache hash:
%{"name" => "service1", "id" => "service1-my-hostname", "address" => "127.0.0.1", "port" => 1024}
```
If `params` argument is defined by user as `map()`, keys `"address"` and `"port"` are used to inject automatically generated TCP port address similarly to code on snippet above. Automatic address injection can be cancelled by assigning both `"address"` and `"port"` keys some values, `"address"` must be not empty string and `"port"` any integer value, eg.: `%{"address" => "undefined", "port" => -999_999}`. If user provided values for both `"address"` and `"port"`, they will be fetched from input map and used to build function result `{:ok, service_id, address()}` tuple.
If `template` is not defined as function argument, value of `config/config.exs` `:service_template` key will be used.
If user `template` does not contain all required keys `[:registration, :ttl, :proxy]`, missing keys will be taken from following built-in defaults:
```elixir
[
registration: %{
"ID" => "{{id}}",
"Name" => "{{name}}",
"Checks" => [
%{
"Name" => "TTL check",
"CheckID" => "ttl:{{id}}",
"TTL" => "10s"
}
],
"Connect" => %{
"SidecarService" => %{
"Proxy" => %{
"LocalServiceAddress" => "{{address}}",
"LocalServicePort" => "{{$port$}}"
}
}
}
},
ttl: %{
id: "ttl:{{id}}",
status: "passing",
ttl: 5_000
},
proxy: ["/bin/sh", "-c", "consul connect proxy -log-level err -sidecar-for {{id}}"]
]
```
### Example:
```elixir
# start service using Envoy Proxy instead of default Consul Connect Proxy:
iex(1)> MeshxConsul.start("service1", proxy: ["/bin/sh", "-c", "consul connect envoy -sidecar-for {{id}} -- -l error"])
{:ok, "service1-h11", {:tcp, {127, 0, 0, 1}, 1024}}
```
"""
@spec start(
params ::
(name :: atom() | String.t())
| {name :: atom() | String.t(), id :: atom() | String.t()}
| map(),
template ::
[
registration: map(),
ttl: nil | %{id: String.t(), status: String.t(), ttl: pos_integer()},
proxy: nil | [String.t()]
],
force_registration? :: boolean(),
timeout :: non_neg_integer()
) ::
{:ok, service_id :: String.t(), addr :: address()}
| {:ok, :already_started}
| {:error, :invalid_state}
| {:error, :service_not_owned}
| {:error, :service_alive_timeout}
| term()
def start(params, template \\ C.service_template(), force_registration? \\ false, timeout \\ 5000)
def start(name, template, force_registration?, timeout) when is_atom(name) or is_bitstring(name) do
name = to_string(name)
params = %{"name" => name, "id" => name <> "-" <> Template.default_nodename()}
start(params, template, force_registration?, timeout)
end
def start({name, id}, template, force_registration?, timeout)
when (is_atom(name) or is_bitstring(name)) and (is_atom(id) or is_bitstring(id)) do
params = %{"name" => to_string(name), "id" => to_string(id)}
start(params, template, force_registration?, timeout)
end
def start(params, template, force_registration?, timeout) do
{params, address} = build_address(params, :lo)
svc = Template.validate_service!(template)
svc_reg = Keyword.fetch!(svc, :registration)
ttl_check = Keyword.fetch!(svc, :ttl)
proxy_cmd = Keyword.fetch!(svc, :proxy)
with {:ok, reg} <- Mustache.render2map(svc_reg, params),
{:ok, service_id} <- fetch_id(reg) do
owned? = Ets.has_service?(service_id)
passing? = Reg.passing?(service_id)
cond do
owned? and passing? ->
{:ok, :already_started}
owned? and !passing? ->
{:error, :invalid_state}
!owned? and passing? and !force_registration? ->
{:error, :service_not_owned}
true ->
with {:ok, ttl} <- Mustache.ext_render2map(ttl_check, params, :atoms),
{:ok, proxy} <- Mustache.ext_render(proxy_cmd, params),
:ok <- Reg.register_service(reg, true),
{:ok, _pid} <- Ttl.start(service_id, ttl),
{:ok, _pid} <- Proxy.start(service_id, proxy),
:ok <- wait_for_service(service_id, timeout),
true <- Ets.insert(service_id) do
{:ok, service_id, address}
else
err ->
Ets.delete(service_id)
Proxy.stop(service_id)
Ttl.stop(service_id)
Reg.deregister_proxy(service_id)
Reg.deregister_service(service_id)
log_start_err(params, template, address, force_registration?, err)
err
end
end
else
err ->
log_start_err(params, template, address, force_registration?, err)
err
end
end
@doc """
Prepares mesh upstream endpoint for new user upstream client connection.
## Basic use
Basic use of `connect/3` requires `upstream_params` argument to be a list of upstream names defined as strings or atoms:
```elixir
iex(1)> MeshxConsul.connect(["service1", :service2])
{:ok,
[
ok: {:tcp, {127, 0, 0, 1}, 65535},
ok: {:tcp, {127, 0, 0, 1}, 65534}
]}
```
Function returns a list of tuples (keyword list), one result tuple per `upstream_params` element, with ordering preserved. Tuple elements are `{:ok, address()}` if upstream addition was successful or `{:error, reason}` if the operation failed for the given upstream. If an upstream is already registered with `proxy`, the function will return `{:ok, address()}` with the mesh upstream endpoint `address()` fetched from the Consul `proxy` sidecar registration.
## Customization
Customization of `template` function argument requires understanding of [Consul upstream service configuration options](https://www.consul.io/docs/connect/registration/service-registration#upstream-configuration-reference).
If `upstream_params` list element is defined as upstream name (atom or string), it is used to create following Mustache hash:
```elixir
# 1. Generate new mesh upstream endpoint address:
{:tcp, ip, port} = MeshxConsul.Service.GenTcpPort.new(:hi)
# 2. Build Mustache hash:
%{"name" => to_string(upstream_name), "address" => ip, "port" => port}
# Example Mustache hash:
%{"name" => "service1", "address" => "127.0.0.1", "port" => 65535}
```
If `upstream_params` element is defined by user as `map()`, keys `"address"` and `"port"` are used to inject automatically generated TCP port address similarly to code on snippet above. Automatic address injection can be cancelled by assigning both `"address"` and `"port"` keys some values, `"address"` must be not empty string and `"port"` any integer value, eg.: `%{"address" => "undefined", "port" => -999_999}`. If user provided values for both `"address"` and `"port"`, they will be fetched from input map and used to build result `{:ok, address()}` tuple for given upstream.
If Mustache upstream `template` is not defined as function argument, `:upstream_template` option value defined in `config/config.exs` will be used. If both are undefined (`nil`, empty map or empty list), following built-in upstream Mustache `template` will be used:
```elixir
%{
"DestinationName" => "{{name}}",
"LocalBindAddress" => "{{address}}",
"LocalBindPort" => "{{$port$}}"
}
```
Using Mustache hash from previous snippet, above template would register with `proxy` sidecar service following upstream:
```elixir
{
"DestinationType":"service",
"DestinationName":"service1",
"LocalBindAddress":"127.0.0.1",
"LocalBindPort":65535,
"MeshGateway":{}
}
```
**Note**: fields required by Consul in upstream registration `template`: `"DestinationName" (string)` and `"LocalBindPort" (int)`.
Last `connect/3` function argument `proxy` specifies service registered with sidecar-proxy as a tuple `{proxy_service_name, proxy_service_id}`. Sidecar proxy service will be used as parent service for all upstreams in `upstream_params`. If proxy service name/id is not provided, it will be generated by concatenation of prefix `"upstream-"` with host name. If proxy service doesn't exist, new service will be started by running `MeshxConsul.start({proxy_service_name, proxy_service_id})`. If `start/4` fails, generated error will cascade to `connect/3` and upstreams will not be added to service mesh.
Running `MeshxConsul.connect(["service1"])` on host `h11` should register following services with Consul agent:
```elixir
{
"upstream-h11": {
"ID": "upstream-h11",
"Service": "upstream-h11",
"Tags": [],
"Meta": {},
"Port": 0,
"Address": "",
"Weights": {
"Passing": 1,
"Warning": 1
},
"EnableTagOverride": false,
"Datacenter": "my-dc"
},
"upstream-h11-sidecar-proxy": {
"Kind": "connect-proxy",
"ID": "upstream-h11-sidecar-proxy",
"Service": "upstream-h11-sidecar-proxy",
"Tags": [],
"Meta": {},
"Port": 21001,
"Address": "",
"Weights": {
"Passing": 1,
"Warning": 1
},
"EnableTagOverride": false,
"Proxy": {
"DestinationServiceName": "upstream-h11",
"DestinationServiceID": "upstream-h11",
"LocalServiceAddress": "127.0.0.1",
"LocalServicePort": 1024,
"Upstreams": [
{
"DestinationType": "service",
"DestinationName": "service1",
"LocalBindAddress": "127.0.0.1",
"LocalBindPort": 65535,
"MeshGateway": {}
}
],
"MeshGateway": {},
"Expose": {}
},
"Datacenter": "my-dc"
}
}
```
"""
@spec connect(
upstream_params :: [upstream :: atom() | String.t() | map()],
template :: map(),
proxy :: nil | {proxy_service_name :: String.t() | atom(), proxy_service_id :: String.t() | atom()}
) ::
{:ok, []}
| {:ok, [{:ok, addr :: address()} | {:error, err :: term()}]}
| {:error, :invalid_state}
| {:error, :service_not_owned}
| term()
def connect(
upstream_params,
template \\ C.upstream_template(),
{proxy_service_name, proxy_service_id} = _proxy \\ Template.default_proxy()
)
when is_list(upstream_params) do
proxy_service_name = to_string(proxy_service_name)
proxy_service_id = to_string(proxy_service_id)
case start({proxy_service_name, proxy_service_id}) do
{:ok, :already_started} ->
add_upstreams(upstream_params, template, proxy_service_id)
{:ok, proxy_service_id, address} ->
Dummy.start(proxy_service_id, address)
add_upstreams(upstream_params, template, proxy_service_id)
e ->
e
end
end
@doc """
Stops service `service_id` started with `start/4`.
Function reverses actions performed by `start/4`:
* proxy binary command is terminated,
* TTL health check worker is stopped,
* service is deregistered with Consul agent.
If service was not started by current node function returns: `{:error, :service_not_owned}`.
```elixir
iex(1)> MeshxConsul.start(:service1)
{:ok, "service1-h11", {:tcp, {127, 0, 0, 1}, 1024}}
iex(2)> MeshxConsul.stop("service1-h11")
:ok
iex(3)> MeshxConsul.stop("service1-h11")
{:error, :service_not_owned}
```
"""
@spec stop(service_id :: String.t() | atom()) :: :ok | {:error, :service_not_owned}
def stop(service_id) do
service_id = to_string(service_id)
if Ets.has_service?(service_id) do
Ets.delete(service_id)
Proxy.stop(service_id)
Ttl.stop(service_id)
Reg.deregister_proxy(service_id)
Reg.deregister_service(service_id)
else
{:error, :service_not_owned}
end
end
@doc """
Disconnects mesh `upstreams` endpoints created earlier with `connect/3`.
Function will deregister `upstreams` list from `proxy_service_id` parent sidecar proxy service:
```elixir
iex(1)> MeshxConsul.connect(["service1", "service2"])
{:ok,
[
ok: {:tcp, {127, 0, 0, 1}, 65535},
ok: {:tcp, {127, 0, 0, 1}, 65534}
]}
iex(2)> MeshxConsul.list_upstream
["service1", "service2"]
iex(3)> MeshxConsul.disconnect(["service1", "service2", "service3"])
{:ok, ["service2", "service1"]}
iex(4)> MeshxConsul.list_upstream
[]
```
Function returns list of disconnected upstreams.
If `proxy_service_id` is not provided as function argument, default proxy ID will be used: "upstream-" concatenated with host name.
Deregistering upstream from sidecar proxy service doesn't close established connections. Closing existing connections can be done by:
* cold proxy restart: set `restart_proxy?` function argument to true,
* hot proxy restart if supported by proxy, example script for Envoy: [[github](https://github.com/envoyproxy/envoy/blob/main/restarter/hot-restarter.py)].
"""
@spec disconnect(
upstreams :: [upstream :: atom() | String.t()],
proxy_service_id :: nil | atom() | String.t(),
restart_proxy? :: boolean()
) ::
{:ok, []}
| {:ok, [deleted_upstream_name :: String.t()]}
| (err :: term())
def disconnect(upstreams, proxy_service_id \\ nil, restart_proxy? \\ false) when is_list(upstreams) do
proxy_service_id =
if is_nil(proxy_service_id) do
{_proxy_service_name, proxy_service_id} = Template.default_proxy()
proxy_service_id
else
to_string(proxy_service_id)
end
with {:ok, current_upstreams, proxy_name, _proxy_id, proxy_conf} <- Reg.get_upstreams(proxy_service_id),
{new_upstreams, deleted} <- del_upstreams(current_upstreams, upstreams),
false <- Enum.empty?(deleted),
:ok <- update_upstreams(proxy_conf, proxy_name, new_upstreams) do
if restart_proxy?, do: Proxy.restart(proxy_service_id)
{:ok, deleted}
else
true -> {:ok, []}
err -> err
end
end
defp add_upstreams(params, template, proxy_service_id) do
case Reg.get_upstreams(proxy_service_id) do
{:ok, curr_u, proxy_name, _proxy_id, proxy_conf} ->
tpl = Template.validate_upstream!(template)
curr_u_names = Enum.map(curr_u, fn u -> Map.fetch!(u, "DestinationName") end)
{new_upstreams, addresses} = add_u(params, tpl, curr_u, curr_u_names)
with false <- curr_u == new_upstreams,
:ok <- update_upstreams(proxy_conf, proxy_name, new_upstreams) do
{:ok, addresses}
else
true -> {:ok, addresses}
e -> e
end
e ->
e
end
end
defp add_u(params, tpl, curr_u, curr_u_names, added \\ [])
defp add_u([u | tail], tpl, curr_u, curr_u_names, added) when is_atom(u) or is_bitstring(u),
do:
add_u(
[%{"name" => to_string(u)} | tail],
tpl,
curr_u,
curr_u_names,
added
)
defp add_u([u | tail], tpl, curr_u, curr_u_names, added) do
tmp_u = maybe_inject_fake_address(u)
case Mustache.render2map(tpl, tmp_u) do
{:ok, tmp_render} ->
name = Map.fetch!(tmp_render, "DestinationName")
case Enum.find_index(curr_u_names, fn n -> n == name end) do
nil ->
{param, address} = build_address(u, :hi)
case Mustache.render2map(tpl, param) do
{:ok, render} ->
name = Map.fetch!(render, "DestinationName")
add_u(tail, tpl, curr_u ++ [render], curr_u_names ++ [name], added ++ [{:ok, address}])
err ->
add_u(tail, tpl, curr_u, curr_u_names, added ++ [{:error, err}])
end
index ->
existing_u = Enum.at(curr_u, index)
{:ok, ip} = Map.fetch!(existing_u, "LocalBindAddress") |> to_charlist() |> :inet.parse_address()
port = Map.fetch!(existing_u, "LocalBindPort")
add_u(tail, tpl, curr_u, curr_u_names, added ++ [{:ok, {:tcp, ip, port}}])
end
{:error, e} ->
{:error, e}
end
end
defp add_u([], _tpl, curr_u, _curr_u_names, added), do: {curr_u, added}
defp del_upstreams(c_upstreams, upstreams, new_upstreams \\ [], deleted \\ [])
defp del_upstreams([u | tail], upstreams, new_upstreams, deleted) do
u_name = Map.fetch!(u, "DestinationName")
if Enum.member?(upstreams, u_name),
do: del_upstreams(tail, upstreams, new_upstreams, [u_name] ++ deleted),
else: del_upstreams(tail, upstreams, [u] ++ new_upstreams, deleted)
end
defp del_upstreams([], _upstreams, new_upstreams, deleted), do: {new_upstreams, deleted}
defp update_upstreams(proxy_conf, proxy_name, new_upstreams) do
proxy_conf
|> Map.drop(["ContentHash", "Datacenter", "Service"])
|> Map.put("Name", proxy_name)
|> put_in(["Proxy", "Upstreams"], new_upstreams)
|> Reg.register_service(false)
end
defp fetch_id(service_reg) do
service_id = Map.get(service_reg, "ID") || Map.get(service_reg, "Name")
if is_nil(service_id), do: {:error, :missing_service_name}, else: {:ok, service_id}
end
defp log_start_err(params, template, address, force_registration?, err),
do:
Logger.error("""
[#{__MODULE__}]: Problem starting service using registration params:
#{inspect(params)}
registration template:
#{inspect(template)}
address: #{inspect(address)},
force registration: #{inspect(force_registration?)}
Error:
#{inspect(err)}
""")
defp build_address(params, tcp_range) do
with {:ok, address} <- Map.fetch(params, "address"),
{:ok, port} <- Map.fetch(params, "port"),
true <- is_bitstring(address) and address != "" and is_integer(port) do
{params, {:tcp, address, port}}
else
_ ->
{:tcp, ip, port} = address = GenTcpPort.new(tcp_range)
{Map.put(params, "address", :inet.ntoa(ip) |> to_string()) |> Map.put("port", port), address}
end
end
defp maybe_inject_fake_address(u) when is_map(u) do
with {:ok, address} <- Map.fetch(u, "address"),
{:ok, port} <- Map.fetch(u, "port"),
true <- is_bitstring(address) and address != "" and is_integer(port) do
u
else
_ ->
Map.put(u, "address", "") |> Map.put("port", -999_999)
end
end
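# Polls the agent in timeout/@healthy_retries steps until the service health check is passing.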
@healthy_retries 100
defp wait_for_service(_service_id, 0), do: :ok
defp wait_for_service(service_id, timeout, retry \\ 0) do
if Reg.passing?(service_id) do
:ok
else
if retry < @healthy_retries do
Process.sleep(round(timeout / @healthy_retries))
wait_for_service(service_id, timeout, retry + 1)
else
{:error, :service_alive_timeout}
end
end
end
end
|
lib/mesh_consul.ex
| 0.860911 | 0.856392 |
mesh_consul.ex
|
starcoder
|
defmodule ShEx.ShapeMap do
@moduledoc """
A finite set of `ShEx.ShapeMap.Association`s used to specify the nodes on which validations should be performed and for the result of validations.
A ShapeMap can be either created with `ShEx.ShapeMap.new/1` or loaded from a
string representation in the standard ShapeMap format with `ShEx.ShapeMap.decode/2`
or a JSON-based format `ShEx.ShapeMap.from_json/2`.
The set of associations can be accessed with the `associations/1` function as
a list. `ShEx.ShapeMap` also implements the `Enumerable` protocol over this
set of associations, so you can use it with all of Elixir's `Enum` functions.
After the validation the associations get partitioned into two fields on the
`ShEx.ShapeMap` struct: `conformant` and `nonconformant`.
see <https://shexspec.github.io/shape-map/>
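For example, building a fixed ShapeMap from one node/shape pair (the IRIs are illustrative):
```elixir
shape_map = ShEx.ShapeMap.new([{"http://example.com/#n1", "http://example.com/#Shape1"}])
ShEx.ShapeMap.fixed?(shape_map)
#=> true
```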
"""
defmodule Association do
@moduledoc """
A ShapeMap association specifies the shape a node must conform to and contains the results of a validation.
It is a structure consisting of the following fields:
- `node`: an RDF node, or a triple pattern which is used to select RDF nodes
- `shape`: label of a shape expression or the atom `:start` for the start shape expression
The following fields are just filled in the case of a result ShapeMap, i.e.
after the validation:
- `status`: `:conformant` if `node` conforms the `shape`, otherwise `:nonconformant`
- `reason`: a list of `ShEx.Violation` structs stating the reasons for failure or success
- `app_info`: currently not used
ShapeMap associations should not be created manually, but will be created
implicitly on `ShEx.ShapeMap.new/1` or `ShEx.ShapeMap.add/2`.
"""
defstruct [
:node,
:shape,
:status,
:reason,
:app_info
]
@type status :: :conformant | :nonconformant | nil
@doc false
def new(association)
# This is for the JSON-encoded ShapeMap format from the test suite
def new({node, %{"shape" => shape, "result" => result}}) do
%__MODULE__{
new(node, shape)
| status:
if result == false do
:nonconformant
else
:conformant
end
}
end
def new({node, shape}), do: new(node, shape)
def new(%ShEx.ShapeMap.Association{} = association), do: association
def new(%{node: node, shape: shape}), do: new(node, shape)
# This is for the JSON-encoded ShapeMap format from the test suite
def new(%{"node" => node, "shape" => shape}), do: new(node, shape)
@doc false
def new(node, shape) do
%__MODULE__{
node: coerce_node(node),
shape: coerce_shape(shape)
}
end
defp coerce_node({subject, predicate, object}) do
{
if(subject in [:focus, :_], do: subject, else: RDF.Statement.coerce_subject(subject)),
RDF.Statement.coerce_predicate(predicate),
if(object in [:focus, :_], do: object, else: RDF.Statement.coerce_object(object))
}
end
defp coerce_node(node) do
cond do
not is_atom(node) and RDF.term?(node) ->
node
is_atom(node) or (is_binary(node) and String.contains?(node, ":")) ->
RDF.iri(node)
true ->
RDF.Term.coerce(node)
end
end
defp coerce_shape(shape) do
cond do
# we allow maps to pass unchanged because we create intermediary associations containing shapes directly
is_map(shape) or (not is_atom(shape) and RDF.term?(shape)) ->
shape
shape in [:start, "START"] ->
:start
true ->
RDF.iri(shape)
end
end
@doc """
Return `true` if `association` is a query ShapeMap association, i.e. does not contain results.
Note: Every fixed ShapeMap association is also a query ShapeMap association.
"""
def query?(%__MODULE__{} = association),
do: is_tuple(association.node) and not result?(association)
@doc """
Return `true` if `association` is a fixed ShapeMap association, i.e. it neither has a triple pattern as node nor contains results.
"""
def fixed?(%__MODULE__{} = association),
do: not (result?(association) or query?(association))
@doc """
Return `true` if `association` is a result ShapeMap association, i.e. contains results.
"""
def result?(%__MODULE__{status: status}), do: not is_nil(status)
@doc false
def conform(association)
def conform(%__MODULE__{status: nil} = association),
do: %__MODULE__{association | status: :conformant}
def conform(%__MODULE__{} = association),
do: association
@doc false
def violation(%__MODULE__{} = association, reasons, app_infos \\ nil) do
%__MODULE__{
association
| status: :nonconformant,
reason:
if is_list(reasons) do
reasons ++ List.wrap(association.reason)
else
[reasons | List.wrap(association.reason)]
end
# TODO: save app_infos
}
end
end
defstruct [:type, :conformant, :nonconformant]
@type type :: :fixed | :query | :result
@doc """
Creates an empty ShapeMap.
"""
def new() do
%__MODULE__{type: :fixed}
end
@doc """
Creates an ShapeMap with the `associations` given as an enumerable.
"""
def new(associations) do
Enum.reduce(associations, new(), &add(&2, &1))
end
@doc """
Loads a ShapeMap from the standard representation format.
Returns an `ok` resp. `error` tuple.
See <https://shexspec.github.io/shape-map/>
"""
defdelegate decode(content, opts \\ []), to: ShEx.ShapeMap.Decoder
@doc """
Loads a ShapeMap from the standard representation format and fails in the error case.
Same as `decode/2` but returns the ShapeMap directly (not in an `ok` tuple).
"""
def decode!(content, opts \\ []) do
case decode(content, opts) do
{:ok, shape_map} -> shape_map
{:error, error} -> raise error
end
end
@doc """
Loads a ShapeMap from a JSON representation.
This format is not clearly specified. It's currently used only to make test
suite pass, where this format is used.
"""
def from_json(content, options \\ []) do
with {:ok, json_objects} <- Jason.decode(content, options) do
{:ok, ShEx.ShapeMap.new(json_objects)}
end
end
@doc """
Adds a single or list of ShapeMap `associations` to `shape_map`.
"""
def add(shape_map, associations)
def add(shape_map, associations) when is_list(associations) do
Enum.reduce(associations, shape_map, &add(&2, &1))
end
def add(shape_map, {node, associations}) when is_list(associations) do
Enum.reduce(associations, shape_map, fn association, shape_map ->
add(shape_map, {node, association})
end)
end
def add(shape_map, association) do
association = Association.new(association)
shape_map
|> Map.update!(association.status || :conformant, fn
nil -> [association]
list -> [association | list]
end)
|> update_type(association)
end
defp update_type(%__MODULE__{type: :fixed, nonconformant: nonconformant} = shape_map, _)
when is_list(nonconformant) and length(nonconformant) > 0,
do: %__MODULE__{shape_map | type: :result}
defp update_type(%__MODULE__{type: :query, nonconformant: nonconformant}, _)
when is_list(nonconformant) and length(nonconformant) > 0,
do: raise("a result shape map can not contain triple patterns")
defp update_type(
%__MODULE__{type: :fixed} = shape_map,
%Association{node: node} = association
)
when is_tuple(node),
do: %__MODULE__{shape_map | type: :query} |> update_type(association)
defp update_type(shape_map, _), do: shape_map
@doc """
Returns all associations in `shape_map` as a list.
"""
def associations(shape_map) do
List.wrap(shape_map.conformant) ++ List.wrap(shape_map.nonconformant)
end
@doc """
Returns if all association in `shape_map` were conformant after a validation.
Note: A non-result ShapeMap is always conformant.
"""
def conformant?(shape_map)
def conformant?(%__MODULE__{nonconformant: nil}), do: true
def conformant?(%__MODULE__{nonconformant: []}), do: true
def conformant?(%__MODULE__{}), do: false
@doc """
Return `true` if `shape_map` is a fixed ShapeMap, i.e. doesn't contain triple patterns (query ShapeMap) or results (result ShapeMap).
"""
def fixed?(shape_map)
def fixed?(%__MODULE__{type: :fixed}), do: true
def fixed?(%__MODULE__{type: type}) when type in ~w[query result]a, do: false
def fixed?(_), do: nil
@doc """
Return `true` if `shape_map` is a query ShapeMap, i.e. does not contain results (result ShapeMap).
Note: Every fixed ShapeMap is also a query ShapeMap.
"""
def query?(shape_map)
def query?(%__MODULE__{type: type}) when type in ~w[fixed query]a, do: true
def query?(%__MODULE__{type: :result}), do: false
def query?(_), do: nil
@doc """
Return `true` if `shape_map` is a result ShapeMap.
"""
def result?(shape_map)
def result?(%__MODULE__{type: :result}), do: true
def result?(%__MODULE__{type: type}) when type in ~w[fixed query]a, do: false
def result?(_), do: nil
@doc """
Converts a query ShapeMap into a fixed ShapeMap by resolving all triple patterns against the given `graph`.
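A minimal sketch (the `graph`, the predicate IRI, and the use of the `:start` shape are illustrative):
```elixir
query_map = ShEx.ShapeMap.new([{{:focus, "http://example.com/#p", :_}, :start}])
{:ok, fixed_map} = ShEx.ShapeMap.to_fixed(query_map, graph)
```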
"""
def to_fixed(shape_map, graph)
def to_fixed(%__MODULE__{type: :query} = shape_map, graph) do
{:ok,
shape_map
|> Stream.flat_map(&resolve_triple_pattern(&1, graph))
|> MapSet.new()
|> new()}
end
def to_fixed(%__MODULE__{type: :fixed} = shape_map, _),
do: {:ok, shape_map}
def to_fixed(%__MODULE__{type: :result}, _),
do: {:error, "a result shape map is not convertible to a fixed shape map"}
defp resolve_triple_pattern(
%ShEx.ShapeMap.Association{node: triple_pattern, shape: shape},
graph
)
when is_tuple(triple_pattern) do
triple_pattern
|> do_resolve_triple_pattern(graph)
|> Enum.map(fn node -> ShEx.ShapeMap.Association.new(node, shape) end)
end
defp resolve_triple_pattern(%ShEx.ShapeMap.Association{} = association, _),
# returned as a list so Stream.flat_map/2 in to_fixed/2 can enumerate it
do: [association]
defp do_resolve_triple_pattern({:focus, predicate, :_}, graph) do
graph
|> Stream.map(fn
{subject, ^predicate, _} -> subject
_ -> nil
end)
|> post_process_query()
end
defp do_resolve_triple_pattern({:_, predicate, :focus}, graph) do
graph
|> Stream.map(fn
{_, ^predicate, object} -> object
_ -> nil
end)
|> post_process_query()
end
defp do_resolve_triple_pattern({subject, predicate, :focus}, graph) do
if description = RDF.Graph.description(graph, subject) do
RDF.Description.get(description, predicate, [])
else
[]
end
end
defp do_resolve_triple_pattern({:focus, predicate, object}, graph) do
graph
|> Stream.map(fn
{subject, ^predicate, ^object} -> subject
_ -> nil
end)
|> post_process_query()
end
defp post_process_query(nodes) do
nodes
|> MapSet.new()
|> MapSet.delete(nil)
|> MapSet.to_list()
end
defimpl Enumerable do
def reduce(shape_map, acc, fun),
do: shape_map |> ShEx.ShapeMap.associations() |> Enumerable.reduce(acc, fun)
def member?(shape_map, association),
do: {:ok, association in ShEx.ShapeMap.associations(shape_map)}
def count(shape_map),
do: {:ok, shape_map |> ShEx.ShapeMap.associations() |> length()}
def slice(_shape_map), do: {:error, __MODULE__}
end
end
|
lib/shex/shape_map.ex
| 0.879961 | 0.849097 |
shape_map.ex
|
starcoder
|
defmodule Timex.DateFormat do
@moduledoc """
Date formatting and parsing.
This module provides an interface and core implementation for converting date
values into strings (formatting) or the other way around (parsing) according
to the specified template.
Multiple template formats are supported, each one provided by a separate
module. One can also implement custom formatters for use with this module.
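For example, using the strftime template format (a minimal sketch; `Timex.Date.now/0` returning a `%DateTime{}` is assumed from the 0.x-era API):
```elixir
{:ok, string} = Timex.DateFormat.format(Timex.Date.now(), "%Y-%m-%d %H:%M", :strftime)
{:ok, date} = Timex.DateFormat.parse(string, "%Y-%m-%d %H:%M", :strftime)
```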
"""
alias Timex.DateTime
alias Timex.DateFormat.Formatters.Formatter
alias Timex.DateFormat.Formatters.StrftimeFormatter
alias Timex.Parsers.DateFormat.Parser
alias Timex.Parsers.DateFormat.StrftimeParser
@doc """
Converts date values to strings according to the given template (aka format string).
"""
@spec format(%DateTime{}, String.t) :: {:ok, String.t} | {:error, String.t}
defdelegate format(date, format_string), to: Formatter
@doc """
Same as `format/2`, but takes a custom formatter.
"""
@spec format(%DateTime{}, String.t, atom) :: {:ok, String.t} | {:error, String.t}
def format(%DateTime{} = date, format_string, :default),
do: Formatter.format(date, format_string)
def format(%DateTime{} = date, format_string, :strftime),
do: Formatter.format(date, format_string, StrftimeFormatter)
defdelegate format(date, format_string, formatter), to: Formatter
@doc """
Raising version of `format/2`. Returns a string with formatted date or raises a `FormatError`.
"""
@spec format!(%DateTime{}, String.t) :: String.t | no_return
defdelegate format!(date, format_string), to: Formatter
@doc """
Raising version of `format/3`. Returns a string with formatted date or raises a `FormatError`.
"""
@spec format!(%DateTime{}, String.t, atom) :: String.t | no_return
def format!(%DateTime{} = date, format_string, :default),
do: Formatter.format!(date, format_string)
def format!(%DateTime{} = date, format_string, :strftime),
do: Formatter.format!(date, format_string, StrftimeFormatter)
defdelegate format!(date, format_string, formatter), to: Formatter
@doc """
Parses the date encoded in `string` according to the template.
"""
@spec parse(String.t, String.t) :: {:ok, %DateTime{}} | {:error, term}
defdelegate parse(date_string, format_string), to: Parser
@doc """
Parses the date encoded in `string` according to the template by using the
provided formatter.
"""
@spec parse(String.t, String.t, atom) :: {:ok, %DateTime{}} | {:error, term}
def parse(date_string, format_string, :default), do: Parser.parse(date_string, format_string)
def parse(date_string, format_string, :strftime), do: Parser.parse(date_string, format_string, StrftimeParser)
defdelegate parse(date_string, format_string, parser), to: Parser
@doc """
Raising version of `parse/2`. Returns a DateTime struct, or raises a `ParseError`.
"""
@spec parse!(String.t, String.t) :: %DateTime{} | no_return
defdelegate parse!(date_string, format_string), to: Parser
@doc """
Raising version of `parse/3`. Returns a DateTime struct, or raises a `ParseError`.
"""
@spec parse!(String.t, String.t, atom) :: %DateTime{} | no_return
def parse!(date_string, format_string, :default), do: Parser.parse!(date_string, format_string)
def parse!(date_string, format_string, :strftime), do: Parser.parse!(date_string, format_string, StrftimeParser)
defdelegate parse!(date_string, format_string, parser), to: Parser
@doc """
Verifies the validity of the given format string. The default formatter is assumed.
Returns `:ok` if the format string is clean, `{ :error, <reason> }` otherwise.
"""
@spec validate(String.t) :: :ok | {:error, term}
defdelegate validate(format_string), to: Formatter
@doc """
Verifies the validity of the given format string according to the provided
formatter.
Returns `:ok` if the format string is clean, `{ :error, <reason> }` otherwise.
"""
@spec validate(String.t, atom) :: :ok | {:error, term}
def validate(format_string, :default), do: Formatter.validate(format_string)
def validate(format_string, :strftime), do: Formatter.validate(format_string, StrftimeFormatter)
defdelegate validate(format_string, formatter), to: Formatter
end
|
lib/dateformat/dateformat.ex
| 0.921596 | 0.630287 |
dateformat.ex
|
starcoder
|
defmodule Re.Addresses.Neighborhoods do
@moduledoc """
Context for neighborhoods.
"""
import Ecto.Query
alias Re.{
Address,
Addresses.District,
Listing,
Repo,
Slugs
}
@all_query from(
a in Address,
join: l in Listing,
where: l.address_id == a.id and l.status == "active",
select: a.neighborhood,
distinct: a.neighborhood
)
def all, do: Repo.all(@all_query)
def get_description(address) do
case Repo.get_by(District,
state: address.state,
city: address.city,
name: address.neighborhood
) do
nil -> {:error, :not_found}
description -> {:ok, description}
end
end
def districts, do: Repo.all(from(d in District, where: d.status == "active"))
def get_district(params) do
case Repo.get_by(District, params) do
nil -> {:error, :not_found}
district -> {:ok, district}
end
end
@doc """
Temporary mapping to find nearby neighborhood
"""
def nearby("Botafogo"), do: "Humaitá"
def nearby("Copacabana"), do: "Ipanema"
def nearby("Flamengo"), do: "Laranjeiras"
def nearby("Gávea"), do: "Leblon"
def nearby("Humaitá"), do: "Botafogo"
def nearby("Ipanema"), do: "Copacabana"
def nearby("Itanhangá"), do: "São Conrado"
def nearby("Jardim Botânico"), do: "Lagoa"
def nearby("Lagoa"), do: "Humaitá"
def nearby("Laranjeiras"), do: "Flamengo"
def nearby("Leblon"), do: "Gávea"
def nearby("São Conrado"), do: "Itanhangá"
@covered_neighborhoods [
%{state: "RJ", neighborhood: "Humaitá", city: "Rio de Janeiro"},
%{state: "RJ", neighborhood: "Copacabana", city: "Rio de Janeiro"},
%{state: "RJ", neighborhood: "Botafogo", city: "Rio de Janeiro"},
%{state: "RJ", neighborhood: "Catete", city: "Rio de Janeiro"},
%{state: "RJ", neighborhood: "Cosme Velho", city: "Rio de Janeiro"},
%{state: "RJ", neighborhood: "Flamengo", city: "Rio de Janeiro"},
%{state: "RJ", neighborhood: "Gávea", city: "Rio de Janeiro"},
%{state: "RJ", neighborhood: "Ipanema", city: "Rio de Janeiro"},
%{
state: "RJ",
neighborhood: "Jardim Botânico",
city: "Rio de Janeiro"
},
%{state: "RJ", neighborhood: "Joá", city: "Rio de Janeiro"},
%{state: "RJ", neighborhood: "Lagoa", city: "Rio de Janeiro"},
%{state: "RJ", neighborhood: "Laranjeiras", city: "Rio de Janeiro"},
%{state: "RJ", neighborhood: "Leblon", city: "Rio de Janeiro"},
%{state: "RJ", neighborhood: "Leme", city: "Rio de Janeiro"},
%{state: "RJ", neighborhood: "São Conrado", city: "Rio de Janeiro"},
%{state: "RJ", neighborhood: "Urca", city: "Rio de Janeiro"},
%{state: "SP", neighborhood: "Perdizes", city: "São Paulo"},
%{state: "SP", neighborhood: "Vila Pompéia", city: "São Paulo"},
%{state: "SP", neighborhood: "Pompeia", city: "São Paulo"},
%{state: "SP", neighborhood: "Pinheiros", city: "São Paulo"},
%{state: "SP", neighborhood: "Sumaré", city: "São Paulo"},
%{state: "SP", neighborhood: "Sumarezinho", city: "São Paulo"},
%{state: "SP", neighborhood: "Vila Anglo Brasileira", city: "São Paulo"}
]
def is_covered(neighborhood) do
@covered_neighborhoods
|> sluggify_covered_neighborhoods()
|> MapSet.member?(sluggify_attributes(neighborhood))
end
defp sluggify_covered_neighborhoods(covered_neighborhoods) do
covered_neighborhoods
|> Enum.map(&sluggify_attributes(&1))
|> MapSet.new()
end
defp sluggify_attributes(neighborhoods) do
neighborhoods
|> Map.update!(:city, &Slugs.sluggify(&1))
|> Map.update!(:neighborhood, &Slugs.sluggify(&1))
|> Map.update!(:state, &Slugs.sluggify(&1))
end
end
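# Usage sketch: the covered-neighborhood check only touches the module attribute
# above (no Repo call), comparing slugified state/city/neighborhood attributes.
Re.Addresses.Neighborhoods.is_covered(%{
  state: "RJ",
  city: "Rio de Janeiro",
  neighborhood: "Botafogo"
})
#=> true
Re.Addresses.Neighborhoods.nearby("Copacabana")
#=> "Ipanema"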
|
apps/re/lib/addresses/neighborhoods.ex
| 0.675015 | 0.546617 |
neighborhoods.ex
|
starcoder
|
defmodule Definject.Check do
@moduledoc false
@uninjectable [:erlang, Kernel, Kernel.Utils]
def validate_deps(deps, {used_captures, used_mods}, {mod, name, arity}) do
outer_function = "#{mod}.#{name}/#{arity}"
used_captures = used_captures |> Enum.uniq()
used_mods = used_mods |> Enum.uniq()
strict = Map.get(deps, :strict, true)
deps = deps |> Map.drop([:strict])
if Application.get_env(:definject, :trace, false) do
IO.puts(
"Validating depedencies for #{deps |> Map.keys() |> inspect} against #{
{used_captures, used_mods} |> inspect
}"
)
end
for {key, value} <- deps do
with :ok <- validate_injectable(key),
:ok <- validate_used(key, {used_captures, used_mods}, strict: strict),
:ok <- validate_same_type(key, value),
:ok <- validate_same_arity(key, value) do
:ok
else
{:error, {:uninjectable_local, function}} ->
raise "Uninjectable local function #{function |> inspect}."
{:error, {:uninjectable_module, module}} ->
raise "Uninjectable module #{module |> inspect}. #{@uninjectable |> inspect} cannot be injected."
{:error, {:unused, key}} ->
raise "#{inspect(key)} is unused in #{outer_function}. Add `strict: false` to disable this."
{:error, :type_mismatch} ->
raise "Type mismatches between #{inspect(key)} and #{inspect(value)}."
{:error, :arity_mismatch} ->
raise "Function arity mismatches between #{inspect(key)} and #{inspect(value)}."
end
end
end
defp validate_injectable(capture) when is_function(capture) do
with :ok <- validate_type_is_external(capture) do
{:module, mod} = :erlang.fun_info(capture, :module)
validate_injectable(mod)
end
end
defp validate_injectable(mod) when is_atom(mod) do
if mod in @uninjectable do
{:error, {:uninjectable_module, mod}}
else
:ok
end
end
defp validate_type_is_external(capture) do
case :erlang.fun_info(capture, :type) do
{:type, :external} ->
:ok
{:type, :local} ->
{:error, {:uninjectable_local, capture}}
end
end
defp validate_used(_, _, strict: false) do
:ok
end
defp validate_used(key, {used_captures, used_mods}, strict: true) do
if key in (used_captures ++ used_mods) do
:ok
else
{:error, {:unused, key}}
end
end
defp validate_same_type(f1, f2) when is_function(f1) and is_function(f2), do: :ok
defp validate_same_type(m1, m2) when is_atom(m1) and is_atom(m2), do: :ok
defp validate_same_type(_, _), do: {:error, :type_mismatch}
defp validate_same_arity(m1, m2) when is_atom(m1) and is_atom(m2), do: :ok
defp validate_same_arity(f1, f2) when is_function(f1) and is_function(f2) do
{:arity, a1} = :erlang.fun_info(f1, :arity)
{:arity, a2} = :erlang.fun_info(f2, :arity)
if a1 == a2 do
:ok
else
{:error, :arity_mismatch}
end
end
end
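# Hypothetical direct call (normally reached through definject's macros): one
# injected dependency that is both injectable and actually used, so every
# validation step returns :ok. `MyMod.my_fun/1` is an assumed outer function.
Definject.Check.validate_deps(
  %{&IO.puts/1 => &IO.inspect/1, strict: true},
  {[&IO.puts/1], []},
  {MyMod, :my_fun, 1}
)
#=> [:ok]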
|
lib/definject/check.ex
| 0.606032 | 0.42051 |
check.ex
|
starcoder
|
defmodule GGity.Geom.Bar do
@moduledoc false
alias GGity.{Draw, Geom, Plot}
@type t() :: %__MODULE__{}
@type record() :: map()
@type mapping() :: map()
defstruct data: nil,
mapping: nil,
stat: :count,
position: :stack,
key_glyph: :rect,
fill: "black",
alpha: 1,
bar_group_width: nil,
custom_attributes: nil
@spec new(mapping(), keyword()) :: Geom.Bar.t()
def new(mapping, options \\ []) do
struct(Geom.Bar, [{:mapping, mapping} | options])
end
@spec draw(Geom.Bar.t(), list(map()), Plot.t()) :: iolist()
def draw(%Geom.Bar{} = geom_bar, data, plot) do
number_of_levels = length(plot.scales.x.levels)
group_width = (plot.width - number_of_levels * (plot.scales.x.padding - 1)) / number_of_levels
geom_bar = struct(geom_bar, bar_group_width: group_width)
bars(geom_bar, data, plot)
end
defp bars(%Geom.Bar{} = geom_bar, data, plot) do
data
|> Enum.reject(fn row -> row[geom_bar.mapping[:y]] == 0 end)
|> Enum.group_by(fn row -> row[geom_bar.mapping[:x]] end)
|> Enum.with_index()
|> Enum.map(fn {{_x_value, group}, group_index} ->
bar_group(geom_bar, group, group_index, plot)
end)
end
defp bar_group(geom_bar, group_values, group_index, %Plot{scales: scales} = plot) do
scale_transforms =
geom_bar.mapping
|> Map.keys()
|> Enum.reduce(%{}, fn aesthetic, mapped ->
Map.put(mapped, aesthetic, Map.get(scales[aesthetic], :transform))
end)
transforms =
geom_bar
|> Map.take([:alpha, :fill])
|> Enum.reduce(%{}, fn {aesthetic, fixed_value}, fixed ->
Map.put(fixed, aesthetic, fn _value -> fixed_value end)
end)
|> Map.merge(scale_transforms)
count_rows = length(group_values)
sort_order =
case geom_bar.position do
:stack -> :desc
:dodge -> :asc
_unknown_adjustment -> :asc
end
group_values
|> Enum.sort_by(
fn row -> {row[geom_bar.mapping[:fill]], row[geom_bar.mapping[:alpha]]} end,
sort_order
)
|> Enum.reduce({0, 0, []}, fn row, {total_width, total_height, rects} ->
custom_attributes = GGity.Layer.custom_attributes(geom_bar, plot, row)
{
total_width + geom_bar.bar_group_width / count_rows,
total_height +
transforms.y.(row[geom_bar.mapping[:y]]) / plot.aspect_ratio,
[
Draw.rect(
[
x: position_adjust_x(geom_bar, row, group_index, total_width, plot),
y:
plot.area_padding + plot.width / plot.aspect_ratio -
position_adjust_y(geom_bar, row, total_height, plot),
width: position_adjust_bar_width(geom_bar, count_rows),
height: transforms.y.(row[geom_bar.mapping[:y]]) / plot.aspect_ratio,
fill: transforms.fill.(row[geom_bar.mapping[:fill]]),
fill_opacity: transforms.alpha.(row[geom_bar.mapping[:alpha]])
] ++
custom_attributes
)
| rects
]
}
end)
|> elem(2)
end
defp position_adjust_x(
%Geom.Bar{position: :stack} = geom_bar,
_row,
group_index,
_total_width,
plot
) do
plot.area_padding + group_index * (geom_bar.bar_group_width + plot.scales.x.padding)
end
defp position_adjust_x(
%Geom.Bar{position: :dodge} = geom_bar,
_row,
group_index,
total_width,
plot
) do
plot.area_padding + group_index * (geom_bar.bar_group_width + plot.scales.x.padding) +
total_width
end
defp position_adjust_y(%Geom.Bar{position: :stack} = geom_bar, row, total_height, plot) do
total_height + plot.scales.y.transform.(row[geom_bar.mapping[:y]]) / plot.aspect_ratio
end
defp position_adjust_y(%Geom.Bar{position: :dodge} = geom_bar, row, _total_height, plot) do
plot.scales.y.transform.(row[geom_bar.mapping[:y]]) / plot.aspect_ratio
end
defp position_adjust_bar_width(%Geom.Bar{position: :stack} = geom_bar, _count_rows) do
geom_bar.bar_group_width
end
defp position_adjust_bar_width(%Geom.Bar{position: :dodge} = geom_bar, count_rows) do
geom_bar.bar_group_width / count_rows
end
end
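# Construction sketch: building the geom struct directly (in normal use
# GGity.Plot does this); keyword options merge into the defstruct fields above.
GGity.Geom.Bar.new(%{x: "category", y: "count"}, fill: "steelblue", position: :dodge)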
|
lib/ggity/geom/bar.ex
| 0.883964 | 0.579966 |
bar.ex
|
starcoder
|
defmodule Meeple.FogOfWar do
@moduledoc """
Fog of war: shows the player only the parts of the territory that are
currently visible to them, and combines information from the territory
with player-specific information.
state:
territory: module/pid of Territory
fog: visibility grid
grid: combined information territory + player
"""
use Agent
alias Sim.Grid
alias Meeple.Territory
alias Meeple.Territory.One
alias Meeple.Territory.Test, as: TestTerritory
@full_visability 5
@only_vegetation 1
@terra_incognita 0
def start_link(args \\ []) do
Agent.start_link(
fn -> %{territory: args[:territory] || Territory, grid: nil, fog: nil} end,
name: args[:name] || __MODULE__
)
end
def create(name, pid \\ __MODULE__) do
Agent.get_and_update(pid, fn state ->
fog = create_fog(name)
grid = sync_grid(fog, state.territory)
{:ok, %{state | fog: fog, grid: grid}}
end)
end
def get(pid \\ __MODULE__) do
Agent.get(pid, &get_grid(&1))
end
def field(x, y, pid \\ __MODULE__) do
Agent.get(pid, &get_field(x, y, &1))
end
def update_grid(pid \\ __MODULE__) do
Agent.update(pid, fn %{territory: territory, fog: fog} = state ->
grid = sync_grid(fog, territory)
%{state | grid: grid}
end)
end
def update_field(x, y, pid \\ __MODULE__) do
Agent.get_and_update(pid, fn %{territory: territory, fog: fog, grid: grid} = state ->
visability = Grid.get(fog, x, y)
{field, grid} = update_field_from_territory(x, y, grid, visability, territory)
{field, %{state | grid: grid}}
end)
end
def discover(x, y, pid \\ __MODULE__) do
Agent.cast(pid, fn %{territory: territory, fog: fog, grid: grid} = state ->
fog = Grid.put(fog, x, y, @full_visability)
{_field, grid} = update_field_from_territory(x, y, grid, @full_visability, territory)
%{state | fog: fog, grid: grid}
end)
end
defp create_fog("test"), do: create_fog(TestTerritory, 3, 4)
defp create_fog("one"), do: create_fog(One, 15, 7)
defp create_fog(module, width, height) when is_atom(module) do
Grid.create(width, height, &module.create_fog/2)
end
defp get_grid(%{grid: nil}), do: raise("grid has not yet been created")
defp get_grid(%{grid: grid}), do: Grid.map(grid)
defp get_field(_x, _y, %{grid: nil}), do: raise("grid has not yet been created")
defp get_field(x, y, %{grid: grid}), do: Grid.get(grid, x, y)
defp sync_grid(fog, territory) do
Grid.create(Grid.width(fog), Grid.height(fog), fn x, y ->
fetch_field_from_territory(x, y, Grid.get(fog, x, y), territory)
end)
end
defp update_field_from_territory(x, y, grid, visability, territory) do
field = fetch_field_from_territory(x, y, visability, territory)
grid = Grid.put(grid, x, y, field)
{field, grid}
end
defp fetch_field_from_territory(x, y, visability, territory) do
get_field_from_territory(x, y, visability, territory)
|> Map.merge(%{visability: visability})
end
defp get_field_from_territory(x, y, visability, territory) do
case visability do
@full_visability ->
Territory.field(x, y, territory)
@only_vegetation ->
Territory.field(x, y, territory) |> extract_attributes([:pawns, :vegetation])
@terra_incognita ->
Territory.field(x, y, territory) |> extract_attributes([:pawns])
end
end
def extract_attributes(field, attributes) do
Enum.reduce(attributes, %{}, fn attr, f ->
if Map.has_key?(field, attr) do
Map.put(f, attr, field[attr])
else
f
end
end)
end
end
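# Lifecycle sketch (assumes the backing Meeple.Territory agent is already
# running with the "test" layout loaded, since sync_grid/2 reads fields from it).
{:ok, _pid} = Meeple.FogOfWar.start_link(name: :fog_demo)
:ok = Meeple.FogOfWar.create("test", :fog_demo)
Meeple.FogOfWar.field(0, 0, :fog_demo)
#=> a field map with the :visability level merged in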
|
apps/meeple/lib/meeple/board/fog_of_war.ex
| 0.633183 | 0.63696 |
fog_of_war.ex
|
starcoder
|
defmodule DOMSegServer.Datasets do
@moduledoc """
The Datasets context.
"""
import Ecto.Query, warn: false
import DOMSegServer.Guards
alias DOMSegServer.Repo
alias DOMSegServer.Datasets.Dataset
@doc """
Returns the list of datasets.
## Examples
iex> list_datasets()
[%Dataset{}, ...]
"""
def list_datasets do
Repo.all(Dataset)
end
@doc """
Gets a single dataset.
Raises `Ecto.NoResultsError` if the Dataset does not exist.
## Examples
iex> get_dataset!(123)
%Dataset{}
iex> get_dataset!(456)
** (Ecto.NoResultsError)
"""
def get_dataset!(id) when is_uuid(id), do: Repo.get!(Dataset, id)
def get_dataset!(_id), do: nil
def get_dataset(id) when is_uuid(id), do: Repo.get(Dataset, id)
def get_dataset(_id), do: nil
@doc """
Creates a dataset.
## Examples
iex> create_dataset(%{field: value})
{:ok, %Dataset{}}
iex> create_dataset(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_dataset(attrs \\ %{}) do
%Dataset{}
|> Dataset.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a dataset.
## Examples
iex> update_dataset(dataset, %{field: new_value})
{:ok, %Dataset{}}
iex> update_dataset(dataset, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_dataset(%Dataset{} = dataset, attrs) do
dataset
|> Dataset.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a dataset.
## Examples
iex> delete_dataset(dataset)
{:ok, %Dataset{}}
iex> delete_dataset(dataset)
{:error, %Ecto.Changeset{}}
"""
def delete_dataset(%Dataset{} = dataset) do
Repo.delete(dataset)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking dataset changes.
## Examples
iex> change_dataset(dataset)
%Ecto.Changeset{data: %Dataset{}}
"""
def change_dataset(%Dataset{} = dataset, attrs \\ %{}) do
Dataset.changeset(dataset, attrs)
end
@doc """
Extract sample stats for the given dataset ID
"""
def get_dataset_stats(id) do
ds_query = from s in DOMSegServer.Samples.Sample, where: s.dataset_id == ^id
num_samples = Repo.aggregate(ds_query, :count)
query = from s in ds_query, distinct: true, select: s.url
num_urls = Repo.aggregate(query, :count)
query = from s in ds_query, distinct: true, select: s.user_id
num_users = Repo.aggregate(query, :count)
query = from s in ds_query, select: fragment("avg(length(?))", s.html)
avg_html_len = Repo.one(query)
%{
num_urls: num_urls,
num_users: num_users,
num_samples: num_samples,
avg_html_len: avg_html_len
}
end
end
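# Stats sketch for a `dataset` struct in scope (values illustrative; assumes
# seeded samples exist for the dataset):
DOMSegServer.Datasets.get_dataset_stats(dataset.id)
#=> %{num_samples: 120, num_urls: 37, num_users: 5, avg_html_len: 5432.8}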
|
server/lib/domsegserver/datasets.ex
| 0.841256 | 0.490419 |
datasets.ex
|
starcoder
|
defmodule ShEx.EachOf do
@moduledoc false
defstruct [
# tripleExprLabel?
:id,
# [tripleExpr{2,}]
:expressions,
# INTEGER?
:min,
# INTEGER?
:max,
# [SemAct+]?
:sem_acts,
# [Annotation+]?
:annotations
]
import ShEx.TripleExpression.Shared
def matches(each_of, triples, graph, schema, association, state) do
with {matched, remainder, match_count, violations} <-
find_matches(triples, each_of, graph, schema, association, state),
:ok <-
check_cardinality(
match_count,
ShEx.TripleExpression.min_cardinality(each_of),
each_of,
violations
) do
{:ok, matched, remainder}
else
violation ->
{:error, violation}
end
end
defp find_matches(triples, each_of, graph, schema, association, state) do
do_find_matches(
{:ok, [], triples, 0, []},
each_of.expressions,
ShEx.TripleExpression.max_cardinality(each_of),
graph,
schema,
association,
state
)
end
defp do_find_matches({:ok, matched, remainder, max, violations}, _, max, _, _, _, _),
do: {matched, remainder, max, violations}
defp do_find_matches(
{:ok, matched, remainder, match_count, violations},
expressions,
max,
graph,
schema,
association,
state
) do
expressions
|> Enum.reduce_while({:ok, matched, remainder, match_count + 1, violations}, fn
expression, {:ok, matched, remainder, match_count, violations} ->
ShEx.TripleExpression.matches(expression, remainder, graph, schema, association, state)
|> case do
{:ok, new_matched, new_remainder} ->
{:cont, {:ok, new_matched, new_remainder, match_count, violations}}
{:error, violation} ->
{:halt, {matched, remainder, match_count - 1, violations ++ List.wrap(violation)}}
end
end)
|> do_find_matches(expressions, max, graph, schema, association, state)
end
defp do_find_matches(acc, _, _, _, _, _, _), do: acc
defimpl ShEx.TripleExpression do
def matches(each_of, triples, graph, schema, association, state) do
ShEx.EachOf.matches(each_of, triples, graph, schema, association, state)
end
def min_cardinality(each_of), do: ShEx.TripleExpression.Shared.min_cardinality(each_of)
def max_cardinality(each_of), do: ShEx.TripleExpression.Shared.max_cardinality(each_of)
def predicates(each_of, state),
do: ShEx.TripleExpression.Shared.predicates_of_group(each_of, state)
def triple_constraints(each_of, state),
do: ShEx.TripleExpression.Shared.triple_constraints_of_group(each_of, state)
def required_arcs(each_of, state),
do: ShEx.TripleExpression.Shared.required_arcs_of_group(each_of, state)
end
defimpl ShEx.Operator do
def children(each_of) do
Enum.map(each_of.expressions, fn expression ->
if RDF.term?(expression) do
{:triple_expression_label, expression}
else
expression
end
end)
end
def triple_expression_label_and_operands(each_of),
do: {each_of.id, each_of.expressions}
end
end
|
lib/shex/shape_expressions/each_of.ex
| 0.579638 | 0.453141 |
each_of.ex
|
starcoder
|
defmodule Dogma.Reporter.JSON do
@moduledoc """
A machine readable format in JSON.
The JSON structure is like the following example:
{
"metadata": {
"dogma_version": "0.3.0",
"elixir_version": "1.0.5",
"erlang_version": "Erlang/OTP 10 [erts-7.0.3] [64-bit]",
"system_architecture": "x86_64-apple-darwin14.5.0"
},
"files": [{
"path": "lib/foo.ex",
"errors": []
}, {
"path": "lib/bar.ex",
"errors": [{
"line": 1,
"rule": "ModuleDoc",
"message": "Module without @moduledoc detected"
}, {
"line": 14,
"rule": "ComparisonToBoolean",
"message": "Comparison to a boolean is useless"
}
]
}],
"summary": {
"error_count": 2,
"inspected_file_count": 2
}
}
"""
use GenEvent
def handle_event({:finished, scripts}, _) do
IO.write finish(scripts)
{:ok, []}
end
def handle_event(_, _), do: {:ok, []}
@doc """
Runs at the end of a Dogma run; builds the JSON report that `handle_event/2` writes to standard output.
"""
def finish(scripts) do
%{
metadata: metadata(),
files: Enum.map(scripts, &format/1),
summary: summary(scripts)
} |> Poison.encode!
end
defp metadata do
erl_version = :system_version
|> :erlang.system_info
|> to_string
architecture = :system_architecture
|> :erlang.system_info
|> to_string
%{
dogma_version: Dogma.version,
elixir_version: System.version,
erlang_version: erl_version,
system_architecture: architecture
}
end
defp format(script) do
%{
path: script.path,
errors: Enum.map(script.errors, &format_error/1)
}
end
defp format_error(error) do
%{
line: error.line,
rule: printable_name(error.rule),
message: error.message
}
end
defp printable_name(module) do
module
|> Module.split
|> List.last
end
defp summary(scripts) do
%{
offense_count: count_errors(scripts),
inspected_file_count: length(scripts)
}
end
defp count_errors(scripts) do
scripts
|> Enum.map(&(&1.errors))
|> List.flatten
|> length
end
end
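# Report-building sketch: with no scripts the reporter still emits metadata
# plus empty files/summary sections (key order in the encoded JSON may vary).
Dogma.Reporter.JSON.finish([])
#=> "{\"metadata\":{...},\"files\":[],\"summary\":{\"offense_count\":0,\"inspected_file_count\":0}}"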
|
lib/dogma/reporter/json.ex
| 0.746139 | 0.445168 |
json.ex
|
starcoder
|
defmodule Penelope.NLP.POSTagger do
@moduledoc """
The part-of-speech tagger transforms a tokenized sentence into a list of
`{token, pos_tag}` tuples. The tagger takes no responsibility for
tokenization; this means that callers must be careful to maintain the same
tokenization scheme between training and evaluating to ensure the best
results.
As this tagger does not ship with a pretrained model, it is both
language- and tagset-agnostic, though the default feature set used
(see `POSFeaturizer`) was designed for English.
See `POSTaggerTrainer.train/2` for an example
of how to train a new POS tagger model.
"""
alias Penelope.ML.Pipeline
@type model :: %{pos_tagger: [{atom, any}]}
@doc """
Fits the tagger model. Custom featurizers may be supplied.
"""
@spec fit(
context :: map,
x :: [tokens :: [String.t()]],
y :: [tags :: [String.t()]],
featurizers :: [{atom | String.t(), [any]}]
) :: model
def fit(context, x, y, featurizers \\ [{:pos_featurizer, []}]) do
pipeline = featurizers ++ [{:crf_tagger, []}]
%{pos_tagger: Pipeline.fit(context, x, y, pipeline)}
end
@doc """
Attaches part of speech tags to a list of tokens.
Example:
```
iex> POSTagger.tag(model, %{}, ["Judy", "saw", "her"])
[{"Judy", "NNP"}, {"saw", "VBD"}, {"her", "PRP$"}]
```
"""
@spec tag(model :: model, context :: map, tokens :: [String.t()]) :: [
{String.t(), String.t()}
]
def tag(model, context, tokens) do
[{tags, _probability}] =
Pipeline.predict_sequence(model.pos_tagger, context, [tokens])
Enum.zip(tokens, tags)
end
@doc """
Imports parameters from a serialized model.
"""
@spec compile(params :: map) :: model
def compile(params),
do: %{pos_tagger: Pipeline.compile(params["pos_tagger"])}
@doc """
Exports a runtime model to a serializable data structure.
"""
@spec export(model :: model) :: map
def export(model),
do: %{"pos_tagger" => Pipeline.export(model.pos_tagger)}
end
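# Training sketch with toy data (a real tagset needs far more examples); the
# default :pos_featurizer pipeline from fit/4 is used.
x = [["Judy", "saw", "her"], ["dogs", "bark"]]
y = [["NNP", "VBD", "PRP$"], ["NNS", "VBP"]]
model = Penelope.NLP.POSTagger.fit(%{}, x, y)
Penelope.NLP.POSTagger.tag(model, %{}, ["dogs", "bark"])
#=> [{"dogs", "NNS"}, {"bark", "VBP"}]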
|
lib/penelope/nlp/pos_tagger.ex
| 0.925626 | 0.766468 |
pos_tagger.ex
|
starcoder
|
defmodule Projare.Project do
use Projare.Web, :model
@url_regex ~r"^(https?://)?([\da-z\.-]+)\.([a-z\.]{2,6})([/\w \.-]*)*/?$"
schema "projects" do
field :url, :string
field :title, :string
field :description, :string
field :stars_count, :integer, default: 0
field :comments_count, :integer, default: 0
field :starred, :boolean, virtual: true, default: false
field :commented, :boolean, virtual: true, default: false
belongs_to :author, Projare.User
belongs_to :category, Projare.Category
timestamps inserted_at: :created_at, updated_at: false
end
@required_fields ~w(url title description category_id)
@optional_fields ~w()
def changeset(model, params \\ :empty) do
model
|> cast(params, @required_fields, @optional_fields)
|> validate_format(:url, @url_regex)
|> validate_length(:title, max: 100)
|> validate_length(:description, min: 10)
|> assoc_constraint(:category)
end
def star_changeset(model, params \\ :empty) do
model
|> cast(params, ~w(stars_count), [])
|> validate_number(:stars_count, greater_than_or_equal_to: 0)
end
def comment_changeset(model, params \\ :empty) do
model
|> cast(params, ~w(comments_count), [])
|> validate_number(:comments_count, greater_than_or_equal_to: 0)
end
def for_params(query, params) when is_map(params) do
query
|> with_word(params["q"])
|> reverse_order(params["reversed"])
|> ranking_order(params["ranking"])
|> for_author(params["author_id"])
|> for_category(params["category_name"])
|> with_preloads
end
def for_category(query, name) when is_binary(name) do
from p in query,
join: c in assoc(p, :category),
where: c.normalized_name == ^name
end
def for_category(query, _), do: query
def for_author(query, id) when is_binary(id) or is_integer(id) do
from p in query, where: p.author_id == ^id
end
def for_author(query, _), do: query
def with_preloads(query) do
from p in query, preload: [:author, :category]
end
def with_word(query, nil), do: query
def with_word(query, word) do
from p in query,
where: ilike(p.title, ^("%#{word}%")) or ilike(p.description, ^("%#{word}%"))
end
def ranking_order(query, ranking) when ranking == true or ranking == "true",
do: from p in query, order_by: [desc: p.stars_count, desc: p.comments_count]
def ranking_order(query, _), do: query
def reverse_order(query, reversed) when reversed == true or reversed == "true",
do: from p in query, order_by: [desc: p.created_at]
def reverse_order(query, _), do: query
end
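# Query-composition sketch (the Projare.Repo module name is assumed):
# for_params/2 chains the helpers above onto any queryable.
Projare.Project
|> Projare.Project.for_params(%{"q" => "elixir", "ranking" => "true"})
|> Projare.Repo.all()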
|
web/models/project.ex
| 0.54698 | 0.433202 |
project.ex
|
starcoder
|
defmodule Membrane.MP4.Muxer.CMAF do
@moduledoc """
Puts payloaded stream into [Common Media Application Format](https://www.wowza.com/blog/what-is-cmaf),
an MP4-based container commonly used in adaptive streaming over HTTP.
Multiple input streams are supported. If that is the case, they will be muxed into a single CMAF Track.
Given that all input streams need to have a keyframe at the beginning of each CMAF Segment, it is recommended
that all input streams are renditions of the same content.
If a stream contains non-key frames (like H264 P or B frames), they should be marked
with a `mp4_payload: %{key_frame?: false}` metadata entry.
"""
use Membrane.Filter
require Membrane.Logger
alias __MODULE__.{Header, Segment}
alias Membrane.{Buffer, Time}
alias Membrane.MP4.Payload.{AAC, AVC1}
alias Membrane.MP4.{Helper, Track}
def_input_pad :input,
availability: :on_request,
demand_unit: :buffers,
caps: Membrane.MP4.Payload
def_output_pad :output, caps: Membrane.CMAF.Track
def_options segment_duration: [
type: :time,
spec: Membrane.Time.t(),
default: 2 |> Time.seconds()
]
@impl true
def handle_init(options) do
state =
options
|> Map.from_struct()
|> Map.merge(%{
seq_num: 0,
# Caps waiting to be sent after receiving the next buffer. Holds the structure {caps_timestamp, caps}
awaiting_caps: nil,
pad_to_track_data: %{},
# ID for the next input track
next_track_id: 1,
samples: %{}
})
{:ok, state}
end
@impl true
def handle_pad_added(_pad, ctx, _state) when ctx.playback_state == :playing,
do:
raise(
"New tracks can be added to #{inspect(__MODULE__)} only before transition to state: :playing"
)
@impl true
def handle_pad_added(Pad.ref(:input, _id) = pad, _ctx, state) do
{track_id, state} = Map.get_and_update!(state, :next_track_id, &{&1, &1 + 1})
track_data = %{
id: track_id,
track: nil,
elapsed_time: 0,
end_timestamp: 0,
buffer_awaiting_duration: nil
}
state
|> put_in([:pad_to_track_data, pad], track_data)
|> put_in([:samples, pad], [])
|> then(&{:ok, &1})
end
@impl true
def handle_demand(:output, _size, _unit, _ctx, state) do
{pad, _end_timestamp} =
state.pad_to_track_data
|> Enum.map(fn {pad, track_data} -> {pad, track_data.end_timestamp} end)
|> Enum.reject(fn {_key, timestamp} -> is_nil(timestamp) end)
|> Enum.min_by(fn {_key, timestamp} -> Ratio.to_float(timestamp) end)
{{:ok, demand: {pad, 1}}, state}
end
@impl true
def handle_caps(pad, %Membrane.MP4.Payload{} = caps, ctx, state) do
state =
update_in(state, [:pad_to_track_data, pad], fn track_data ->
track =
caps
|> Map.from_struct()
|> Map.take([:width, :height, :content, :timescale])
|> Map.put(:id, track_data.id)
|> Track.new()
%{track_data | track: track}
end)
has_all_input_caps? =
Map.drop(ctx.pads, [:output, pad]) |> Map.values() |> Enum.all?(&(&1.caps != nil))
if has_all_input_caps? do
caps = generate_output_caps(state)
cond do
is_nil(ctx.pads.output.caps) ->
{{:ok, caps: {:output, caps}}, state}
caps != ctx.pads.output.caps ->
{:ok, %{state | awaiting_caps: {{:update_with_next, pad}, caps}}}
true ->
{:ok, state}
end
else
{:ok, state}
end
end
@impl true
def handle_process(Pad.ref(:input, _id) = pad, sample, ctx, state) do
use Ratio, comparison: true
state =
state
|> process_buffer_awaiting_duration(pad, sample)
|> update_awaiting_caps(pad)
{caps_action, segment} =
if is_nil(state.awaiting_caps) do
{[], Segment.Helper.get_segment(state, state.segment_duration)}
else
{duration, caps} = state.awaiting_caps
{[caps: {:output, caps}], Segment.Helper.get_discontinuity_segment(state, duration)}
end
case segment do
{:ok, segment, state} ->
{buffer, state} = generate_segment(segment, ctx, state)
actions = [buffer: {:output, buffer}] ++ caps_action ++ [redemand: :output]
{{:ok, actions}, state}
{:error, :not_enough_data} ->
{{:ok, redemand: :output}, state}
end
end
@impl true
def handle_end_of_stream(Pad.ref(:input, _track_id) = pad, ctx, state) do
sample = state.pad_to_track_data[pad].buffer_awaiting_duration
sample_metadata =
Map.put(sample.metadata, :duration, hd(state.samples[pad]).metadata.duration)
sample = %Buffer{sample | metadata: sample_metadata}
state = update_in(state, [:samples, pad], &[sample | &1])
processing_finished? =
ctx.pads |> Map.drop([:output, pad]) |> Map.values() |> Enum.all?(& &1.end_of_stream?)
if processing_finished? do
with {:ok, segment, state} <- Segment.Helper.take_all_samples(state) do
{buffer, state} = generate_segment(segment, ctx, state)
{{:ok, buffer: {:output, buffer}, end_of_stream: :output}, state}
else
{:error, :not_enough_data} -> {{:ok, end_of_stream: :output}, state}
end
else
state = put_in(state, [:pad_to_track_data, pad, :end_timestamp], nil)
{{:ok, redemand: :output}, state}
end
end
defp generate_output_caps(state) do
tracks = Enum.map(state.pad_to_track_data, fn {_pad, track_data} -> track_data.track end)
header = Header.serialize(tracks)
content_type =
tracks
|> Enum.map(fn
%{content: %AAC{}} -> :audio
%{content: %AVC1{}} -> :video
end)
|> then(fn
[item] -> item
list -> list
end)
%Membrane.CMAF.Track{
content_type: content_type,
header: header
}
end
defp generate_segment(acc, ctx, state) do
use Ratio, comparison: true
tracks_data =
Enum.map(acc, fn {pad, samples} ->
%{timescale: timescale} = ctx.pads[pad].caps
first_sample = hd(samples)
last_sample = List.last(samples)
samples = Enum.to_list(samples)
samples_table =
samples
|> Enum.map(fn sample ->
%{
sample_size: byte_size(sample.payload),
sample_flags: generate_sample_flags(sample.metadata),
sample_duration:
Helper.timescalify(
sample.metadata.duration,
timescale
)
|> Ratio.trunc()
}
end)
samples_data = Enum.map_join(samples, & &1.payload)
duration = last_sample.dts - first_sample.dts + last_sample.metadata.duration
%{
pad: pad,
id: state.pad_to_track_data[pad].id,
sequence_number: state.seq_num,
elapsed_time:
Helper.timescalify(state.pad_to_track_data[pad].elapsed_time, timescale)
|> Ratio.trunc(),
unscaled_duration: duration,
duration: Helper.timescalify(duration, timescale),
timescale: timescale,
samples_table: samples_table,
samples_data: samples_data
}
end)
payload = Segment.serialize(tracks_data)
# Duration of the tracks will never be exactly the same. To minimize the error and avoid its magnification over time,
# duration of the segment is assumed to be the average of tracks' durations.
duration =
tracks_data
|> Enum.map(&Ratio.to_float(&1.unscaled_duration))
|> then(&(Enum.sum(&1) / length(&1)))
|> floor()
buffer = %Buffer{payload: payload, metadata: %{duration: duration}}
# Update elapsed time counters for each track
state =
Enum.reduce(tracks_data, state, fn %{unscaled_duration: duration, pad: pad}, state ->
update_in(state, [:pad_to_track_data, pad, :elapsed_time], &(&1 + duration))
end)
|> Map.update!(:seq_num, &(&1 + 1))
{buffer, state}
end
defp generate_sample_flags(metadata) do
key_frame? = metadata |> Map.get(:mp4_payload, %{}) |> Map.get(:key_frame?, true)
is_leading = 0
depends_on = if key_frame?, do: 2, else: 1
is_depended_on = 0
has_redundancy = 0
padding_value = 0
non_sync = if key_frame?, do: 0, else: 1
degradation_priority = 0
<<0::4, is_leading::2, depends_on::2, is_depended_on::2, has_redundancy::2, padding_value::3,
non_sync::1, degradation_priority::16>>
end
# Update the duration of the awaiting sample and insert the current sample into the queue
defp process_buffer_awaiting_duration(state, pad, sample) do
use Ratio
prev_sample = state.pad_to_track_data[pad].buffer_awaiting_duration
if is_nil(prev_sample) do
put_in(state, [:pad_to_track_data, pad, :buffer_awaiting_duration], sample)
else
duration = Ratio.to_float(sample.dts - prev_sample.dts)
prev_sample_metadata = Map.put(prev_sample.metadata, :duration, duration)
prev_sample = %Buffer{prev_sample | metadata: prev_sample_metadata}
put_in(state, [:pad_to_track_data, pad, :end_timestamp], prev_sample.dts)
|> put_in([:pad_to_track_data, pad, :buffer_awaiting_duration], sample)
|> update_in([:samples, pad], &[prev_sample | &1])
end
end
# It is not possible to determine the duration of the segment that is connected with discontinuity before receiving the next sample.
# This function acts to update the information about the duration of the discontinuity segment that needs to be produced
defp update_awaiting_caps(%{awaiting_caps: {{:update_with_next, pad}, caps}} = state, pad) do
use Ratio
duration =
state.pad_to_track_data[pad].buffer_awaiting_duration.dts -
List.last(state.samples[pad]).dts
%{state | awaiting_caps: {duration, caps}}
end
defp update_awaiting_caps(state, _pad), do: state
end
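# Configuration sketch: def_options generates an options struct for the element,
# so a pipeline configures the muxer like this (child-spec wiring omitted, as it
# varies across Membrane versions):
%Membrane.MP4.Muxer.CMAF{segment_duration: Membrane.Time.seconds(4)}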
|
lib/membrane_mp4/muxer/cmaf.ex
| 0.876317 | 0.443359 |
cmaf.ex
|
starcoder
|
defmodule Imagism.Image do
@moduledoc """
A loaded image that can be processed and encoded.
"""
@type t() :: %Imagism.Image{}
defstruct resource: nil,
reference: nil
@doc """
Wraps an image returned from the NIF with a reference.
"""
@spec wrap_resource(any) :: Imagism.Image.t()
def wrap_resource(resource) do
%__MODULE__{
resource: resource,
reference: make_ref()
}
end
@doc """
Opens an image at a specific file `path`.
"""
@spec open(binary) :: {:error, any} | {:ok, Imagism.Image.t()}
def open(path) when is_binary(path) do
case Imagism.Native.open(path) do
{:ok, res} -> {:ok, Imagism.Image.wrap_resource(res)}
err -> err
end
end
@doc """
Decodes an image from `bits`. It will guess the image's file format
or default to JPEG.
"""
@spec decode(bitstring()) :: {:error, any} | {:ok, Imagism.Image.t()}
def decode(bits) when is_bitstring(bits) do
case Imagism.Native.decode(bits) do
{:ok, res} -> {:ok, Imagism.Image.wrap_resource(res)}
err -> err
end
end
@doc """
Returns the MIME type of an `image`.
"""
@spec content_type(Imagism.Image.t()) :: String.t()
def content_type(image) do
Imagism.Native.content_type(image.resource)
end
@doc """
Returns the dimensions of an `image`.
"""
@spec dimensions(Imagism.Image.t()) :: {integer(), integer()}
def dimensions(image) do
Imagism.Native.dimensions(image.resource)
end
@doc """
Encodes an `image` to a binary or returns an error explaining
what went wrong.
"""
@spec encode(Imagism.Image.t()) :: {:error, any} | {:ok, binary}
def encode(image) do
Imagism.Native.encode(image.resource)
end
@doc """
Brightens an `image` by a multiplier `value`.
If the value is negative, the image will be darkened instead.
"""
@spec brighten(Imagism.Image.t(), integer) :: Imagism.Image.t()
def brighten(image, value) when is_integer(value) do
Imagism.Image.wrap_resource(Imagism.Native.brighten(image.resource, value))
end
@doc """
Adjusts the contrast of `image` by a constant `value`.
If the value is negative, the contrast will be decreased.
"""
@spec contrast(Imagism.Image.t(), float) :: Imagism.Image.t()
def contrast(image, value) when is_float(value) do
Imagism.Image.wrap_resource(Imagism.Native.contrast(image.resource, value))
end
@doc """
Blur an `image` by `sigma`.
The larger the `sigma`, the longer this operation will take.
"""
@spec blur(Imagism.Image.t(), float) :: Imagism.Image.t()
def blur(image, sigma) when is_float(sigma) do
Imagism.Image.wrap_resource(Imagism.Native.blur(image.resource, sigma))
end
@doc """
Flips an `image` vertically.
"""
@spec flipv(Imagism.Image.t()) :: Imagism.Image.t()
def flipv(image) do
Imagism.Image.wrap_resource(Imagism.Native.flipv(image.resource))
end
@doc """
Flips an `image` horizontally.
"""
@spec fliph(Imagism.Image.t()) :: Imagism.Image.t()
def fliph(image) do
Imagism.Image.wrap_resource(Imagism.Native.fliph(image.resource))
end
@doc """
Resize an `image` to an exact `{w, h}` dimension.
"""
@spec resize(Imagism.Image.t(), integer, integer) :: Imagism.Image.t()
def resize(image, w, h) when is_integer(w) and is_integer(h) do
Imagism.Image.wrap_resource(Imagism.Native.resize(image.resource, w, h))
end
@doc """
Crop an `image` at a position `{x, y}` to a specific `{w, h}`.
"""
@spec crop(Imagism.Image.t(), integer, integer, integer, integer) :: Imagism.Image.t()
def crop(image, x, y, w, h)
when is_integer(x) and is_integer(y) and is_integer(w) and is_integer(h) do
Imagism.Image.wrap_resource(Imagism.Native.crop(image.resource, x, y, w, h))
end
@doc """
Rotates an `image` by an amount of `rotation` in degrees.
Only a 90, 180 or 270 degree rotation is supported.
Anything else won't change the image.
"""
@spec rotate(Imagism.Image.t(), integer) :: Imagism.Image.t()
def rotate(image, rotation) when is_integer(rotation) do
Imagism.Image.wrap_resource(Imagism.Native.rotate(image.resource, rotation))
end
end
defimpl Inspect, for: Imagism.Image do
import Inspect.Algebra
def inspect(dict, opts) do
concat(["#Imagism.Image<", to_doc(dict.reference, opts), ">"])
end
end
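# Processing sketch (input path hypothetical): each operation returns a new
# image handle, so calls chain until the final encode.
{:ok, image} = Imagism.Image.open("input.jpg")
processed =
  image
  |> Imagism.Image.brighten(10)
  |> Imagism.Image.resize(320, 240)
{:ok, binary} = Imagism.Image.encode(processed)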
|
lib/imagism/image.ex
| 0.907374 | 0.429968 |
image.ex
|
starcoder
|
defmodule ElixirRigidPhysics.Geometry.Triangle do
@moduledoc """
Module for handling queries related to planar 3D triangles
"""
alias Graphmath.Vec3
require Record
Record.defrecord(:triangle, a: {0.0, 0.0, 0.0}, b: {0.0, 1.0, 0.0}, c: {0.0, 0.0, 1.0})
@type triangle :: record(:triangle, a: Vec3.vec3(), b: Vec3.vec3(), c: Vec3.vec3())
require ElixirRigidPhysics.Geometry.Plane, as: Plane
@doc """
Creates a triangle given three points.
## Examples
iex> # IO.puts "Test basic triangle creation from points"
iex> require ElixirRigidPhysics.Geometry.Triangle, as: Triangle
iex> a = {1.0, 0.0, 1.0}
iex> b = {0.0, 0.0, 0.0}
iex> c = {3.0, 0.0, 1.0}
iex> Triangle.create_from_points( a, b, c )
{:triangle, {1.0, 0.0, 1.0}, {0.0, 0.0, 0.0}, {3.0, 0.0, 1.0}}
"""
@spec create_from_points(Vec3.vec3(), Vec3.vec3(), Vec3.vec3()) :: triangle
def create_from_points(a, b, c) do
triangle(a: a, b: b, c: c)
end
@doc """
Creates plane from triangle.
## Examples
iex> # IO.puts "Test plane creation from triangle"
iex> require ElixirRigidPhysics.Geometry.Triangle, as: Triangle
iex> a = {0.0, 0.0, 0.0}
iex> b = {1.0, 0.0, 0.0}
iex> c = {0.0, 0.0, 1.0}
iex> t= Triangle.create_from_points( a, b, c )
iex> Triangle.to_plane( t )
{:plane, 0.0, -1.0, 0.0, 0.0}
iex> # IO.puts "Test plane creation from scaled triangle"
iex> require ElixirRigidPhysics.Geometry.Triangle, as: Triangle
iex> a = {0.0, 0.0, 0.0}
iex> b = {2.0, 0.0, 0.0}
iex> c = {0.0, 0.0, 2.0}
iex> t= Triangle.create_from_points( a, b, c )
iex> Triangle.to_plane( t )
{:plane, 0.0, -1.0, 0.0, 0.0}
iex> # IO.puts "Test plane creation from flipped triangle"
iex> require ElixirRigidPhysics.Geometry.Triangle, as: Triangle
iex> a = {0.0, 0.0, 0.0}
iex> b = {1.0, 0.0, 0.0}
iex> c = {0.0, 0.0, 1.0}
iex> t= Triangle.create_from_points( a, c, b )
iex> Triangle.to_plane( t )
{:plane, 0.0, 1.0, 0.0, 0.0}
iex> # IO.puts "Test plane creation from 3D triangle"
iex> require ElixirRigidPhysics.Geometry.Triangle, as: Triangle
iex> a = {0.0, 2.0, 0.0}
iex> b = {1.0, 2.0, 0.0}
iex> c = {0.0, 2.0, 1.0}
iex> t= Triangle.create_from_points( a, b, c )
iex> Triangle.to_plane( t )
{:plane, 0.0, -1.0, 0.0, 2.0}
iex> # IO.puts "Test plane creation from triangle"
iex> require ElixirRigidPhysics.Geometry.Triangle, as: Triangle
iex> a = {0.0, 0.0, 1.0}
iex> b = {0.0, 1.0, 0.0}
iex> c = {1.0, 0.0, 0.0}
iex> sqrt_3_over_3 = :math.sqrt(3)/3.0
iex> t= Triangle.create_from_points( a, b, c )
iex> {:plane, pa, pb, pc, pd} = Triangle.to_plane( t )
iex> Graphmath.Vec3.equal({pa,pb,pc},{-sqrt_3_over_3,-sqrt_3_over_3,-sqrt_3_over_3}, 0.000005)
true
iex> Float.round(pd - sqrt_3_over_3) == 0.0
true
"""
@spec to_plane(triangle) :: Plane.plane()
def to_plane(triangle(a: a, b: b, c: c)) do
ab = Vec3.subtract(b, a)
ac = Vec3.subtract(c, a)
n = ab
|> Vec3.cross(ac)
|> Vec3.normalize()
Plane.create(n, a)
end
@doc """
Converts a point in a triangle in barycentric coordinates into cartesian coordinates.
## Examples
iex> # IO.puts "Check from_barycentric for a"
iex> require ElixirRigidPhysics.Geometry.Triangle, as: Triangle
iex> a = {0.0, 0.0, 1.0}
iex> b = {0.0, 1.0, 0.0}
iex> c = {1.0, 0.0, 0.0}
iex> t= Triangle.create_from_points( a, b, c )
iex> Triangle.from_barycentric(t, {1.0, 0.0, 0.0})
{0.0, 0.0, 1.0}
iex> # IO.puts "Check from_barycentric for b"
iex> require ElixirRigidPhysics.Geometry.Triangle, as: Triangle
iex> a = {0.0, 0.0, 1.0}
iex> b = {0.0, 1.0, 0.0}
iex> c = {1.0, 0.0, 0.0}
iex> t= Triangle.create_from_points( a, b, c )
iex> Triangle.from_barycentric(t, {0.0, 1.0, 0.0})
{0.0, 1.0, 0.0}
iex> # IO.puts "Check from_barycentric for a"
iex> require ElixirRigidPhysics.Geometry.Triangle, as: Triangle
iex> a = {0.0, 0.0, 1.0}
iex> b = {0.0, 1.0, 0.0}
iex> c = {1.0, 0.0, 0.0}
iex> t= Triangle.create_from_points( a, b, c )
iex> Triangle.from_barycentric(t, {0.0, 0.0, 1.0})
{1.0, 0.0, 0.0}
"""
@spec from_barycentric(triangle, {number, number, number}) :: Vec3.vec3()
def from_barycentric(triangle(a: a, b: b, c: c), {u, v, w}) do
u
|> Vec3.weighted_sum(a, v, b)
|> Vec3.add(Vec3.scale(c, w))
end
@doc """
Gets the barycentric coordinates of a point `q` in the space of a triangle `t`.
Note that the point must be coplanar with the triangle for this to reliably make sense.
## Examples
iex> # IO.puts "Check to_barycentric for a"
iex> require ElixirRigidPhysics.Geometry.Triangle, as: Triangle
iex> a = {0.0, 0.0, 1.0}
iex> b = {0.0, 1.0, 0.0}
iex> c = {1.0, 0.0, 0.0}
iex> t= Triangle.create_from_points( a, b, c )
iex> Triangle.to_barycentric(t, {0.0, 0.0, 1.0})
{1.0, 0.0, 0.0}
iex> # IO.puts "Check to_barycentric for b"
iex> require ElixirRigidPhysics.Geometry.Triangle, as: Triangle
iex> a = {0.0, 0.0, 1.0}
iex> b = {0.0, 1.0, 0.0}
iex> c = {1.0, 0.0, 0.0}
iex> t= Triangle.create_from_points( a, b, c )
iex> Triangle.to_barycentric(t, {0.0, 1.0, 0.0})
{0.0, 1.0, 0.0}
iex> # IO.puts "Check to_barycentric for a"
iex> require ElixirRigidPhysics.Geometry.Triangle, as: Triangle
iex> a = {0.0, 0.0, 1.0}
iex> b = {0.0, 1.0, 0.0}
iex> c = {1.0, 0.0, 0.0}
iex> t= Triangle.create_from_points( a, b, c )
iex> Triangle.to_barycentric(t, {1.0, 0.0, 0.0})
{0.0, 0.0, 1.0}
iex> # IO.puts "Check to_barycentric for center of abc"
iex> require ElixirRigidPhysics.Geometry.Triangle, as: Triangle
iex> a = {0.0, 0.0, 1.0}
iex> b = {0.0, 1.0, 0.0}
iex> c = {1.0, 0.0, 0.0}
iex> t= Triangle.create_from_points( a, b, c )
iex> Triangle.to_barycentric(t, {1/3, 1/3, 1/3})
{1/3, 1/3, 1/3}
"""
@spec to_barycentric(triangle, Vec3.vec3()) :: Vec3.vec3()
def to_barycentric(triangle(a: a, b: b, c: c), q) do
# note that a cross product has a magnitude of twice the area of the triangle formed by the vectors
# see https://users.csc.calpoly.edu/~zwood/teaching/csc471/2017F/barycentric.pdf for derivation
v_ba = Vec3.subtract(b, a)
v_ca = Vec3.subtract(c, a)
v_ac = Vec3.subtract(a, c)
v_cb = Vec3.subtract(c, b)
v_qb = Vec3.subtract(q, b)
v_qc = Vec3.subtract(q, c)
v_qa = Vec3.subtract(q, a)
n = Vec3.cross(v_ba, v_ca)
na = Vec3.cross(v_cb, v_qb)
nb = Vec3.cross(v_ac, v_qc)
nc = Vec3.cross(v_ba, v_qa)
# minor trick: the dot product of n with itself gives the squared magnitude; see formula (11) in the reference above
n_len_squared = Vec3.dot(n, n)
{Vec3.dot(n, na) / n_len_squared, Vec3.dot(n, nb) / n_len_squared,
Vec3.dot(n, nc) / n_len_squared}
end
end
|
lib/geometry/triangle.ex
| 0.888318 | 0.674252 |
triangle.ex
|
starcoder
|
defmodule Pilot do
require Logger
@moduledoc """
Defines a Pilot api endpoint
When used, the endpoint expects `:otp_app` as an option. The `:otp_app`
should point to an OTP application that has the endpoint configuration.
For example, the endpoint:
defmodule Example.Pilot do
use Pilot, otp_app: :example
end
Can be configured with:
config :example, Example.Pilot,
port: 8080,
router: Example.Router
## Options
The endpoint accepts the following options:
* `:port` - Specifies the port to run the endpoint on
* `:router` - The root router to use for all requests
"""
@http_methods [:get, :post, :put, :patch, :delete, :options]
@version Mix.Project.config[:version]
@doc false
defmacro __using__(opts) do
quote bind_quoted: [opts: opts] do
@behaviour Pilot
{otp_app, config} = Pilot.Supervisor.config(__MODULE__, opts)
@pilot_config config
@pilot_otp_app otp_app
def config do
@pilot_config
end
def start_link(opts \\ []) do
Pilot.Supervisor.start_link(__MODULE__, @pilot_config, opts)
end
end
end
@doc """
Returns the endpoint configuration stored in the `:otp_app` environment
"""
@callback config() :: Keyword.t
@doc """
Starts the endpoint supervisor
"""
@callback start_link(opts :: Keyword.t) :: {:ok, pid}
| {:error, {:already_started, pid}}
| {:error, term}
@doc false
defmacro is_method(spec) do
quote do
is_atom(unquote(spec)) and unquote(spec) in unquote(@http_methods)
end
end
@doc false
def parse_query(string) do
string
|> URI.query_decoder
|> Enum.reverse
|> Enum.reduce([], &decode(&1, &2))
end
defp decode({key, nil}, collection) do
collection
|> Keyword.put(String.to_atom(key), true)
end
defp decode({key, val}, collection) do
case Poison.decode(val) do
{:ok, decoded} ->
collection |> Keyword.put(String.to_atom(key), decoded)
{:error, _} ->
collection |> Keyword.put(String.to_atom(key), val)
end
end
end
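# Decoding sketch with the corrected reducer above: bare keys (which this module
# receives as nil values) become `true`, and JSON-parseable values are decoded.
Pilot.parse_query("a=1&flag")
#=> [a: 1, flag: true]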
|
lib/pilot.ex
| 0.855941 | 0.441974 |
pilot.ex
|
starcoder
|
defmodule GetGeocode.Apis.Nominatim do
@moduledoc """
Nominatim API.
"""
@url "https://nominatim.openstreetmap.org/search?q=<QUERY>&format=json"
@doc """
Gets data from an `addr`ess.
Returns a map with the data for the first result, or the tuple `{:ok, "No result"}` when nothing is found.
## Examples
```
iex> GetGeocode.Apis.Nominatim.get_data "<NAME>"
%{
"boundingbox" => ["-3.1058605", "-3.105157", "-60.0550895", "-60.0542833"],
"class" => "highway",
"display_name" => "<NAME>, <NAME>, Região Geográfica Imediata de Manaus, Região Geográfica Intermediária de Manaus, Amazonas, Região Norte, 69000-000, Brasil",
"importance" => 0.4,
"lat" => "-3.1054153",
"licence" => "Data © OpenStreetMap contributors, ODbL 1.0. https://osm.org/copyright",
"lon" => "-60.0547259",
"osm_id" => 662237608,
"osm_type" => "way",
"place_id" => 233020447,
"type" => "residential"
}
```
Also accepts args as a tuple `{lat, lng}`:
```
iex> GetGeocode.Apis.Nominatim.get_data {-3.1054153, -60.0547259}
%{
"boundingbox" => ["-3.1058605", "-3.105157", "-60.0550895", "-60.0542833"],
"class" => "highway",
"display_name" => "<NAME>, <NAME>, Região Geográfica Imediata de Manaus, Região Geográfica Intermediária de Manaus, Amazonas, Região Norte, 69000-000, Brasil",
"importance" => 0.001,
"lat" => "-3.1054153",
"licence" => "Data © OpenStreetMap contributors, ODbL 1.0. https://osm.org/copyright",
"lon" => "-60.0547259",
"osm_id" => 662237608,
"osm_type" => "way",
"place_id" => 233020447,
"type" => "residential"
}
```
"""
@doc since: "0.0.3"
def get_data(addr) do
result =
request(addr)
|> Jason.decode!()
case result do
[] -> {:ok, "No result"}
_ -> hd(result)
end
end
defp request(data) do
{:ok, %HTTPoison.Response{body: body}} = HTTPoison.get(sanitize_query(data))
body
end
defp sanitize_query(query) when is_binary(query) do
query
|> String.trim()
|> String.downcase()
|> URI.encode()
|> gen_query()
end
defp sanitize_query({lat, lng} = _query) do
~s/#{lat},#{lng}/
|> gen_query()
end
defp gen_query(query) do
@url
|> String.replace("<QUERY>", query)
end
end
|
lib/get_geocode/apis/nominatim.ex
| 0.810629 | 0.826292 |
nominatim.ex
|
starcoder
|
defmodule TinkoffInvest.Api do
@moduledoc """
This module provides two simple requests: GET and POST
The `payload` map is converted to a query string on request.
You will need to define your custom `TinkoffInvest.Model` to make this work or use existing one.
Examples:
```
TinkoffInvest.Api.request("/orders", :get, YourCustomModel)
TinkoffInvest.Api.request("/orders", :get, YourCustomModel, %{someQueryParam: true}) # /orders?someParam=true
TinkoffInvest.Api.request("/orders", :post, YourCustomModel, %{someQueryParam: true})
TinkoffInvest.Api.request("/orders", :post, YourCustomModel, %{someQueryParam: true}, %{bodyParam: true})
```
Please notice that `:post` request accepts both query and body payloads preferably as maps
"""
alias TinkoffInvest.Api.Request
alias TinkoffInvest.Model.Api.Response
alias TinkoffInvest.Model.Api.Error
@type method() :: :get | :post
@doc """
Allows you to send request to api if you need custom method that is not currently implemented
"""
@spec request(String.t(), method(), module(), map() | nil, map() | nil) :: Response.t()
def request(path, method, module, queryPayload \\ nil, body \\ %{})
def request(path, :get, module, queryPayload, _), do: get(path, module, queryPayload)
def request(path, :post, module, queryPayload, body), do: post(path, module, queryPayload, body)
@doc """
Builds query payload from map. Account id provided by default in config though can be overridden
Examples
iex>TinkoffInvest.change_account_id("123")
:ok
iex>TinkoffInvest.Api.build_payload("/orders", %{myQueryParam: true, someOtherParam: 2})
"/orders?brokerAccountId=123&myQueryParam=true&someOtherParam=2"
You can override broker account id:
iex>TinkoffInvest.Api.build_payload("/orders", %{brokerAccountId: "SB1111", myQueryParam: true, someOtherParam: 2})
"/orders?brokerAccountId=SB1111&myQueryParam=true&someOtherParam=2"
"""
@spec build_payload(String.t(), map() | nil) :: String.t()
def build_payload(path, payload) do
path
|> build_query(payload)
end
@doc """
Build body payload and encodes it to JSON if needed.
iex>TinkoffInvest.Api.build_body_payload(nil)
""
iex>TinkoffInvest.Api.build_body_payload("[123]")
"[123]"
iex>TinkoffInvest.Api.build_body_payload(%{myField: true})
"{\\"myField\\":true}"
"""
@spec build_body_payload(map() | nil | String.t()) :: String.t()
def build_body_payload(nil), do: ""
def build_body_payload(payload) when is_binary(payload), do: payload
def build_body_payload(payload) when is_map(payload) do
Jason.encode!(payload)
end
@doc """
Transforms body response and encodes it to `TinkoffInvest.Model.Api.Response`
iex>TinkoffInvest.Api.to_response(%HTTPoison.Response{body: "SOME_ERROR", status_code: 404, request: %HTTPoison.Request{url: "/orders"}})
%TinkoffInvest.Model.Api.Response{payload: %{"code" => nil, "message" => "SOME_ERROR"}, request_url: "/orders", status: nil, status_code: 404, tracking_id: nil}
iex>TinkoffInvest.Api.to_response(%HTTPoison.Response{body: nil, status_code: 404, request: %HTTPoison.Request{url: "/orders"}})
%TinkoffInvest.Model.Api.Response{payload: %{"code" => nil, "message" => nil}, request_url: "/orders", status: nil, status_code: 404, tracking_id: nil}
iex>TinkoffInvest.Api.to_response(%HTTPoison.Response{body: %{"payload" => %{"code" => "SOME_ERR", "message" => "Well, error"}}, status_code: 404, request: %HTTPoison.Request{url: "/orders"}})
%TinkoffInvest.Model.Api.Response{payload: %{"code" => "SOME_ERR", "message" => "Well, error"}, request_url: "/orders", status: nil, status_code: 404, tracking_id: nil}
"""
def to_response(%HTTPoison.Response{body: body, status_code: status_code} = r)
when is_binary(body) and status_code not in [200] do
r
|> Map.put(:body, %{"payload" => %{"code" => nil, "message" => body}})
|> to_response()
end
def to_response(%HTTPoison.Response{body: nil} = r) do
%{
"payload" => %{"code" => nil, "message" => nil}
}
|> response_metadata(r)
|> Response.new()
end
def to_response(%HTTPoison.Response{body: body} = r) do
body
|> response_metadata(r)
|> Response.new()
end
defp response_metadata(data, %HTTPoison.Response{
status_code: code,
request: %HTTPoison.Request{url: url}
}) do
data
|> Map.put("status_code", code)
|> Map.put("request_url", url)
end
defp get(path, module, payload) do
path
|> build_payload(payload)
|> Request.get()
|> handle_response(module)
end
defp post(path, module, payload, body) do
body = build_body_payload(body)
path
|> build_payload(payload)
|> Request.post(body)
|> handle_response(module)
end
defp handle_response({:ok, resp}, module), do: handle_response(resp, module)
defp handle_response(%Response{payload: %{"code" => _} = error} = data, _) do
error
|> Error.new()
|> Response.payload(data)
end
defp handle_response(%Response{payload: payload} = data, module) do
payload
|> module.new()
|> Response.payload(data)
end
defp build_query(path, nil), do: build_query(path, %{})
defp build_query(path, payload) do
payload
|> maybe_overwrite_account_id()
|> encode_query()
|> List.wrap()
|> List.insert_at(0, "?")
|> List.insert_at(0, path)
|> Enum.join()
end
defp encode_query(payload) do
payload
|> Enum.map(&build_query_field/1)
|> Map.new()
|> URI.encode_query()
end
defp build_query_field({field, %DateTime{} = datetime}) do
value = datetime |> DateTime.to_iso8601()
{field, value}
end
defp build_query_field(x), do: x
defp maybe_overwrite_account_id(%{brokerAccountId: _} = payload), do: payload
defp maybe_overwrite_account_id(payload) do
Map.put(payload, :brokerAccountId, account_id())
end
defp account_id do
Application.fetch_env!(:tinkoff_invest, :broker_account_id)
end
end
|
lib/tinkoff_invest/api.ex
| 0.834306 | 0.515071 |
api.ex
|
starcoder
|
defmodule AWS.CloudHSM do
@moduledoc """
AWS CloudHSM Service
"""
@doc """
Adds or overwrites one or more tags for the specified AWS CloudHSM
resource.
Each tag consists of a key and a value. Tag keys must be unique to each
resource.
"""
def add_tags_to_resource(client, input, options \\ []) do
request(client, "AddTagsToResource", input, options)
end
@doc """
Creates a high-availability partition group. A high-availability partition
group is a group of partitions that spans multiple physical HSMs.
"""
def create_hapg(client, input, options \\ []) do
request(client, "CreateHapg", input, options)
end
@doc """
Creates an uninitialized HSM instance.
There is an upfront fee charged for each HSM instance that you create with
the `CreateHsm` operation. If you accidentally provision an HSM and want to
request a refund, delete the instance using the `DeleteHsm` operation, go
to the [AWS Support Center](https://console.aws.amazon.com/support/home#/),
create a new case, and select **Account and Billing Support**.
<important> It can take up to 20 minutes to create and provision an HSM.
You can monitor the status of the HSM with the `DescribeHsm` operation. The
HSM is ready to be initialized when the status changes to `RUNNING`.
</important>
"""
def create_hsm(client, input, options \\ []) do
request(client, "CreateHsm", input, options)
end
@doc """
Creates an HSM client.
"""
def create_luna_client(client, input, options \\ []) do
request(client, "CreateLunaClient", input, options)
end
@doc """
Deletes a high-availability partition group.
"""
def delete_hapg(client, input, options \\ []) do
request(client, "DeleteHapg", input, options)
end
@doc """
Deletes an HSM. After completion, this operation cannot be undone and your
key material cannot be recovered.
"""
def delete_hsm(client, input, options \\ []) do
request(client, "DeleteHsm", input, options)
end
@doc """
Deletes a client.
"""
def delete_luna_client(client, input, options \\ []) do
request(client, "DeleteLunaClient", input, options)
end
@doc """
Retrieves information about a high-availability partition group.
"""
def describe_hapg(client, input, options \\ []) do
request(client, "DescribeHapg", input, options)
end
@doc """
Retrieves information about an HSM. You can identify the HSM by its ARN or
its serial number.
"""
def describe_hsm(client, input, options \\ []) do
request(client, "DescribeHsm", input, options)
end
@doc """
Retrieves information about an HSM client.
"""
def describe_luna_client(client, input, options \\ []) do
request(client, "DescribeLunaClient", input, options)
end
@doc """
Gets the configuration files necessary to connect to all high availability
partition groups the client is associated with.
"""
def get_config(client, input, options \\ []) do
request(client, "GetConfig", input, options)
end
@doc """
Lists the Availability Zones that have available AWS CloudHSM capacity.
"""
def list_available_zones(client, input, options \\ []) do
request(client, "ListAvailableZones", input, options)
end
@doc """
Lists the high-availability partition groups for the account.
This operation supports pagination with the use of the *NextToken* member.
If more results are available, the *NextToken* member of the response
contains a token that you pass in the next call to `ListHapgs` to retrieve
the next set of items.
"""
def list_hapgs(client, input, options \\ []) do
request(client, "ListHapgs", input, options)
end
@doc """
Retrieves the identifiers of all of the HSMs provisioned for the current
customer.
This operation supports pagination with the use of the *NextToken* member.
If more results are available, the *NextToken* member of the response
contains a token that you pass in the next call to `ListHsms` to retrieve
the next set of items.
"""
def list_hsms(client, input, options \\ []) do
request(client, "ListHsms", input, options)
end
@doc """
Lists all of the clients.
This operation supports pagination with the use of the *NextToken* member.
If more results are available, the *NextToken* member of the response
contains a token that you pass in the next call to `ListLunaClients` to
retrieve the next set of items.
"""
def list_luna_clients(client, input, options \\ []) do
request(client, "ListLunaClients", input, options)
end
@doc """
Returns a list of all tags for the specified AWS CloudHSM resource.
"""
def list_tags_for_resource(client, input, options \\ []) do
request(client, "ListTagsForResource", input, options)
end
@doc """
Modifies an existing high-availability partition group.
"""
def modify_hapg(client, input, options \\ []) do
request(client, "ModifyHapg", input, options)
end
@doc """
Modifies an HSM.
<important> This operation can result in the HSM being offline for up to 15
minutes while the AWS CloudHSM service is reconfigured. If you are
modifying a production HSM, you should ensure that your AWS CloudHSM
service is configured for high availability, and consider executing this
operation during a maintenance window.
</important>
"""
def modify_hsm(client, input, options \\ []) do
request(client, "ModifyHsm", input, options)
end
@doc """
Modifies the certificate used by the client.
This action can potentially start a workflow to install the new certificate
on the client's HSMs.
"""
def modify_luna_client(client, input, options \\ []) do
request(client, "ModifyLunaClient", input, options)
end
@doc """
Removes one or more tags from the specified AWS CloudHSM resource.
To remove a tag, specify only the tag key to remove (not the value). To
overwrite the value for an existing tag, use `AddTagsToResource`.
"""
def remove_tags_from_resource(client, input, options \\ []) do
request(client, "RemoveTagsFromResource", input, options)
end
  @spec request(map(), binary(), map(), list()) ::
    {:ok, Poison.Parser.t | nil, HTTPoison.Response.t} |
    {:error, {binary(), binary()}} |
    {:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "cloudhsm"}
host = get_host("cloudhsm", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "CloudHsmFrontendService.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/cloud_hsm.ex
| 0.827689 | 0.500793 |
cloud_hsm.ex
|
starcoder
|
defmodule DiscordBot.Entity.Guilds do
@moduledoc """
Provides a cache of guild information, backed by ETS.
"""
use GenServer
alias DiscordBot.Broker
alias DiscordBot.Broker.Event
alias DiscordBot.Entity.GuildRecord
alias DiscordBot.Model.Guild
@doc """
Starts the guild registry.
- `opts` - a keyword list of options. See below.
Options (required):
None.
Options (optional):
- `:broker` - a process (pid or name) acting as a `DiscordBot.Broker` to use for communication.
"""
def start_link(opts) do
broker = Keyword.get(opts, :broker, Broker)
GenServer.start_link(__MODULE__, broker, opts)
end
@doc """
Creates a new guild in the cache.
  The guild is added to `cache` if no guild with the ID given in `model`
  already exists. Otherwise, the existing guild is updated with the new data
  in `model`.
Returns `:ok` if the creation is successful, otherwise `:error`.
"""
@spec create(pid | atom, Guild.t()) :: :ok | :error
def create(cache, model) do
GenServer.call(cache, {:create, model})
end
@doc """
Deletes a cached guild.
  Always returns `:ok`, even if the provided ID is not present in the cache.
"""
@spec delete(pid | atom, String.t()) :: :ok
def delete(cache, id) do
GenServer.call(cache, {:delete, id})
end
@doc """
Gets a guild and its metadata by its ID.
  The returned guild will be an instance of `DiscordBot.Entity.GuildRecord`.
"""
@spec from_id?(String.t()) :: {:ok, GuildRecord.t()} | :error
def from_id?(id) do
case :ets.lookup(__MODULE__, id) do
[{^id, record}] -> {:ok, record}
[] -> :error
end
end
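  # A minimal usage sketch (hypothetical IDs and models, not from the source):
  #
  #     {:ok, cache} = DiscordBot.Entity.Guilds.start_link(broker: Broker)
  #     :ok = DiscordBot.Entity.Guilds.create(cache, %Guild{id: "123"})
  #     {:ok, _record} = DiscordBot.Entity.Guilds.from_id?("123")
  #     :ok = DiscordBot.Entity.Guilds.delete(cache, "123")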
## Callbacks
def init(broker) do
table =
if :ets.whereis(__MODULE__) == :undefined do
:ets.new(__MODULE__, [:named_table, :public, read_concurrency: true])
else
__MODULE__
end
topics = [
:guild_create,
:guild_update,
:guild_delete
]
for topic <- topics do
Broker.subscribe(broker, topic)
end
{:ok, table}
end
def handle_call({:create, model}, {pid, _ref}, table) do
{:reply, create_internal(table, model, pid), table}
end
def handle_call({:delete, id}, _from, table) do
{:reply, delete_internal(table, id), table}
end
def handle_info(%Event{topic: :guild_create, message: model, publisher: pub}, table) do
create_internal(table, model, pub)
{:noreply, table}
end
def handle_info(%Event{topic: :guild_update, message: model, publisher: pub}, table) do
create_internal(table, model, pub)
{:noreply, table}
end
def handle_info(%Event{topic: :guild_delete, message: model}, table) do
delete_internal(table, model.id)
{:noreply, table}
end
defp create_internal(_, nil, _), do: :error
defp create_internal(_, %Guild{id: nil}, _) do
:error
end
defp create_internal(table, model, source) do
record = GuildRecord.new(source, model)
:ets.insert(table, {model.id, record})
:ok
end
defp delete_internal(table, id) do
:ets.delete(table, id)
:ok
end
end
|
apps/discordbot/lib/discordbot/entity/guilds.ex
| 0.848675 | 0.404566 |
guilds.ex
|
starcoder
|
defmodule Elsa.Topic do
@moduledoc """
Provides functions for managing and interacting with topics in the Kafka cluster.
"""
import Elsa.Util, only: [with_connection: 3, reformat_endpoints: 1]
import Record, only: [defrecord: 2, extract: 2]
defrecord :kpro_rsp, extract(:kpro_rsp, from_lib: "kafka_protocol/include/kpro.hrl")
@doc """
Returns a list of all topics managed by the cluster as tuple of topic name and
number of partitions.
"""
@spec list(keyword) :: {:ok, [{String.t(), integer}]} | {:error, term}
def list(endpoints) do
{:ok, metadata} = :brod.get_metadata(reformat_endpoints(endpoints), :all)
topics =
metadata.topic_metadata
|> Enum.map(fn topic_metadata ->
{topic_metadata.topic, Enum.count(topic_metadata.partition_metadata)}
end)
{:ok, topics}
catch
error -> {:error, error}
end
@doc """
Confirms or denies the existence of a topic managed by the cluster.
"""
@spec exists?(keyword(), String.t()) :: boolean()
def exists?(endpoints, topic) do
with {:ok, topics} <- list(endpoints) do
Enum.any?(topics, fn {t, _} -> t == topic end)
end
end
@doc """
  Creates the supplied topic within the cluster. The desired number of
  partitions and the replication factor are taken from the optional keyword
  list; both default to 1 when not specified by the caller. Additional topic
  configuration entries may be supplied under the `:config` key.
"""
@spec create(keyword(), String.t(), keyword()) :: :ok | {:error, term()}
def create(endpoints, topic, opts \\ []) do
with_connection(endpoints, :controller, fn connection ->
config =
opts
|> Keyword.get(:config, [])
|> Enum.map(fn {key, val} -> %{config_name: to_string(key), config_value: val} end)
create_topic_args = %{
topic: topic,
num_partitions: Keyword.get(opts, :partitions, 1),
replication_factor: Keyword.get(opts, :replicas, 1),
replica_assignment: [],
config_entries: config
}
version = Elsa.Util.get_api_version(connection, :create_topics)
topic_request = :kpro_req_lib.create_topics(version, [create_topic_args], %{timeout: 5_000})
send_request(connection, topic_request, 5_000)
end)
end
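  # A minimal usage sketch (assumed local broker endpoint and illustrative
  # config values):
  #
  #     endpoints = [localhost: 9092]
  #     :ok = Elsa.Topic.create(endpoints, "events",
  #       partitions: 3,
  #       replicas: 1,
  #       config: [{"retention.ms", "86400000"}]
  #     )
  #     true = Elsa.Topic.exists?(endpoints, "events")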
@doc """
Deletes the supplied topic from the cluster.
"""
@spec delete(keyword(), String.t()) :: :ok | {:error, term()}
def delete(endpoints, topic) do
with_connection(endpoints, :controller, fn connection ->
version = Elsa.Util.get_api_version(connection, :delete_topics)
topic_request = :kpro_req_lib.delete_topics(version, [topic], %{timeout: 5_000})
send_request(connection, topic_request, 5_000)
end)
end
defp send_request(connection, request, timeout) do
case :kpro.request_sync(connection, request, timeout) do
{:ok, response} -> check_response(response)
result -> result
end
end
defp check_response(response) do
message = kpro_rsp(response, :msg)
error_key =
case Map.has_key?(message, :topic_errors) do
true -> :topic_errors
false -> :topic_error_codes
end
case Enum.find(message[error_key], fn error -> error.error_code != :no_error end) do
nil -> :ok
error -> {:error, {error.error_code, error[:error_message]}}
end
end
end
|
deps/elsa/lib/elsa/topic.ex
| 0.862974 | 0.563738 |
topic.ex
|
starcoder
|
defmodule ESpec.AssertReceive do
@moduledoc """
Defines `assert_receive` and `assert_received` helper macros
"""
@default_timeout 100
defmodule AssertReceiveError do
defexception message: nil
end
alias ESpec.ExpectTo
alias ESpec.Assertions.AssertReceive
@doc "Asserts that a message matching `pattern` was or is going to be received."
defmacro assert_receive(pattern, timeout \\ @default_timeout) do
do_assert_receive(pattern, timeout, __CALLER__)
end
@doc "Asserts that a message matching `pattern` was received and is in the current process' mailbox."
defmacro assert_received(pattern) do
do_assert_receive(pattern, 0, __CALLER__)
end
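  # A minimal usage sketch (hypothetical spec code, not from the source):
  #
  #     send(self(), {:ok, 42})
  #     assert_received {:ok, value}    # message already in the mailbox
  #
  #     parent = self()
  #     spawn(fn -> send(parent, :pong) end)
  #     assert_receive :pong, 500       # waits up to 500ms for a match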
defp do_assert_receive(pattern, timeout, caller) do
binary = Macro.to_string(pattern)
pattern = Macro.expand(pattern, caller)
vars = collect_vars_from_pattern(pattern)
pins = collect_pins_from_pattern(pattern)
pattern =
case pattern do
{:when, meta, [left, right]} ->
{:when, meta, [quote(do: unquote(left) = received), right]}
left ->
quote(do: unquote(left) = received)
end
ESpec.AssertReceive.__assert_receive__(pattern, binary, vars, pins, timeout)
end
@doc false
def __assert_receive__(pattern, binary, vars, pins, timeout \\ 100) do
quote do
result =
{received, unquote(vars)} =
receive do
unquote(pattern) -> {received, unquote(vars)}
after
unquote(timeout) ->
args = [unquote(binary), unquote(pins), ESpec.AssertReceive.__mailbox_messages__()]
ExpectTo.to(
{AssertReceive, args},
{ExpectTo, {:error, :timeout}, ESpec.Expect.pruned_stacktrace()}
)
end
args = [unquote(binary), unquote(pins), ESpec.AssertReceive.__mailbox_messages__()]
ExpectTo.to({AssertReceive, args}, {ExpectTo, result, ESpec.Expect.pruned_stacktrace()})
end
end
@max_mailbox_length 10
@doc false
def __mailbox_messages__ do
{:messages, messages} = Process.info(self(), :messages)
Enum.take(messages, @max_mailbox_length)
end
defp collect_pins_from_pattern(expr) do
{_, pins} =
Macro.prewalk(expr, [], fn
{:^, _, [{name, _, _} = var]}, acc ->
{:ok, [{name, var} | acc]}
form, acc ->
{form, acc}
end)
Enum.uniq_by(pins, &elem(&1, 0))
end
defp collect_vars_from_pattern({:when, _, [left, right]}) do
pattern = collect_vars_from_pattern(left)
vars =
for {name, _, context} = var <- collect_vars_from_pattern(right),
Enum.any?(pattern, &match?({^name, _, ^context}, &1)),
do: var
pattern ++ vars
end
defp collect_vars_from_pattern(expr) do
Macro.prewalk(expr, [], fn
{:"::", _, [left, _]}, acc ->
{[left], acc}
{skip, _, [_]}, acc when skip in [:^, :@] ->
{:ok, acc}
{:_, _, context}, acc when is_atom(context) ->
{:ok, acc}
{name, meta, context}, acc when is_atom(name) and is_atom(context) ->
{:ok, [{name, [generated: true] ++ meta, context} | acc]}
any_node, acc ->
{any_node, acc}
end)
|> elem(1)
end
end
|
lib/espec/assert_receive.ex
| 0.783119 | 0.658284 |
assert_receive.ex
|
starcoder
|
defmodule Currencyconverter.Convert do
require Logger
alias Currencyconverter.CurrencyEndpoints.Exchangeratesapi.GetCurrencies
@moduledoc """
  This module connects to the external currency conversion API (https://exchangeratesapi.io/documentation/).
"""
@doc """
  Converts an amount of the origin currency (`from`) to the selected currency (`to`).
  Returns a map with status `:success` or `:error`, depending on the outcome of the conversion.
## Examples
  iex> convert_to("BRL", "EUR", 1.0)
%{
status: :success,
transaction: %{
amount: "1.00",
converted: "0.16",
currency_rate: "0.155594",
from: "BRL",
to: "EUR"
}
}
"""
def convert_to(from, to, amount) do
from = String.upcase(from)
to = String.upcase(to)
case GetCurrencies.get_conversion() do
%HTTPoison.Response{status_code: 200, body: body} ->
converted = get_converted_amount(from, to, amount, Jason.decode!(body))
%{
status: :success,
transaction: %{
from: from,
to: to,
amount: :erlang.float_to_binary(amount, decimals: 2),
converted: :erlang.float_to_binary(converted, decimals: 2),
currency_rate: get_currency_rate(from, to, Jason.decode!(body))
}
}
%HTTPoison.Response{status_code: 401, body: body} ->
%{
status: :error,
message: Jason.decode!(body)
}
%HTTPoison.Response{status_code: 404, body: _} ->
%{error: "Error 404: API endpoint not Found"}
      %HTTPoison.Response{status_code: _, body: body} ->
        Logger.error("""
        Error while trying to get connection with API
        body: #{body}
        """)
        %{error: "Error while trying to get connection with API"}
end
end
@doc """
  Gets the currency rate based on the return of the external API.
## Examples
iex> external_api_return = %{
...> "base" => "EUR",
...> "date" => "2021-12-22",
...> "rates" => %{"BRL" => 6.422379, "JPY" => 129.338451, "USD" => 1.132496},
...> "success" => true,
...> "timestamp" => 1640199543
...> }
iex> get_currency_rate("BRL", "EUR", external_api_return)
"0.155706"
"""
def get_currency_rate(from, to, body) do
cond do
String.upcase(to) == "EUR" and String.upcase(from) != "EUR" ->
(1 / body["rates"][String.upcase(from)]) |> :erlang.float_to_binary(decimals: 6)
String.upcase(to) == "EUR" and String.upcase(from) == "EUR" ->
1.00 |> :erlang.float_to_binary(decimals: 2)
true ->
body["rates"][String.upcase(to)] |> :erlang.float_to_binary(decimals: 6)
end
end
@doc """
  Converts the value of an origin currency (BRL, JPY, USD, EUR) to EUR.
(origin value x currency rate)
## Examples
iex> external_api_return = %{
...> "base" => "EUR",
...> "date" => "2021-12-22",
...> "rates" => %{"BRL" => 6.422379, "JPY" => 129.338451, "USD" => 1.132496},
...> "success" => true,
...> "timestamp" => 1640199543
...> }
iex> get_converted_amount("BRL", "EUR", 5.0, external_api_return)
0.78
"""
def get_converted_amount(from, to, amount, conversion_body) do
case from do
"EUR" -> convert_to_euro(to, amount, conversion_body)
"BRL" -> convert_value_from_euro(amount, conversion_body["rates"]["BRL"])
"USD" -> convert_value_from_euro(amount, conversion_body["rates"]["USD"])
"JPY" -> convert_value_from_euro(amount, conversion_body["rates"]["JPY"])
_ -> %{error: "invalid currency"}
end
end
@doc """
  Converts a value in EUR to another selected currency (BRL, JPY, USD, EUR).
(EUR value x selected currency rate)
## Examples
iex> external_api_return = %{
...> "base" => "EUR",
...> "date" => "2021-12-22",
...> "rates" => %{"BRL" => 6.422379, "JPY" => 129.338451, "USD" => 1.132496},
...> "success" => true,
...> "timestamp" => 1640199543
...> }
iex> convert_to_euro("BRL", 5.0, external_api_return)
32.11
"""
def convert_to_euro(to, amount, conversion_body) do
case to do
"BRL" -> convert_value_to_euro(amount, conversion_body["rates"]["BRL"])
"USD" -> convert_value_to_euro(amount, conversion_body["rates"]["USD"])
"JPY" -> convert_value_to_euro(amount, conversion_body["rates"]["JPY"])
"EUR" -> amount
_ -> %{error: "invalid currency"}
end
end
defp convert_value_to_euro(amount, rate) do
(amount * rate) |> Float.round(2)
end
defp convert_value_from_euro(amount, rate) do
unit = if rate == 1, do: 1, else: 1 / rate
(amount * unit) |> Float.round(2)
end
end
|
lib/currencyconverter/convert.ex
| 0.892469 | 0.513485 |
convert.ex
|
starcoder
|
defmodule Inspect.Opts do
@moduledoc """
Defines the Inspect.Opts used by the Inspect protocol.
The following fields are available:
* `:structs` - when `false`, structs are not formatted by the inspect
protocol, they are instead printed as maps, defaults to `true`.
* `:binaries` - when `:as_strings` all binaries will be printed as strings,
non-printable bytes will be escaped.
When `:as_binaries` all binaries will be printed in bit syntax.
When the default `:infer`, the binary will be printed as a string if it
is printable, otherwise in bit syntax.
* `:charlists` - when `:as_charlists` all lists will be printed as char
lists, non-printable elements will be escaped.
When `:as_lists` all lists will be printed as lists.
When the default `:infer`, the list will be printed as a charlist if it
is printable, otherwise as list.
* `:limit` - limits the number of items that are printed for tuples,
bitstrings, and lists, does not apply to strings nor charlists, defaults
to 50.
* `:pretty` - if set to `true` enables pretty printing, defaults to `false`.
* `:width` - defaults to 80 characters, used when pretty is `true` or when
printing to IO devices. Set to 0 to force each item to be printed on its
own line.
* `:base` - prints integers as `:binary`, `:octal`, `:decimal`, or `:hex`, defaults
to `:decimal`. When inspecting binaries any `:base` other than `:decimal`
implies `binaries: :as_binaries`.
* `:safe` - when `false`, failures while inspecting structs will be raised
as errors instead of being wrapped in the `Inspect.Error` exception. This
is useful when debugging failures and crashes for custom inspect
implementations
* `:syntax_colors` - when set to a keyword list of colors the output will
be colorized. The keys are types and the values are the colors to use for
each type. e.g. `[number: :red, atom: :blue]`. Types can include
  `:number`, `:atom`, `:regex`, `:tuple`, `:map`, `:list`, and `:reset`.
Colors can be any `t:IO.ANSI.ansidata/0` as accepted by `IO.ANSI.format/1`.
"""
# TODO: Remove :char_lists key by 2.0
defstruct structs: true,
binaries: :infer,
charlists: :infer,
char_lists: :infer,
limit: 50,
width: 80,
base: :decimal,
pretty: false,
safe: true,
syntax_colors: []
@type color_key :: atom
# TODO: Remove :char_lists key and :as_char_lists value by 2.0
@type t :: %__MODULE__{
structs: boolean,
binaries: :infer | :as_binaries | :as_strings,
charlists: :infer | :as_lists | :as_charlists,
char_lists: :infer | :as_lists | :as_char_lists,
limit: pos_integer | :infinity,
width: pos_integer | :infinity,
base: :decimal | :binary | :hex | :octal,
pretty: boolean,
safe: boolean,
syntax_colors: [{color_key, IO.ANSI.ansidata}]
}
end
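# A minimal usage sketch (illustrative values; these options are accepted by
# `inspect/2` and `IO.inspect/2` as a keyword list):
#
#     inspect([1, 2, 3], limit: 2)            #=> "[1, 2, ...]"
#     inspect(255, base: :hex)                #=> "0xFF"
#     inspect("olá", binaries: :as_binaries)  #=> "<<111, 108, 195, 161>>"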
defmodule Inspect.Error do
@moduledoc """
Raised when a struct cannot be inspected.
"""
defexception [:message]
end
defmodule Inspect.Algebra do
@moduledoc ~S"""
A set of functions for creating and manipulating algebra
documents.
This module implements the functionality described in
["Strictly Pretty" (2000) by <NAME>][0] with small
additions, like support for String nodes, and a custom
rendering function that maximises horizontal space use.
iex> Inspect.Algebra.empty
:doc_nil
iex> "foo"
"foo"
With the functions in this module, we can concatenate different
elements together and render them:
iex> doc = Inspect.Algebra.concat(Inspect.Algebra.empty, "foo")
iex> Inspect.Algebra.format(doc, 80)
["foo"]
The functions `nest/2`, `space/2` and `line/2` help you put the
document together into a rigid structure. However, the document
algebra gets interesting when using functions like `break/1`, which
converts the given string into a line break depending on how much space
there is to print. Let's glue two docs together with a break and then
render it:
iex> doc = Inspect.Algebra.glue("a", " ", "b")
iex> Inspect.Algebra.format(doc, 80)
["a", " ", "b"]
Notice the break was represented as is, because we haven't reached
a line limit. Once we do, it is replaced by a newline:
iex> doc = Inspect.Algebra.glue(String.duplicate("a", 20), " ", "b")
iex> Inspect.Algebra.format(doc, 10)
["aaaaaaaaaaaaaaaaaaaa", "\n", "b"]
  Finally, this module also contains Elixir-related functions, a bit
  tied to Elixir formatting, namely `surround/3` and `surround_many/5`.
## Implementation details
The original Haskell implementation of the algorithm by [Wadler][1]
relies on lazy evaluation to unfold document groups on two alternatives:
`:flat` (breaks as spaces) and `:break` (breaks as newlines).
Implementing the same logic in a strict language such as Elixir leads
to an exponential growth of possible documents, unless document groups
are encoded explicitly as `:flat` or `:break`. Those groups are then reduced
to a simple document, where the layout is already decided, per [Lindig][0].
This implementation slightly changes the semantic of Lindig's algorithm
to allow elements that belong to the same group to be printed together
in the same line, even if they do not fit the line fully. This was achieved
by changing `:break` to mean a possible break and `:flat` to force a flat
structure. Then deciding if a break works as a newline is just a matter
of checking if we have enough space until the next break that is not
inside a group (which is still flat).
Custom pretty printers can be implemented using the documents returned
by this module and by providing their own rendering functions.
[0]: http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.34.2200
[1]: http://homepages.inf.ed.ac.uk/wadler/papers/prettier/prettier.pdf
"""
@surround_separator ","
@tail_separator " |"
@newline "\n"
@nesting 1
@space " "
# Functional interface to "doc" records
@type t :: :doc_nil | :doc_line | doc_cons | doc_nest | doc_break | doc_group | doc_color | binary
@typep doc_cons :: {:doc_cons, t, t}
defmacrop doc_cons(left, right) do
quote do: {:doc_cons, unquote(left), unquote(right)}
end
@typep doc_nest :: {:doc_nest, t, non_neg_integer}
defmacrop doc_nest(doc, indent) do
quote do: {:doc_nest, unquote(doc), unquote(indent)}
end
@typep doc_break :: {:doc_break, binary}
defmacrop doc_break(break) do
quote do: {:doc_break, unquote(break)}
end
@typep doc_group :: {:doc_group, t}
defmacrop doc_group(group) do
quote do: {:doc_group, unquote(group)}
end
@typep doc_color :: {:doc_color, t, IO.ANSI.ansidata}
defmacrop doc_color(doc, color) do
quote do: {:doc_color, unquote(doc), unquote(color)}
end
defmacrop is_doc(doc) do
if Macro.Env.in_guard?(__CALLER__) do
do_is_doc(doc)
else
var = quote do: doc
quote do
unquote(var) = unquote(doc)
unquote(do_is_doc(var))
end
end
end
defp do_is_doc(doc) do
quote do
is_binary(unquote(doc)) or
unquote(doc) in [:doc_nil, :doc_line] or
(is_tuple(unquote(doc)) and
elem(unquote(doc), 0) in [:doc_cons, :doc_nest, :doc_break, :doc_group, :doc_color])
end
end
@doc """
Converts an Elixir term to an algebra document
according to the `Inspect` protocol.
"""
@spec to_doc(any, Inspect.Opts.t) :: t
def to_doc(term, opts)
def to_doc(%{__struct__: struct} = map, %Inspect.Opts{} = opts) when is_atom(struct) do
if opts.structs do
try do
Inspect.inspect(map, opts)
rescue
e ->
stacktrace = System.stacktrace
# Because we try to raise a nice error message in case
# we can't inspect a struct, there is a chance the error
# message itself relies on the struct being printed, so
# we need to trap the inspected messages to guarantee
      # we won't try to render any failed struct when building
# the error message.
if Process.get(:inspect_trap) do
Inspect.Map.inspect(map, opts)
else
try do
Process.put(:inspect_trap, true)
res = Inspect.Map.inspect(map, opts)
res = IO.iodata_to_binary(format(res, :infinity))
exception = Inspect.Error.exception(
message: "got #{inspect e.__struct__} with message " <>
"#{inspect Exception.message(e)} while inspecting #{res}"
)
if opts.safe do
Inspect.inspect(exception, opts)
else
reraise(exception, stacktrace)
end
after
Process.delete(:inspect_trap)
end
end
end
else
Inspect.Map.inspect(map, opts)
end
end
def to_doc(arg, %Inspect.Opts{} = opts) do
Inspect.inspect(arg, opts)
end
@doc """
Returns a document entity used to represent nothingness.
## Examples
iex> Inspect.Algebra.empty
:doc_nil
"""
@spec empty() :: :doc_nil
def empty, do: :doc_nil
@doc ~S"""
Concatenates two document entities returning a new document.
## Examples
iex> doc = Inspect.Algebra.concat("hello", "world")
iex> Inspect.Algebra.format(doc, 80)
["hello", "world"]
"""
@spec concat(t, t) :: t
def concat(doc1, doc2) when is_doc(doc1) and is_doc(doc2) do
doc_cons(doc1, doc2)
end
@doc ~S"""
Concatenates a list of documents returning a new document.
## Examples
iex> doc = Inspect.Algebra.concat(["a", "b", "c"])
iex> Inspect.Algebra.format(doc, 80)
["a", "b", "c"]
"""
@spec concat([t]) :: t
def concat(docs) when is_list(docs) do
fold_doc(docs, &concat(&1, &2))
end
@doc ~S"""
Colors a document if the `color_key` has a color in the options.
"""
@spec color(t, Inspect.Opts.color_key, Inspect.Opts.t) :: doc_color
def color(doc, color_key, %Inspect.Opts{syntax_colors: syntax_colors}) when is_doc(doc) do
if precolor = Keyword.get(syntax_colors, color_key) do
postcolor = Keyword.get(syntax_colors, :reset, :reset)
concat(doc_color(doc, precolor), doc_color(empty(), postcolor))
else
doc
end
end
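  # A minimal sketch (illustrative; the exact escape bytes come from
  # `IO.ANSI.format_fragment/2`): when `syntax_colors` maps the key, the
  # document is wrapped in color fragments at format time.
  #
  #     opts = %Inspect.Opts{syntax_colors: [atom: :cyan]}
  #     doc = Inspect.Algebra.color(":ok", :atom, opts)
  #     IO.iodata_to_binary(Inspect.Algebra.format(doc, 80))
  #     #=> "\e[36m:ok\e[0m"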
@doc ~S"""
Nests the given document at the given `level`.
Nesting will be appended to the line breaks.
## Examples
iex> doc = Inspect.Algebra.nest(Inspect.Algebra.glue("hello", "world"), 5)
iex> Inspect.Algebra.format(doc, 5)
["hello", "\n ", "world"]
"""
@spec nest(t, non_neg_integer) :: doc_nest
def nest(doc, level)
def nest(doc, 0) when is_doc(doc) do
doc
end
def nest(doc, level) when is_doc(doc) and is_integer(level) and level > 0 do
doc_nest(doc, level)
end
@doc ~S"""
Returns a document entity representing a break based on the given
`string`.
This break can be rendered as a linebreak or as the given `string`,
depending on the `mode` of the chosen layout or the provided
separator.
## Examples
Let's create a document by concatenating two strings with a break between
them:
iex> doc = Inspect.Algebra.concat(["a", Inspect.Algebra.break("\t"), "b"])
iex> Inspect.Algebra.format(doc, 80)
["a", "\t", "b"]
Notice the break was represented with the given string, because we didn't
reach a line limit. Once we do, it is replaced by a newline:
iex> break = Inspect.Algebra.break("\t")
iex> doc = Inspect.Algebra.concat([String.duplicate("a", 20), break, "b"])
iex> Inspect.Algebra.format(doc, 10)
["aaaaaaaaaaaaaaaaaaaa", "\n", "b"]
"""
@spec break(binary) :: doc_break
def break(string) when is_binary(string), do: doc_break(string)
@doc ~S"""
Returns a document entity with the `" "` string as break.
See `break/1` for more information.
"""
@spec break() :: doc_break
def break(), do: doc_break(@space)
@doc ~S"""
Glues two documents together inserting `" "` as a break between them.
  This means the two documents will be separated by `" "` in case they
fit in the same line. Otherwise a line break is used.
## Examples
iex> doc = Inspect.Algebra.glue("hello", "world")
iex> Inspect.Algebra.format(doc, 80)
["hello", " ", "world"]
"""
@spec glue(t, t) :: t
def glue(doc1, doc2), do: concat(doc1, concat(break(), doc2))
@doc ~S"""
Glues two documents (`doc1` and `doc2`) together inserting the given
break `break_string` between them.
For more information on how the break is inserted, see `break/1`.
## Examples
iex> doc = Inspect.Algebra.glue("hello", "\t", "world")
iex> Inspect.Algebra.format(doc, 80)
["hello", "\t", "world"]
"""
@spec glue(t, binary, t) :: t
def glue(doc1, break_string, doc2) when is_binary(break_string),
do: concat(doc1, concat(break(break_string), doc2))
@doc ~S"""
Returns a group containing the specified document `doc`.
  Documents in a group are attempted to be rendered together
  to the best of the renderer's ability.
## Examples
iex> doc = Inspect.Algebra.group(
...> Inspect.Algebra.concat(
...> Inspect.Algebra.group(
...> Inspect.Algebra.concat(
...> "Hello,",
...> Inspect.Algebra.concat(
...> Inspect.Algebra.break,
...> "A"
...> )
...> )
...> ),
...> Inspect.Algebra.concat(
...> Inspect.Algebra.break,
...> "B"
...> )
...> ))
iex> Inspect.Algebra.format(doc, 80)
["Hello,", " ", "A", " ", "B"]
iex> Inspect.Algebra.format(doc, 6)
["Hello,", "\n", "A", " ", "B"]
"""
@spec group(t) :: doc_group
def group(doc) when is_doc(doc) do
doc_group(doc)
end
@doc ~S"""
Inserts a mandatory single space between two documents.
## Examples
iex> doc = Inspect.Algebra.space("Hughes", "Wadler")
iex> Inspect.Algebra.format(doc, 5)
["Hughes", " ", "Wadler"]
"""
@spec space(t, t) :: t
def space(doc1, doc2), do: concat(doc1, concat(" ", doc2))
@doc ~S"""
Inserts a mandatory linebreak between two documents.
## Examples
iex> doc = Inspect.Algebra.line("Hughes", "Wadler")
iex> Inspect.Algebra.format(doc, 80)
["Hughes", "\n", "Wadler"]
"""
@spec line(t, t) :: t
def line(doc1, doc2), do: concat(doc1, concat(:doc_line, doc2))
@doc ~S"""
Folds a list of documents into a document using the given folder function.
The list of documents is folded "from the right"; in that, this function is
similar to `List.foldr/3`, except that it doesn't expect an initial
accumulator and uses the last element of `docs` as the initial accumulator.
## Examples
iex> docs = ["A", "B", "C"]
iex> docs = Inspect.Algebra.fold_doc(docs, fn(doc, acc) ->
...> Inspect.Algebra.concat([doc, "!", acc])
...> end)
iex> Inspect.Algebra.format(docs, 80)
["A", "!", "B", "!", "C"]
"""
@spec fold_doc([t], ((t, t) -> t)) :: t
def fold_doc(docs, folder_fun)
def fold_doc([], _folder_fun),
do: empty()
def fold_doc([doc], _folder_fun),
do: doc
def fold_doc([doc | docs], folder_fun) when is_function(folder_fun, 2),
do: folder_fun.(doc, fold_doc(docs, folder_fun))
# Elixir conveniences
@doc ~S"""
Surrounds a document with characters.
Puts the given document `doc` between the `left` and `right` documents enclosing
  and nesting it. The document is marked as a group, so that it is rendered
  as concisely as possible.
## Examples
iex> doc = Inspect.Algebra.surround("[", Inspect.Algebra.glue("a", "b"), "]")
iex> Inspect.Algebra.format(doc, 3)
["[", "a", "\n ", "b", "]"]
"""
@spec surround(t, t, t) :: t
def surround(left, doc, right) when is_doc(left) and is_doc(doc) and is_doc(right) do
group(concat(left, concat(nest(doc, @nesting), right)))
end
@doc ~S"""
Maps and glues a collection of items.
It uses the given `left` and `right` documents as surrounding and the
separator document `separator` to separate items in `docs`. A limit can be
passed: when this limit is reached, this function stops gluing and outputs
`"..."` instead.
## Examples
iex> doc = Inspect.Algebra.surround_many("[", Enum.to_list(1..5), "]",
...> %Inspect.Opts{limit: :infinity}, fn i, _opts -> to_string(i) end)
iex> Inspect.Algebra.format(doc, 5) |> IO.iodata_to_binary
"[1,\n 2,\n 3,\n 4,\n 5]"
iex> doc = Inspect.Algebra.surround_many("[", Enum.to_list(1..5), "]",
...> %Inspect.Opts{limit: 3}, fn i, _opts -> to_string(i) end)
iex> Inspect.Algebra.format(doc, 20) |> IO.iodata_to_binary
"[1, 2, 3, ...]"
iex> doc = Inspect.Algebra.surround_many("[", Enum.to_list(1..5), "]",
...> %Inspect.Opts{limit: 3}, fn i, _opts -> to_string(i) end, "!")
iex> Inspect.Algebra.format(doc, 20) |> IO.iodata_to_binary
"[1! 2! 3! ...]"
"""
@spec surround_many(t, [any], t, Inspect.Opts.t, (term, Inspect.Opts.t -> t), t) :: t
def surround_many(left, docs, right, %Inspect.Opts{} = opts, fun, separator \\ @surround_separator)
when is_doc(left) and is_list(docs) and is_doc(right) and is_function(fun, 2) and is_doc(separator) do
do_surround_many(left, docs, right, opts.limit, opts, fun, separator)
end
defp do_surround_many(left, [], right, _, _opts, _fun, _) do
concat(left, right)
end
defp do_surround_many(left, docs, right, limit, opts, fun, sep) do
surround(left, do_surround_many(docs, limit, opts, fun, sep), right)
end
defp do_surround_many(_, 0, _opts, _fun, _sep) do
"..."
end
defp do_surround_many([], _limit, _opts, _fun, _sep) do
:doc_nil
end
defp do_surround_many([h], limit, opts, fun, _sep) do
fun.(h, %{opts | limit: limit})
end
defp do_surround_many([h | t], limit, opts, fun, sep) when is_list(t) do
limit = decrement(limit)
h = fun.(h, %{opts | limit: limit})
t = do_surround_many(t, limit, opts, fun, sep)
do_join(h, t, sep)
end
defp do_surround_many([h | t], limit, opts, fun, _sep) do
limit = decrement(limit)
h = fun.(h, %{opts | limit: limit})
t = fun.(t, %{opts | limit: limit})
do_join(h, t, @tail_separator)
end
defp do_join(:doc_nil, :doc_nil, _), do: :doc_nil
defp do_join(h, :doc_nil, _), do: h
defp do_join(:doc_nil, t, _), do: t
defp do_join(h, t, sep), do: glue(concat(h, sep), t)
defp decrement(:infinity), do: :infinity
defp decrement(counter), do: counter - 1
@doc ~S"""
Formats a given document for a given width.
Takes the maximum width and a document to print as its arguments
and returns an IO data representation of the best layout for the
document to fit in the given width.
## Examples
iex> doc = Inspect.Algebra.glue("hello", " ", "world")
iex> Inspect.Algebra.format(doc, 30) |> IO.iodata_to_binary()
"hello world"
iex> Inspect.Algebra.format(doc, 10) |> IO.iodata_to_binary()
"hello\nworld"
"""
@spec format(t, non_neg_integer | :infinity) :: iodata
def format(doc, width) when is_doc(doc) and (width == :infinity or width >= 0) do
format(width, 0, [{0, default_mode(width), doc_group(doc)}])
end
defp default_mode(:infinity), do: :flat
defp default_mode(_), do: :break
# Record representing the document mode to be rendered: flat or broken
@typep mode :: :flat | :break
@spec fits?(integer, [{integer, mode, t}]) :: boolean
defp fits?(w, _) when w < 0, do: false
defp fits?(_, []), do: true
defp fits?(_, [{_, _, :doc_line} | _]), do: true
defp fits?(w, [{_, _, :doc_nil} | t]), do: fits?(w, t)
defp fits?(w, [{i, m, doc_cons(x, y)} | t]), do: fits?(w, [{i, m, x} | [{i, m, y} | t]])
defp fits?(w, [{i, m, doc_color(x, _)} | t]), do: fits?(w, [{i, m, x} | t])
defp fits?(w, [{i, m, doc_nest(x, j)} | t]), do: fits?(w, [{i + j, m, x} | t])
defp fits?(w, [{i, _, doc_group(x)} | t]), do: fits?(w, [{i, :flat, x} | t])
defp fits?(w, [{_, _, s} | t]) when is_binary(s), do: fits?((w - byte_size(s)), t)
defp fits?(w, [{_, :flat, doc_break(s)} | t]), do: fits?((w - byte_size(s)), t)
defp fits?(_, [{_, :break, doc_break(_)} | _]), do: true
@spec format(integer | :infinity, integer, [{integer, mode, t}]) :: [binary]
defp format(_, _, []), do: []
defp format(w, _, [{i, _, :doc_line} | t]), do: [indent(i) | format(w, i, t)]
defp format(w, k, [{_, _, :doc_nil} | t]), do: format(w, k, t)
defp format(w, k, [{i, m, doc_cons(x, y)} | t]), do: format(w, k, [{i, m, x} | [{i, m, y} | t]])
defp format(w, k, [{i, m, doc_nest(x, j)} | t]), do: format(w, k, [{i + j, m, x} | t])
defp format(w, k, [{i, m, doc_group(x)} | t]), do: format(w, k, [{i, m, x} | t])
defp format(w, k, [{i, m, doc_color(x, c)} | t]), do: [ansi(c) | format(w, k, [{i, m, x} | t])]
defp format(w, k, [{_, _, s} | t]) when is_binary(s), do: [s | format(w, (k + byte_size(s)), t)]
defp format(w, k, [{_, :flat, doc_break(s)} | t]), do: [s | format(w, (k + byte_size(s)), t)]
defp format(w, k, [{i, :break, doc_break(s)} | t]) do
k = k + byte_size(s)
if w == :infinity or fits?(w - k, t) do
[s | format(w, k, t)]
else
[indent(i) | format(w, i, t)]
end
end
defp ansi(color) do
IO.ANSI.format_fragment(color, true)
end
defp indent(0), do: @newline
defp indent(i), do: @newline <> :binary.copy(" ", i)
end
|
lib/elixir/lib/inspect/algebra.ex
| 0.827759 | 0.603377 |
algebra.ex
|
starcoder
|